Diffstat
-rw-r--r--  .qmake.conf | 2
-rw-r--r--  qtjsbackend.pro | 5
-rw-r--r--  src/3rdparty/v8/.gitignore | 1
-rw-r--r--  src/3rdparty/v8/AUTHORS | 1
-rw-r--r--  src/3rdparty/v8/ChangeLog | 375
-rw-r--r--  src/3rdparty/v8/Makefile | 22
-rw-r--r--  src/3rdparty/v8/Makefile.android | 16
-rw-r--r--  src/3rdparty/v8/SConstruct | 24
-rw-r--r--  src/3rdparty/v8/build/android.gypi | 15
-rw-r--r--  src/3rdparty/v8/build/common.gypi | 70
-rwxr-xr-x  src/3rdparty/v8/build/gyp_v8 | 2
-rw-r--r--  src/3rdparty/v8/build/standalone.gypi | 8
-rw-r--r--  src/3rdparty/v8/include/v8-profiler.h | 17
-rw-r--r--  src/3rdparty/v8/include/v8.h | 1380
-rw-r--r--  src/3rdparty/v8/samples/lineprocessor.cc | 9
-rw-r--r--  src/3rdparty/v8/samples/process.cc | 18
-rw-r--r--  src/3rdparty/v8/samples/shell.cc | 29
-rwxr-xr-x  src/3rdparty/v8/src/SConscript | 4
-rw-r--r--  src/3rdparty/v8/src/accessors.cc | 211
-rw-r--r--  src/3rdparty/v8/src/api.cc | 1321
-rw-r--r--  src/3rdparty/v8/src/api.h | 6
-rw-r--r--  src/3rdparty/v8/src/arm/assembler-arm-inl.h | 43
-rw-r--r--  src/3rdparty/v8/src/arm/assembler-arm.cc | 597
-rw-r--r--  src/3rdparty/v8/src/arm/assembler-arm.h | 367
-rw-r--r--  src/3rdparty/v8/src/arm/builtins-arm.cc | 118
-rw-r--r--  src/3rdparty/v8/src/arm/code-stubs-arm.cc | 2081
-rw-r--r--  src/3rdparty/v8/src/arm/code-stubs-arm.h | 181
-rw-r--r--  src/3rdparty/v8/src/arm/codegen-arm.cc | 263
-rw-r--r--  src/3rdparty/v8/src/arm/codegen-arm.h | 25
-rw-r--r--  src/3rdparty/v8/src/arm/constants-arm.cc | 8
-rw-r--r--  src/3rdparty/v8/src/arm/constants-arm.h | 8
-rw-r--r--  src/3rdparty/v8/src/arm/debug-arm.cc | 2
-rw-r--r--  src/3rdparty/v8/src/arm/deoptimizer-arm.cc | 529
-rw-r--r--  src/3rdparty/v8/src/arm/disasm-arm.cc | 88
-rw-r--r--  src/3rdparty/v8/src/arm/frames-arm.h | 14
-rw-r--r--  src/3rdparty/v8/src/arm/full-codegen-arm.cc | 383
-rw-r--r--  src/3rdparty/v8/src/arm/ic-arm.cc | 210
-rw-r--r--  src/3rdparty/v8/src/arm/lithium-arm.cc | 364
-rw-r--r--  src/3rdparty/v8/src/arm/lithium-arm.h | 214
-rw-r--r--  src/3rdparty/v8/src/arm/lithium-codegen-arm.cc | 1459
-rw-r--r--  src/3rdparty/v8/src/arm/lithium-codegen-arm.h | 51
-rw-r--r--  src/3rdparty/v8/src/arm/lithium-gap-resolver-arm.cc | 13
-rw-r--r--  src/3rdparty/v8/src/arm/macro-assembler-arm.cc | 306
-rw-r--r--  src/3rdparty/v8/src/arm/macro-assembler-arm.h | 75
-rw-r--r--  src/3rdparty/v8/src/arm/regexp-macro-assembler-arm.cc | 43
-rw-r--r--  src/3rdparty/v8/src/arm/simulator-arm.cc | 252
-rw-r--r--  src/3rdparty/v8/src/arm/simulator-arm.h | 10
-rw-r--r--  src/3rdparty/v8/src/arm/stub-cache-arm.cc | 1538
-rw-r--r--  src/3rdparty/v8/src/array.js | 9
-rw-r--r--  src/3rdparty/v8/src/assembler.cc | 230
-rw-r--r--  src/3rdparty/v8/src/assembler.h | 84
-rw-r--r--  src/3rdparty/v8/src/ast.cc | 63
-rw-r--r--  src/3rdparty/v8/src/ast.h | 96
-rw-r--r--  src/3rdparty/v8/src/atomicops.h | 4
-rw-r--r--  src/3rdparty/v8/src/atomicops_internals_arm_qnx.h | 8
-rw-r--r--  src/3rdparty/v8/src/atomicops_internals_tsan.h | 335
-rw-r--r--  src/3rdparty/v8/src/bootstrapper.cc | 580
-rw-r--r--  src/3rdparty/v8/src/bootstrapper.h | 19
-rw-r--r--  src/3rdparty/v8/src/builtins.cc | 941
-rw-r--r--  src/3rdparty/v8/src/builtins.h | 84
-rw-r--r--  src/3rdparty/v8/src/checks.cc | 3
-rw-r--r--  src/3rdparty/v8/src/circular-queue.cc | 20
-rw-r--r--  src/3rdparty/v8/src/circular-queue.h | 4
-rw-r--r--  src/3rdparty/v8/src/code-stubs-hydrogen.cc | 366
-rw-r--r--  src/3rdparty/v8/src/code-stubs.cc | 321
-rw-r--r--  src/3rdparty/v8/src/code-stubs.h | 772
-rw-r--r--  src/3rdparty/v8/src/codegen.cc | 54
-rw-r--r--  src/3rdparty/v8/src/codegen.h | 28
-rw-r--r--  src/3rdparty/v8/src/compiler.cc | 227
-rw-r--r--  src/3rdparty/v8/src/compiler.h | 119
-rw-r--r--  src/3rdparty/v8/src/contexts.cc | 28
-rw-r--r--  src/3rdparty/v8/src/contexts.h | 18
-rw-r--r--  src/3rdparty/v8/src/conversions-inl.h | 8
-rw-r--r--  src/3rdparty/v8/src/counters.cc | 7
-rw-r--r--  src/3rdparty/v8/src/cpu-profiler.cc | 60
-rw-r--r--  src/3rdparty/v8/src/cpu-profiler.h | 15
-rw-r--r--  src/3rdparty/v8/src/d8-debug.cc | 4
-rw-r--r--  src/3rdparty/v8/src/d8-readline.cc | 15
-rw-r--r--  src/3rdparty/v8/src/d8.cc | 531
-rw-r--r--  src/3rdparty/v8/src/d8.gyp | 8
-rw-r--r--  src/3rdparty/v8/src/d8.h | 41
-rw-r--r--  src/3rdparty/v8/src/d8.js | 617
-rw-r--r--  src/3rdparty/v8/src/data-flow.h | 55
-rw-r--r--  src/3rdparty/v8/src/date.js | 2
-rw-r--r--  src/3rdparty/v8/src/debug-agent.cc | 19
-rw-r--r--  src/3rdparty/v8/src/debug-debugger.js | 258
-rw-r--r--  src/3rdparty/v8/src/debug.cc | 151
-rw-r--r--  src/3rdparty/v8/src/debug.h | 14
-rw-r--r--  src/3rdparty/v8/src/deoptimizer.cc | 854
-rw-r--r--  src/3rdparty/v8/src/deoptimizer.h | 84
-rw-r--r--  src/3rdparty/v8/src/disassembler.cc | 28
-rw-r--r--  src/3rdparty/v8/src/disassembler.h | 2
-rw-r--r--  src/3rdparty/v8/src/elements.cc | 799
-rw-r--r--  src/3rdparty/v8/src/elements.h | 40
-rw-r--r--  src/3rdparty/v8/src/execution.cc | 74
-rw-r--r--  src/3rdparty/v8/src/execution.h | 9
-rw-r--r--  src/3rdparty/v8/src/extensions/externalize-string-extension.cc | 13
-rw-r--r--  src/3rdparty/v8/src/factory.cc | 132
-rw-r--r--  src/3rdparty/v8/src/factory.h | 57
-rw-r--r--  src/3rdparty/v8/src/flag-definitions.h | 75
-rw-r--r--  src/3rdparty/v8/src/frames-inl.h | 10
-rw-r--r--  src/3rdparty/v8/src/frames.cc | 121
-rw-r--r--  src/3rdparty/v8/src/frames.h | 116
-rw-r--r--  src/3rdparty/v8/src/full-codegen.cc | 262
-rw-r--r--  src/3rdparty/v8/src/full-codegen.h | 27
-rw-r--r--  src/3rdparty/v8/src/func-name-inferrer.cc | 6
-rw-r--r--  src/3rdparty/v8/src/global-handles.cc | 320
-rw-r--r--  src/3rdparty/v8/src/global-handles.h | 40
-rw-r--r--  src/3rdparty/v8/src/globals.h | 6
-rw-r--r--  src/3rdparty/v8/src/handles-inl.h | 77
-rw-r--r--  src/3rdparty/v8/src/handles.cc | 229
-rw-r--r--  src/3rdparty/v8/src/handles.h | 76
-rw-r--r--  src/3rdparty/v8/src/heap-inl.h | 120
-rw-r--r--  src/3rdparty/v8/src/heap-profiler.cc | 54
-rw-r--r--  src/3rdparty/v8/src/heap-profiler.h | 36
-rw-r--r--  src/3rdparty/v8/src/heap-snapshot-generator-inl.h | 88
-rw-r--r--  src/3rdparty/v8/src/heap-snapshot-generator.cc | 2703
-rw-r--r--  src/3rdparty/v8/src/heap-snapshot-generator.h | 697
-rw-r--r--  src/3rdparty/v8/src/heap.cc | 1648
-rw-r--r--  src/3rdparty/v8/src/heap.h | 599
-rw-r--r--  src/3rdparty/v8/src/hydrogen-instructions.cc | 936
-rw-r--r--  src/3rdparty/v8/src/hydrogen-instructions.h | 1538
-rw-r--r--  src/3rdparty/v8/src/hydrogen.cc | 2873
-rw-r--r--  src/3rdparty/v8/src/hydrogen.h | 356
-rw-r--r--  src/3rdparty/v8/src/ia32/assembler-ia32-inl.h | 14
-rw-r--r--  src/3rdparty/v8/src/ia32/assembler-ia32.cc | 133
-rw-r--r--  src/3rdparty/v8/src/ia32/assembler-ia32.h | 148
-rw-r--r--  src/3rdparty/v8/src/ia32/builtins-ia32.cc | 87
-rw-r--r--  src/3rdparty/v8/src/ia32/code-stubs-ia32.cc | 1854
-rw-r--r--  src/3rdparty/v8/src/ia32/code-stubs-ia32.h | 126
-rw-r--r--  src/3rdparty/v8/src/ia32/codegen-ia32.cc | 232
-rw-r--r--  src/3rdparty/v8/src/ia32/codegen-ia32.h | 18
-rw-r--r--  src/3rdparty/v8/src/ia32/deoptimizer-ia32.cc | 480
-rw-r--r--  src/3rdparty/v8/src/ia32/disasm-ia32.cc | 9
-rw-r--r--  src/3rdparty/v8/src/ia32/frames-ia32.h | 16
-rw-r--r--  src/3rdparty/v8/src/ia32/full-codegen-ia32.cc | 342
-rw-r--r--  src/3rdparty/v8/src/ia32/ic-ia32.cc | 207
-rw-r--r--  src/3rdparty/v8/src/ia32/lithium-codegen-ia32.cc | 1437
-rw-r--r--  src/3rdparty/v8/src/ia32/lithium-codegen-ia32.h | 69
-rw-r--r--  src/3rdparty/v8/src/ia32/lithium-gap-resolver-ia32.cc | 12
-rw-r--r--  src/3rdparty/v8/src/ia32/lithium-gap-resolver-ia32.h | 4
-rw-r--r--  src/3rdparty/v8/src/ia32/lithium-ia32.cc | 365
-rw-r--r--  src/3rdparty/v8/src/ia32/lithium-ia32.h | 214
-rw-r--r--  src/3rdparty/v8/src/ia32/macro-assembler-ia32.cc | 185
-rw-r--r--  src/3rdparty/v8/src/ia32/macro-assembler-ia32.h | 38
-rw-r--r--  src/3rdparty/v8/src/ia32/regexp-macro-assembler-ia32.cc | 22
-rw-r--r--  src/3rdparty/v8/src/ia32/stub-cache-ia32.cc | 1325
-rw-r--r--  src/3rdparty/v8/src/ic-inl.h | 16
-rw-r--r--  src/3rdparty/v8/src/ic.cc | 1924
-rw-r--r--  src/3rdparty/v8/src/ic.h | 502
-rw-r--r--  src/3rdparty/v8/src/incremental-marking-inl.h | 27
-rw-r--r--  src/3rdparty/v8/src/incremental-marking.cc | 149
-rw-r--r--  src/3rdparty/v8/src/incremental-marking.h | 15
-rw-r--r--  src/3rdparty/v8/src/interface.cc | 17
-rw-r--r--  src/3rdparty/v8/src/interface.h | 35
-rw-r--r--  src/3rdparty/v8/src/interpreter-irregexp.cc | 4
-rw-r--r--  src/3rdparty/v8/src/isolate.cc | 424
-rw-r--r--  src/3rdparty/v8/src/isolate.h | 116
-rw-r--r--  src/3rdparty/v8/src/json-parser.h | 128
-rw-r--r--  src/3rdparty/v8/src/json-stringifier.h | 202
-rw-r--r--  src/3rdparty/v8/src/json.js | 4
-rw-r--r--  src/3rdparty/v8/src/jsregexp-inl.h | 106
-rw-r--r--  src/3rdparty/v8/src/jsregexp.cc | 304
-rw-r--r--  src/3rdparty/v8/src/jsregexp.h | 69
-rw-r--r--  src/3rdparty/v8/src/list-inl.h | 11
-rw-r--r--  src/3rdparty/v8/src/list.h | 3
-rw-r--r--  src/3rdparty/v8/src/lithium-allocator-inl.h | 24
-rw-r--r--  src/3rdparty/v8/src/lithium-allocator.cc | 66
-rw-r--r--  src/3rdparty/v8/src/lithium-allocator.h | 53
-rw-r--r--  src/3rdparty/v8/src/lithium.cc | 62
-rw-r--r--  src/3rdparty/v8/src/lithium.h | 30
-rw-r--r--  src/3rdparty/v8/src/liveedit-debugger.js | 37
-rw-r--r--  src/3rdparty/v8/src/liveedit.cc | 334
-rw-r--r--  src/3rdparty/v8/src/liveobjectlist-inl.h | 126
-rw-r--r--  src/3rdparty/v8/src/liveobjectlist.cc | 2631
-rw-r--r--  src/3rdparty/v8/src/liveobjectlist.h | 319
-rw-r--r--  src/3rdparty/v8/src/log-utils.cc | 8
-rw-r--r--  src/3rdparty/v8/src/log.cc | 335
-rw-r--r--  src/3rdparty/v8/src/log.h | 85
-rw-r--r--  src/3rdparty/v8/src/macro-assembler.h | 17
-rw-r--r--  src/3rdparty/v8/src/macros.py | 3
-rw-r--r--  src/3rdparty/v8/src/mark-compact.cc | 548
-rw-r--r--  src/3rdparty/v8/src/mark-compact.h | 109
-rw-r--r--  src/3rdparty/v8/src/marking-thread.cc | 85
-rw-r--r--  src/3rdparty/v8/src/marking-thread.h (renamed from src/3rdparty/v8/src/inspector.cc) | 56
-rw-r--r--  src/3rdparty/v8/src/math.js | 30
-rw-r--r--  src/3rdparty/v8/src/messages.cc | 47
-rw-r--r--  src/3rdparty/v8/src/messages.h | 8
-rw-r--r--  src/3rdparty/v8/src/messages.js | 207
-rw-r--r--  src/3rdparty/v8/src/mips/assembler-mips-inl.h | 54
-rw-r--r--  src/3rdparty/v8/src/mips/assembler-mips.cc | 141
-rw-r--r--  src/3rdparty/v8/src/mips/assembler-mips.h | 72
-rw-r--r--  src/3rdparty/v8/src/mips/builtins-mips.cc | 134
-rw-r--r--  src/3rdparty/v8/src/mips/code-stubs-mips.cc | 1923
-rw-r--r--  src/3rdparty/v8/src/mips/code-stubs-mips.h | 171
-rw-r--r--  src/3rdparty/v8/src/mips/codegen-mips.cc | 290
-rw-r--r--  src/3rdparty/v8/src/mips/codegen-mips.h | 22
-rw-r--r--  src/3rdparty/v8/src/mips/constants-mips.cc | 2
-rw-r--r--  src/3rdparty/v8/src/mips/constants-mips.h | 14
-rw-r--r--  src/3rdparty/v8/src/mips/deoptimizer-mips.cc | 482
-rw-r--r--  src/3rdparty/v8/src/mips/disasm-mips.cc | 13
-rw-r--r--  src/3rdparty/v8/src/mips/frames-mips.h | 24
-rw-r--r--  src/3rdparty/v8/src/mips/full-codegen-mips.cc | 346
-rw-r--r--  src/3rdparty/v8/src/mips/ic-mips.cc | 204
-rw-r--r--  src/3rdparty/v8/src/mips/lithium-codegen-mips.cc | 1214
-rw-r--r--  src/3rdparty/v8/src/mips/lithium-codegen-mips.h | 39
-rw-r--r--  src/3rdparty/v8/src/mips/lithium-gap-resolver-mips.cc | 7
-rw-r--r--  src/3rdparty/v8/src/mips/lithium-mips.cc | 286
-rw-r--r--  src/3rdparty/v8/src/mips/lithium-mips.h | 177
-rw-r--r--  src/3rdparty/v8/src/mips/macro-assembler-mips.cc | 158
-rw-r--r--  src/3rdparty/v8/src/mips/macro-assembler-mips.h | 43
-rw-r--r--  src/3rdparty/v8/src/mips/regexp-macro-assembler-mips.cc | 18
-rw-r--r--  src/3rdparty/v8/src/mips/simulator-mips.cc | 134
-rw-r--r--  src/3rdparty/v8/src/mips/simulator-mips.h | 5
-rw-r--r--  src/3rdparty/v8/src/mips/stub-cache-mips.cc | 1519
-rw-r--r--  src/3rdparty/v8/src/mirror-debugger.js | 25
-rw-r--r--  src/3rdparty/v8/src/mksnapshot.cc | 2
-rw-r--r--  src/3rdparty/v8/src/object-observe.js | 111
-rw-r--r--  src/3rdparty/v8/src/objects-debug.cc | 82
-rw-r--r--  src/3rdparty/v8/src/objects-inl.h | 774
-rw-r--r--  src/3rdparty/v8/src/objects-printer.cc | 114
-rw-r--r--  src/3rdparty/v8/src/objects-visiting-inl.h | 77
-rw-r--r--  src/3rdparty/v8/src/objects-visiting.cc | 7
-rw-r--r--  src/3rdparty/v8/src/objects-visiting.h | 70
-rw-r--r--  src/3rdparty/v8/src/objects.cc | 2885
-rw-r--r--  src/3rdparty/v8/src/objects.h | 1678
-rw-r--r--  src/3rdparty/v8/src/optimizing-compiler-thread.cc | 27
-rw-r--r--  src/3rdparty/v8/src/optimizing-compiler-thread.h | 22
-rw-r--r--  src/3rdparty/v8/src/parser.cc | 208
-rw-r--r--  src/3rdparty/v8/src/parser.h | 1
-rw-r--r--  src/3rdparty/v8/src/platform-cygwin.cc | 47
-rw-r--r--  src/3rdparty/v8/src/platform-freebsd.cc | 93
-rw-r--r--  src/3rdparty/v8/src/platform-linux.cc | 242
-rw-r--r--  src/3rdparty/v8/src/platform-macos.cc | 52
-rw-r--r--  src/3rdparty/v8/src/platform-nullos.cc | 32
-rw-r--r--  src/3rdparty/v8/src/platform-openbsd.cc | 80
-rw-r--r--  src/3rdparty/v8/src/platform-posix.cc | 9
-rw-r--r--  src/3rdparty/v8/src/platform-qnx.cc | 94
-rw-r--r--  src/3rdparty/v8/src/platform-solaris.cc | 82
-rw-r--r--  src/3rdparty/v8/src/platform-win32.cc | 66
-rw-r--r--  src/3rdparty/v8/src/platform.h | 40
-rw-r--r--  src/3rdparty/v8/src/preparse-data.cc | 4
-rw-r--r--  src/3rdparty/v8/src/preparse-data.h | 2
-rw-r--r--  src/3rdparty/v8/src/preparser.h | 4
-rw-r--r--  src/3rdparty/v8/src/prettyprinter.cc | 20
-rw-r--r--  src/3rdparty/v8/src/prettyprinter.h | 2
-rw-r--r--  src/3rdparty/v8/src/profile-generator-inl.h | 51
-rw-r--r--  src/3rdparty/v8/src/profile-generator.cc | 2661
-rw-r--r--  src/3rdparty/v8/src/profile-generator.h | 652
-rw-r--r--  src/3rdparty/v8/src/property-details.h | 4
-rw-r--r--  src/3rdparty/v8/src/property.cc | 2
-rw-r--r--  src/3rdparty/v8/src/property.h | 117
-rw-r--r--  src/3rdparty/v8/src/regexp-macro-assembler.cc | 30
-rw-r--r--  src/3rdparty/v8/src/regexp-macro-assembler.h | 4
-rw-r--r--  src/3rdparty/v8/src/regexp.js | 38
-rw-r--r--  src/3rdparty/v8/src/rewriter.cc | 17
-rw-r--r--  src/3rdparty/v8/src/runtime-profiler.cc | 33
-rw-r--r--  src/3rdparty/v8/src/runtime-profiler.h | 25
-rw-r--r--  src/3rdparty/v8/src/runtime.cc | 2843
-rw-r--r--  src/3rdparty/v8/src/runtime.h | 92
-rw-r--r--  src/3rdparty/v8/src/safepoint-table.cc | 11
-rw-r--r--  src/3rdparty/v8/src/scanner.h | 14
-rw-r--r--  src/3rdparty/v8/src/scopeinfo.cc | 81
-rw-r--r--  src/3rdparty/v8/src/scopeinfo.h | 67
-rw-r--r--  src/3rdparty/v8/src/scopes.cc | 265
-rw-r--r--  src/3rdparty/v8/src/scopes.h | 36
-rw-r--r--  src/3rdparty/v8/src/serialize.cc | 41
-rw-r--r--  src/3rdparty/v8/src/serialize.h | 8
-rw-r--r--  src/3rdparty/v8/src/smart-pointers.h | 12
-rw-r--r--  src/3rdparty/v8/src/spaces.cc | 563
-rw-r--r--  src/3rdparty/v8/src/spaces.h | 203
-rw-r--r--  src/3rdparty/v8/src/store-buffer.cc | 11
-rw-r--r--  src/3rdparty/v8/src/store-buffer.h | 4
-rw-r--r--  src/3rdparty/v8/src/string-search.h | 28
-rw-r--r--  src/3rdparty/v8/src/string-stream.cc | 20
-rw-r--r--  src/3rdparty/v8/src/string.js | 203
-rw-r--r--  src/3rdparty/v8/src/stub-cache.cc | 970
-rw-r--r--  src/3rdparty/v8/src/stub-cache.h | 396
-rw-r--r--  src/3rdparty/v8/src/sweeper-thread.cc | 103
-rw-r--r--  src/3rdparty/v8/src/sweeper-thread.h | 75
-rw-r--r--  src/3rdparty/v8/src/symbol.js | 39
-rw-r--r--  src/3rdparty/v8/src/token.h | 12
-rw-r--r--  src/3rdparty/v8/src/type-info.cc | 300
-rw-r--r--  src/3rdparty/v8/src/type-info.h | 61
-rw-r--r--  src/3rdparty/v8/src/unicode-inl.h | 168
-rw-r--r--  src/3rdparty/v8/src/unicode.cc | 124
-rw-r--r--  src/3rdparty/v8/src/unicode.h | 140
-rw-r--r--  src/3rdparty/v8/src/uri.h | 309
-rw-r--r--  src/3rdparty/v8/src/uri.js | 86
-rw-r--r--  src/3rdparty/v8/src/utils.h | 51
-rw-r--r--  src/3rdparty/v8/src/v8-counters.cc | 11
-rw-r--r--  src/3rdparty/v8/src/v8-counters.h | 19
-rw-r--r--  src/3rdparty/v8/src/v8.cc | 33
-rw-r--r--  src/3rdparty/v8/src/v8.h | 1
-rw-r--r--  src/3rdparty/v8/src/v8conversions.cc | 48
-rw-r--r--  src/3rdparty/v8/src/v8globals.h | 59
-rw-r--r--  src/3rdparty/v8/src/v8natives.js | 156
-rw-r--r--  src/3rdparty/v8/src/v8threads.cc | 40
-rw-r--r--  src/3rdparty/v8/src/v8utils.cc | 93
-rw-r--r--  src/3rdparty/v8/src/v8utils.h | 67
-rw-r--r--  src/3rdparty/v8/src/variables.cc | 8
-rw-r--r--  src/3rdparty/v8/src/variables.h | 4
-rw-r--r--  src/3rdparty/v8/src/version.cc | 4
-rw-r--r--  src/3rdparty/v8/src/vm-state-inl.h | 13
-rw-r--r--  src/3rdparty/v8/src/x64/assembler-x64-inl.h | 2
-rw-r--r--  src/3rdparty/v8/src/x64/assembler-x64.cc | 117
-rw-r--r--  src/3rdparty/v8/src/x64/assembler-x64.h | 44
-rw-r--r--  src/3rdparty/v8/src/x64/builtins-x64.cc | 95
-rw-r--r--  src/3rdparty/v8/src/x64/code-stubs-x64.cc | 1601
-rw-r--r--  src/3rdparty/v8/src/x64/code-stubs-x64.h | 132
-rw-r--r--  src/3rdparty/v8/src/x64/codegen-x64.cc | 182
-rw-r--r--  src/3rdparty/v8/src/x64/codegen-x64.h | 23
-rw-r--r--  src/3rdparty/v8/src/x64/deoptimizer-x64.cc | 458
-rw-r--r--  src/3rdparty/v8/src/x64/disasm-x64.cc | 12
-rw-r--r--  src/3rdparty/v8/src/x64/frames-x64.h | 14
-rw-r--r--  src/3rdparty/v8/src/x64/full-codegen-x64.cc | 350
-rw-r--r--  src/3rdparty/v8/src/x64/ic-x64.cc | 209
-rw-r--r--  src/3rdparty/v8/src/x64/lithium-codegen-x64.cc | 1111
-rw-r--r--  src/3rdparty/v8/src/x64/lithium-codegen-x64.h | 51
-rw-r--r--  src/3rdparty/v8/src/x64/lithium-x64.cc | 279
-rw-r--r--  src/3rdparty/v8/src/x64/lithium-x64.h | 153
-rw-r--r--  src/3rdparty/v8/src/x64/macro-assembler-x64.cc | 172
-rw-r--r--  src/3rdparty/v8/src/x64/macro-assembler-x64.h | 52
-rw-r--r--  src/3rdparty/v8/src/x64/regexp-macro-assembler-x64.cc | 23
-rw-r--r--  src/3rdparty/v8/src/x64/stub-cache-x64.cc | 1264
-rw-r--r--  src/3rdparty/v8/test/cctest/SConscript | 1
-rw-r--r--  src/3rdparty/v8/test/cctest/cctest.cc | 4
-rw-r--r--  src/3rdparty/v8/test/cctest/cctest.gyp | 3
-rw-r--r--  src/3rdparty/v8/test/cctest/cctest.h | 47
-rw-r--r--  src/3rdparty/v8/test/cctest/cctest.status | 3
-rw-r--r--  src/3rdparty/v8/test/cctest/test-accessors.cc | 33
-rw-r--r--  src/3rdparty/v8/test/cctest/test-alloc.cc | 32
-rw-r--r--  src/3rdparty/v8/test/cctest/test-api.cc | 1096
-rw-r--r--  src/3rdparty/v8/test/cctest/test-assembler-arm.cc | 218
-rw-r--r--  src/3rdparty/v8/test/cctest/test-assembler-ia32.cc | 163
-rw-r--r--  src/3rdparty/v8/test/cctest/test-assembler-mips.cc | 42
-rw-r--r--  src/3rdparty/v8/test/cctest/test-assembler-x64.cc | 94
-rw-r--r--  src/3rdparty/v8/test/cctest/test-compiler.cc | 35
-rw-r--r--  src/3rdparty/v8/test/cctest/test-cpu-profiler.cc | 17
-rw-r--r--  src/3rdparty/v8/test/cctest/test-debug.cc | 61
-rw-r--r--  src/3rdparty/v8/test/cctest/test-decls.cc | 175
-rw-r--r--  src/3rdparty/v8/test/cctest/test-dictionary.cc | 6
-rw-r--r--  src/3rdparty/v8/test/cctest/test-disasm-arm.cc | 132
-rw-r--r--  src/3rdparty/v8/test/cctest/test-disasm-ia32.cc | 10
-rw-r--r--  src/3rdparty/v8/test/cctest/test-disasm-x64.cc | 2
-rw-r--r--  src/3rdparty/v8/test/cctest/test-global-object.cc | 51
-rw-r--r--  src/3rdparty/v8/test/cctest/test-hashing.cc | 55
-rw-r--r--  src/3rdparty/v8/test/cctest/test-heap-profiler.cc | 143
-rw-r--r--  src/3rdparty/v8/test/cctest/test-heap.cc | 827
-rw-r--r--  src/3rdparty/v8/test/cctest/test-lockers.cc | 27
-rw-r--r--  src/3rdparty/v8/test/cctest/test-log-stack-tracer.cc | 3
-rw-r--r--  src/3rdparty/v8/test/cctest/test-log.cc | 18
-rwxr-xr-x  src/3rdparty/v8/test/cctest/test-macro-assembler-x64.cc | 158
-rw-r--r--  src/3rdparty/v8/test/cctest/test-mark-compact.cc | 63
-rw-r--r--  src/3rdparty/v8/test/cctest/test-object-observe.cc | 240
-rw-r--r-- [-rwxr-xr-x]  src/3rdparty/v8/test/cctest/test-parsing.cc | 88
-rw-r--r--  src/3rdparty/v8/test/cctest/test-platform.cc | 99
-rw-r--r--  src/3rdparty/v8/test/cctest/test-random.cc | 5
-rw-r--r--  src/3rdparty/v8/test/cctest/test-regexp.cc | 28
-rw-r--r--  src/3rdparty/v8/test/cctest/test-serialize.cc | 39
-rw-r--r--  src/3rdparty/v8/test/cctest/test-spaces.cc | 139
-rw-r--r--  src/3rdparty/v8/test/cctest/test-strings.cc | 782
-rw-r--r--  src/3rdparty/v8/test/cctest/test-symbols.cc | 63
-rw-r--r--  src/3rdparty/v8/test/cctest/test-thread-termination.cc | 21
-rw-r--r--  src/3rdparty/v8/test/cctest/test-threads.cc | 8
-rw-r--r--  src/3rdparty/v8/test/cctest/test-weakmaps.cc | 27
-rw-r--r--  src/3rdparty/v8/test/cctest/testcfg.py | 11
-rw-r--r--  src/3rdparty/v8/test/message/overwritten-builtins.out | 3
-rw-r--r--  src/3rdparty/v8/test/mjsunit/allocation-site-info.js | 272
-rw-r--r--  src/3rdparty/v8/test/mjsunit/array-bounds-check-removal.js | 101
-rw-r--r--  src/3rdparty/v8/test/mjsunit/array-natives-elements.js | 318
-rwxr-xr-x  src/3rdparty/v8/test/mjsunit/array-reduce.js | 16
-rw-r--r--  src/3rdparty/v8/test/mjsunit/array-slice.js | 12
-rw-r--r--  src/3rdparty/v8/test/mjsunit/array-store-and-grow.js | 5
-rw-r--r--  src/3rdparty/v8/test/mjsunit/big-array-literal.js | 4
-rw-r--r--  src/3rdparty/v8/test/mjsunit/builtins.js | 2
-rw-r--r--  src/3rdparty/v8/test/mjsunit/compiler/inline-closures.js | 49
-rw-r--r--  src/3rdparty/v8/test/mjsunit/compiler/inline-function-apply.js | 89
-rw-r--r--  src/3rdparty/v8/test/mjsunit/compiler/multiply-add.js | 69
-rw-r--r--  src/3rdparty/v8/test/mjsunit/compiler/multiply-sub.js | 56
-rw-r--r--  src/3rdparty/v8/test/mjsunit/compiler/parallel-proto-change.js | 44
-rw-r--r--  src/3rdparty/v8/test/mjsunit/compiler/property-static.js | 69
-rw-r--r--  src/3rdparty/v8/test/mjsunit/compiler/proto-chain-constant.js | 55
-rw-r--r--  src/3rdparty/v8/test/mjsunit/constant-folding-2.js | 258
-rw-r--r--  src/3rdparty/v8/test/mjsunit/debug-liveedit-compile-error.js | 60
-rw-r--r--  src/3rdparty/v8/test/mjsunit/debug-liveedit-literals.js | 94
-rw-r--r--  src/3rdparty/v8/test/mjsunit/debug-set-variable-value.js | 308
-rw-r--r--  src/3rdparty/v8/test/mjsunit/elements-kind.js | 4
-rw-r--r--  src/3rdparty/v8/test/mjsunit/elements-transition.js | 2
-rw-r--r--  src/3rdparty/v8/test/mjsunit/error-constructors.js | 15
-rw-r--r--  src/3rdparty/v8/test/mjsunit/error-tostring.js | 8
-rw-r--r--  src/3rdparty/v8/test/mjsunit/eval-stack-trace.js | 9
-rw-r--r--  src/3rdparty/v8/test/mjsunit/fast-prototype.js | 4
-rw-r--r--  src/3rdparty/v8/test/mjsunit/function-call.js | 32
-rw-r--r--  src/3rdparty/v8/test/mjsunit/fuzz-natives-part1.js | 10
-rw-r--r--  src/3rdparty/v8/test/mjsunit/fuzz-natives-part2.js | 9
-rw-r--r--  src/3rdparty/v8/test/mjsunit/fuzz-natives-part3.js | 9
-rw-r--r--  src/3rdparty/v8/test/mjsunit/fuzz-natives-part4.js | 9
-rw-r--r--  src/3rdparty/v8/test/mjsunit/generated-transition-stub.js | 218
-rw-r--r--  src/3rdparty/v8/test/mjsunit/harmony/module-linking.js | 2
-rw-r--r--  src/3rdparty/v8/test/mjsunit/harmony/object-observe.js | 505
-rw-r--r--  src/3rdparty/v8/test/mjsunit/harmony/proxies.js | 1
-rw-r--r--  src/3rdparty/v8/test/mjsunit/harmony/symbols.js | 127
-rw-r--r--  src/3rdparty/v8/test/mjsunit/json-parser-recursive.js | 33
-rw-r--r--  src/3rdparty/v8/test/mjsunit/json-stringify-recursive.js (renamed from src/3rdparty/v8/test/mjsunit/json-recursive.js) | 9
-rw-r--r--  src/3rdparty/v8/test/mjsunit/json.js | 27
-rw-r--r--  src/3rdparty/v8/test/mjsunit/manual-parallel-recompile.js | 79
-rw-r--r--  src/3rdparty/v8/test/mjsunit/math-exp-precision.js | 64
-rw-r--r--  src/3rdparty/v8/test/mjsunit/math-floor-of-div-minus-zero.js | 1
-rw-r--r--  src/3rdparty/v8/test/mjsunit/math-floor-of-div-nosudiv.js | 288
-rw-r--r--  src/3rdparty/v8/test/mjsunit/math-floor-of-div.js | 90
-rw-r--r--  src/3rdparty/v8/test/mjsunit/mjsunit.status | 16
-rwxr-xr-x  src/3rdparty/v8/test/mjsunit/regexp-capture-3.js | 33
-rw-r--r--  src/3rdparty/v8/test/mjsunit/regress/regress-121407.js | 2
-rw-r--r--  src/3rdparty/v8/test/mjsunit/regress/regress-147497.js (renamed from src/3rdparty/v8/src/inspector.h) | 39
-rw-r--r--  src/3rdparty/v8/test/mjsunit/regress/regress-164442.js | 45
-rw-r--r--  src/3rdparty/v8/test/mjsunit/regress/regress-165637.js | 61
-rw-r--r--  src/3rdparty/v8/test/mjsunit/regress/regress-166379.js | 39
-rw-r--r--  src/3rdparty/v8/test/mjsunit/regress/regress-166553.js | 33
-rw-r--r--  src/3rdparty/v8/test/mjsunit/regress/regress-1692.js | 2
-rw-r--r--  src/3rdparty/v8/test/mjsunit/regress/regress-171641.js | 40
-rw-r--r--  src/3rdparty/v8/test/mjsunit/regress/regress-1980.js | 2
-rw-r--r--  src/3rdparty/v8/test/mjsunit/regress/regress-2073.js | 99
-rw-r--r--  src/3rdparty/v8/test/mjsunit/regress/regress-2185.js | 2
-rw-r--r--  src/3rdparty/v8/test/mjsunit/regress/regress-2243.js | 31
-rw-r--r--  src/3rdparty/v8/test/mjsunit/regress/regress-2263.js | 30
-rw-r--r--  src/3rdparty/v8/test/mjsunit/regress/regress-2315.js | 40
-rw-r--r--  src/3rdparty/v8/test/mjsunit/regress/regress-2410.js | 36
-rw-r--r--  src/3rdparty/v8/test/mjsunit/regress/regress-2416.js | 75
-rw-r--r--  src/3rdparty/v8/test/mjsunit/regress/regress-2419.js | 37
-rw-r--r--  src/3rdparty/v8/test/mjsunit/regress/regress-2433.js | 36
-rw-r--r--  src/3rdparty/v8/test/mjsunit/regress/regress-2437.js | 156
-rw-r--r--  src/3rdparty/v8/test/mjsunit/regress/regress-2438.js | 52
-rw-r--r--  src/3rdparty/v8/test/mjsunit/regress/regress-2441.js | 31
-rw-r--r--  src/3rdparty/v8/test/mjsunit/regress/regress-2443.js | 129
-rw-r--r--  src/3rdparty/v8/test/mjsunit/regress/regress-2444.js | 120
-rw-r--r--  src/3rdparty/v8/test/mjsunit/regress/regress-2451.js | 41
-rw-r--r--  src/3rdparty/v8/test/mjsunit/regress/regress-2489.js | 50
-rw-r--r--  src/3rdparty/v8/test/mjsunit/regress/regress-2499.js | 40
-rw-r--r--  src/3rdparty/v8/test/mjsunit/regress/regress-2537.js | 45
-rw-r--r--  src/3rdparty/v8/test/mjsunit/regress/regress-2539.js | 55
-rw-r--r--  src/3rdparty/v8/test/mjsunit/regress/regress-492.js | 40
-rw-r--r--  src/3rdparty/v8/test/mjsunit/regress/regress-crbug-135066.js | 14
-rw-r--r--  src/3rdparty/v8/test/mjsunit/regress/regress-crbug-146910.js | 15
-rw-r--r--  src/3rdparty/v8/test/mjsunit/regress/regress-crbug-160010.js | 4
-rw-r--r--  src/3rdparty/v8/test/mjsunit/regress/regress-crbug-162085.js | 71
-rw-r--r--  src/3rdparty/v8/test/mjsunit/regress/regress-crbug-163530.js | 80
-rw-r--r--  src/3rdparty/v8/test/mjsunit/regress/regress-crbug-168545.js | 34
-rw-r--r--  src/3rdparty/v8/test/mjsunit/regress/regress-crbug-170856.js | 33
-rw-r--r--  src/3rdparty/v8/test/mjsunit/regress/regress-crbug-172345.js | 34
-rw-r--r--  src/3rdparty/v8/test/mjsunit/regress/regress-crbug-173907.js | 88
-rw-r--r--  src/3rdparty/v8/test/mjsunit/regress/regress-crbug-173974.js | 36
-rw-r--r--  src/3rdparty/v8/test/mjsunit/regress/regress-crbug-178790.js | 52
-rw-r--r--  src/3rdparty/v8/test/mjsunit/regress/regress-crbug-18639.js | 14
-rw-r--r--  src/3rdparty/v8/test/mjsunit/regress/regress-json-stringify-gc.js | 2
-rw-r--r--  src/3rdparty/v8/test/mjsunit/regress/regress-latin-1.js | 90
-rw-r--r--  src/3rdparty/v8/test/mjsunit/regress/regress-observe-empty-double-array.js | 38
-rw-r--r--  src/3rdparty/v8/test/mjsunit/shift-for-integer-div.js | 59
-rw-r--r--  src/3rdparty/v8/test/mjsunit/stack-traces-gc.js | 119
-rw-r--r--  src/3rdparty/v8/test/mjsunit/stack-traces-overflow.js | 122
-rw-r--r--  src/3rdparty/v8/test/mjsunit/stack-traces.js | 40
-rw-r--r--  src/3rdparty/v8/test/mjsunit/strict-mode.js | 47
-rw-r--r--  src/3rdparty/v8/test/mjsunit/string-natives.js | 72
-rw-r--r--  src/3rdparty/v8/test/mjsunit/string-replace.js | 61
-rw-r--r--  src/3rdparty/v8/test/mjsunit/string-split.js | 17
-rw-r--r--  src/3rdparty/v8/test/mjsunit/testcfg.py | 2
-rw-r--r--  src/3rdparty/v8/test/mjsunit/tools/tickprocessor-test-func-info.log | 12
-rw-r--r--  src/3rdparty/v8/test/mjsunit/tools/tickprocessor-test.log | 38
-rw-r--r--  src/3rdparty/v8/test/mjsunit/tools/tickprocessor.js | 5
-rw-r--r--  src/3rdparty/v8/test/mjsunit/uri.js | 12
-rw-r--r--  src/3rdparty/v8/test/mozilla/mozilla.status | 35
-rw-r--r--  src/3rdparty/v8/test/test262/README | 4
-rw-r--r--  src/3rdparty/v8/test/test262/test262.status | 15
-rw-r--r--  src/3rdparty/v8/test/test262/testcfg.py | 11
-rw-r--r--  src/3rdparty/v8/tools/disasm.py | 8
-rw-r--r--  src/3rdparty/v8/tools/gen-postmortem-metadata.py | 12
-rwxr-xr-x  src/3rdparty/v8/tools/grokdump.py | 353
-rw-r--r--  src/3rdparty/v8/tools/gyp/v8.gyp | 76
-rwxr-xr-x  src/3rdparty/v8/tools/ll_prof.py | 63
-rwxr-xr-x  src/3rdparty/v8/tools/plot-timer-events | 70
-rw-r--r--  src/3rdparty/v8/tools/plot-timer-events.js | 510
-rwxr-xr-x  src/3rdparty/v8/tools/run-llprof.sh | 69
-rwxr-xr-x  src/3rdparty/v8/tools/run-tests.py | 13
-rwxr-xr-x  src/3rdparty/v8/tools/run-valgrind.py | 2
-rwxr-xr-x  src/3rdparty/v8/tools/test.py | 6
-rw-r--r--  src/3rdparty/v8/tools/testrunner/local/execution.py | 6
-rw-r--r--  src/3rdparty/v8/tools/testrunner/local/testsuite.py | 3
-rw-r--r--  src/3rdparty/v8/tools/testrunner/objects/context.py | 4
-rw-r--r--  src/3rdparty/v8/tools/testrunner/server/compression.py | 1
-rw-r--r--  src/3rdparty/v8/tools/tick-processor.html | 168
-rw-r--r--  src/3rdparty/v8/tools/tickprocessor-driver.js | 4
-rw-r--r--  src/3rdparty/v8/tools/tickprocessor.js | 52
-rw-r--r--  src/v8/v8.pri | 30
493 files changed, 58426 insertions, 37293 deletions
diff --git a/.qmake.conf b/.qmake.conf
index bf1b71f..2f09a2b 100644
--- a/.qmake.conf
+++ b/.qmake.conf
@@ -1,3 +1,3 @@
load(qt_build_config)
-MODULE_VERSION = 5.1.2
+MODULE_VERSION = 5.2.0
diff --git a/qtjsbackend.pro b/qtjsbackend.pro
index f2bd8e7..02737b6 100644
--- a/qtjsbackend.pro
+++ b/qtjsbackend.pro
@@ -2,3 +2,8 @@ load(configure)
qtCompileTest(hardfloat)
load(qt_parts)
+
+ios {
+ log("The qtjsbackend was disabled from the build because V8 is not ported to iOS.")
+ SUBDIRS=
+}
diff --git a/src/3rdparty/v8/.gitignore b/src/3rdparty/v8/.gitignore
index ec0660f..fe8425f 100644
--- a/src/3rdparty/v8/.gitignore
+++ b/src/3rdparty/v8/.gitignore
@@ -18,6 +18,7 @@
#*#
*~
.cpplint-cache
+.d8_history
d8
d8_g
shell
diff --git a/src/3rdparty/v8/AUTHORS b/src/3rdparty/v8/AUTHORS
index c279e7c..d25fc5a 100644
--- a/src/3rdparty/v8/AUTHORS
+++ b/src/3rdparty/v8/AUTHORS
@@ -34,6 +34,7 @@ Joel Stanley <joel.stan@gmail.com>
John Jozwiak <jjozwiak@codeaurora.org>
Jonathan Liu <net147@gmail.com>
Kun Zhang <zhangk@codeaurora.org>
+Luis Reis <luis.m.reis@gmail.com>
Martyn Capewell <martyn.capewell@arm.com>
Mathias Bynens <mathias@qiwi.be>
Matt Hanselman <mjhanselman@gmail.com>
diff --git a/src/3rdparty/v8/ChangeLog b/src/3rdparty/v8/ChangeLog
index bb9ed30..4bcf89e 100644
--- a/src/3rdparty/v8/ChangeLog
+++ b/src/3rdparty/v8/ChangeLog
@@ -1,3 +1,378 @@
+2013-03-04: Version 3.17.7
+
+ Limited recursion in regexp compilation by a budget.
+ (Chromium issue 178790)
+
+ ES6 symbols: Implemented Symbol intrinsic and basic functionality
+ (issue 2158)
+
+ Performance and stability improvements on all platforms.
+
+
+2013-02-28: Version 3.17.6
+
+ Fixed materialization of arguments objects with unknown values.
+ (Chromium issue 163530)
+
+ Set default number of sweeper threads to at most four.
+
+ Performance and stability improvements on all platforms.
+
+
+2013-02-27: Version 3.17.5
+
+ Made __proto__ a foreign callback on Object.prototype.
+ (issue 621, issue 1949 and issue 2441)
+
+ Performance and stability improvements on all platforms.
+
+
+2013-02-25: Version 3.17.4
+
+ Performance and stability improvements on all platforms.
+
+
+2013-02-21: Version 3.17.3
+
+ Performance and stability improvements on all platforms.
+
+
+2013-02-19: Version 3.17.2
+
+ Removed bogus check for TOP register in deoptimizer.
+ (Chromium issue 176943)
+
+ Made the Isolate parameter mandatory for internal HandleScopes.
+ (issue 2487)
+
+ Fixed f.apply() optimization when declared arguments are mutated.
+ (issue 2539)
+
+ Performance and stability improvements on all platforms.
+
+
+2013-02-14: Version 3.17.1
+
+ Performance and stability improvements on all platforms.
+
+
+2013-02-13: Version 3.17.0
+
+ Enabled parallel sweeping.
+
+ Don't try to unlink instructions twice during GVN
+ (Chromium issue 175141)
+
+ Fixed code flusher disabling while marking incrementally.
+ (Chromium issue 173458, 168582)
+
+ Don't use TLS for space iterators.
+ (issue 2531)
+
+ Added new GetHeapStatistics API entry and deprecated old one.
+
+ Fixed DoubleStackSlot-to-DoubleStackSlot moves on ia32. Unified
+ platform-independent code.
+ (Chromium issue 173907)
+
+ Added --trace-array-abuse to help find OOB accesses.
+
+ Performance and stability improvements on all platforms.
+
+
+2013-02-06: Version 3.16.14
+
+ Performance and stability improvements on all platforms.
+
+
+2013-02-04: Version 3.16.13
+
+ Tagged stubs that rely on instance types as MEGAMORPHIC.
+ (Chromium issue 173974)
+
+ Fixed clearing of dead dependent codes and verifying of weak
+ embedded maps on full GC. (Chromium issue 172488,172489)
+
+ Made the arm port build cleanly with Clang.
+
+ Performance and stability improvements on all platforms.
+
+
+2013-01-31: Version 3.16.12
+
+ Performance and stability improvements on all platforms.
+
+
+2013-01-30: Version 3.16.11
+
+ Put making embedded maps in optimized code weak behind a flag.
+ (Chromium issue 172488,172489)
+
+ Performance and stability improvements on all platforms.
+
+
+2013-01-25: Version 3.16.10
+
+ Avoid excessive memory usage during redundant phi elimination.
+ (issue 2510)
+
+ Fixed additional spec violations wrt RegExp.lastIndex.
+ (issue 2437)
+
+ Added Isolate parameter to Persistent class.
+ (issue 2487)
+
+ Performance and stability improvements on all platforms.
+
+
+2013-01-24: Version 3.16.9
+
+ Made embedded maps in optimized code weak.
+ (issue 2073)
+
+ Fixed corner case when JSFunction is evicted from flusher.
+ (Chromium issue 168801)
+
+ Correctly set kCanBeDivByZero flag for HMathFloorOfDiv.
+ (Chromium issue 171641)
+
+ Performance and stability improvements on all platforms.
+
+
+2013-01-23: Version 3.16.8
+
+ Correctly reset lastIndex in a RegExp object.
+ (Chromium issue 170856)
+
+ Added a workaround for Windows compilation problems related to V8EXPORT.
+ (issue 2507)
+
+ tools/run-tests.py: shlex.split() the value of --command-prefix
+ (Chromium issue 171553)
+
+ Fixed pattern detection for replacing shifts by rotation.
+ (Chromium issue 2499)
+
+ Performance and stability improvements on all platforms.
+
+
+2013-01-21: Version 3.16.7
+
+ Removed <(library) usage from v8.gyp.
+ (Chromium issue 111541)
+
+ Fixed out of bounds memory access in TestJSArrayForAllocationSiteInfo.
+ (Chromium issue 169928)
+
+ Performance and stability improvements on all platforms.
+
+
+2013-01-18: Version 3.16.6
+
+ Made the Isolate parameter mandatory in Locker and Unlocker classes.
+ (issue 2487)
+
+ Avoid pointer underflow in CopyCharsUnsigned.
+ (issue 2493)
+
+ Generate shim headers when using system v8.
+ (Chromium issue 165264)
+
+ Fixed arguments materialization for inlined apply().
+ (issue 2489)
+
+ Sync'ed laziness between BuildFunctionInfo and MakeFunctionInfo.
+ (Chromium issue 147497)
+
+ Added sanity check to CodeFlusher::AddCandidate.
+ (Chromium issue 169209)
+
+ Performance and stability improvements on all platforms.
+
+
+2013-01-15: Version 3.16.5
+
+ Removed deprecated functions from V8's external API.
+
+ Prepared API for WebKit use of Latin-1.
+
+ Fixed V8 issue 2486.
+
+ Fixed Chromium issue 169723.
+
+ Performance and stability improvements on all platforms.
+
+
+2013-01-11: Version 3.16.4
+
+ Fixed Chromium issues 168545 and 169209.
+
+ Performance and stability improvements on all platforms.
+
+
+2013-01-09: Version 3.16.3
+
+ Improved GC performance when moving parts of a FixedArray (issue 2452).
+
+ Enabled readline on d8 while building a shared lib (issue 1781).
+
+ Fixed missing exception check in typed array constructor
+ (Chromium issue 168545).
+
+ Check for read-only-ness when preparing for array sort (issue 2419).
+
+ Performance and stability improvements on all platforms.
+
+
+2013-01-04: Version 3.16.2
+
+ Added Makefile options to build for the Raspberry Pi (armv7=0,
+ arm_fpu=vfp2).
+
+ Performance and stability improvements on all platforms.
+
+
+2012-12-27: Version 3.16.1
+
+ Fixed x64 MathMinMax for negative untagged int32 arguments.
+ (Chromium issue 164442)
+
+ Fixed FloatingPointHelper::CheckSSE2OperandIsInt32.
+ (issue 2458)
+
+ Performance and stability improvements on all platforms.
+
+
+2012-12-21: Version 3.16.0
+
+ V8_Fatal now prints C++ stack trace in debug mode.
+
+ Added HTML-based tick processor.
+
+ Continued implementation of Object.observe (V8 issue 2409).
+
+ Fixed V8 issues 2243, 2340, 2393, 2399, 2457.
+
+ Fixed Chromium issues 125308, 165637, 166379, 166553.
+
+ Performance and stability improvements on all platforms.
+
+
+2012-12-10: Version 3.15.11
+
+ Define CAN_USE_VFP2/3_INSTRUCTIONS based on arm_neon and arm_fpu GYP
+ flags.
+
+ Performance and stability improvements on all platforms.
+
+
+2012-12-07: Version 3.15.10
+
+ Enabled optimisation of functions inside eval. (issue 2315)
+
+ Fixed spec violations in methods of Number.prototype. (issue 2443)
+
+ Added GCTracer metrics for a scavenger GC for DOM wrappers.
+
+ Performance and stability improvements on all platforms.
+
+
+2012-12-06: Version 3.15.9
+
+ Fixed candidate eviction in code flusher.
+ (Chromium issue 159140)
+
+ Iterate through all arguments for side effects in Math.min/max.
+ (issue 2444)
+
+ Fixed spec violations related to regexp.lastIndex
+ (issue 2437, issue 2438)
+
+ Performance and stability improvements on all platforms.
+
+
+2012-12-04: Version 3.15.8
+
+ Enforced stack allocation of TryCatch blocks.
+ (issue 2166,chromium:152389)
+
+ Fixed external exceptions in external try-catch handlers.
+ (issue 2166)
+
+ Activated incremental code flushing by default.
+
+ Performance and stability improvements on all platforms.
+
+
+2012-11-30: Version 3.15.7
+
+ Activated code aging by default.
+
+ Included more information in --prof log.
+
+ Removed eager sweeping for lazy swept spaces. In SlowAllocateRaw,
+ try a bounded number of times to find a big enough memory slot.
+ (issue 2194)
+
+ Performance and stability improvements on all platforms.
+
+
+2012-11-26: Version 3.15.6
+
+ Ensure double arrays are filled with holes when extended from
+ variations of empty arrays. (Chromium issue 162085)
+
+ Performance and stability improvements on all platforms.
+
+
+2012-11-23: Version 3.15.5
+
+ Fixed JSON.stringify for objects with interceptor handlers.
+ (Chromium issue 161028)
+
+ Fixed corner case in x64 compare stubs. (issue 2416)
+
+ Performance and stability improvements on all platforms.
+
+
+2012-11-16: Version 3.15.4
+
+ Fixed Array.prototype.join evaluation order. (issue 2263)
+
+ Perform CPU sampling by CPU sampling thread only if processing thread
+ is not running. (issue 2364)
+
+ When using an Object as a set in Object.getOwnPropertyNames, null out
+ the proto. (issue 2410)
+
+ Disabled EXTRA_CHECKS in Release build.
+
+ Heap explorer: Show representation of strings.
+
+ Removed 'type' and 'arguments' properties from Error object.
+ (issue 2397)
+
+ Added atomics implementation for ThreadSanitizer v2.
+ (Chromium issue 128314)
+
+ Fixed LiveEdit crashes when object/array literal is added. (issue 2368)
+
+ Performance and stability improvements on all platforms.
+
+
+2012-11-13: Version 3.15.3
+
+ Changed sample shell to send non-JS output (e.g. errors) to stderr
+ instead of stdout.
+
+ Correctly check for stack overflow even when interrupt is pending.
+ (issue 214)
+
+ Collect stack trace on stack overflow. (issue 2394)
+
+ Performance and stability improvements on all platforms.
+
+
2012-11-12: Version 3.15.2
Function::GetScriptOrigin supplies sourceURL when script name is
diff --git a/src/3rdparty/v8/Makefile b/src/3rdparty/v8/Makefile
index b65ea4c..8e550d0 100644
--- a/src/3rdparty/v8/Makefile
+++ b/src/3rdparty/v8/Makefile
@@ -62,6 +62,12 @@ endif
ifeq ($(verifyheap), on)
GYPFLAGS += -Dv8_enable_verify_heap=1
endif
+# backtrace=off
+ifeq ($(backtrace), off)
+ GYPFLAGS += -Dv8_enable_backtrace=0
+else
+ GYPFLAGS += -Dv8_enable_backtrace=1
+endif
# snapshot=off
ifeq ($(snapshot), off)
GYPFLAGS += -Dv8_use_snapshot='false'
@@ -77,15 +83,17 @@ endif
ifeq ($(gdbjit), on)
GYPFLAGS += -Dv8_enable_gdbjit=1
endif
-# liveobjectlist=on
-ifeq ($(liveobjectlist), on)
- GYPFLAGS += -Dv8_use_liveobjectlist=true
+# vfp2=off
+ifeq ($(vfp2), off)
+ GYPFLAGS += -Dv8_can_use_vfp2_instructions=false
+else
+ GYPFLAGS += -Dv8_can_use_vfp2_instructions=true -Darm_fpu=vfpv2
endif
# vfp3=off
ifeq ($(vfp3), off)
GYPFLAGS += -Dv8_can_use_vfp3_instructions=false
else
- GYPFLAGS += -Dv8_can_use_vfp3_instructions=true
+ GYPFLAGS += -Dv8_can_use_vfp3_instructions=true -Darm_fpu=vfpv3
endif
# debuggersupport=off
ifeq ($(debuggersupport), off)
@@ -115,6 +123,10 @@ endif
ifeq ($(hardfp), on)
GYPFLAGS += -Dv8_use_arm_eabi_hardfloat=true
endif
+# armv7=false
+ifeq ($(armv7), false)
+ GYPFLAGS += -Darmv7=0
+endif
# ----------------- available targets: --------------------
# - "dependencies": pulls in external dependencies (currently: GYP)
@@ -136,7 +148,7 @@ endif
ARCHES = ia32 x64 arm mipsel
DEFAULT_ARCHES = ia32 x64 arm
MODES = release debug
-ANDROID_ARCHES = android_ia32 android_arm
+ANDROID_ARCHES = android_ia32 android_arm android_mipsel
# List of files that trigger Makefile regeneration:
GYPFILES = build/all.gyp build/common.gypi build/standalone.gypi \
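
Usage sketch (not part of the patch; assumes V8's standard `<arch>.<mode>` make targets): the options added above are plain make variables, so hypothetical invocations would look like

    make ia32.release backtrace=off        # build without backtrace_symbols support
    make arm.release vfp2=off armv7=false  # ARM code without VFP2/ARMv7 instructions
    make android_mipsel.release            # the newly added Android MIPS target
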
diff --git a/src/3rdparty/v8/Makefile.android b/src/3rdparty/v8/Makefile.android
index 8e4ce08..aeff01c 100644
--- a/src/3rdparty/v8/Makefile.android
+++ b/src/3rdparty/v8/Makefile.android
@@ -26,7 +26,7 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Those definitions should be consistent with the main Makefile
-ANDROID_ARCHES = android_ia32 android_arm
+ANDROID_ARCHES = android_ia32 android_arm android_mipsel
MODES = release debug
# Generates all combinations of ANDROID ARCHES and MODES,
@@ -50,11 +50,17 @@ ifeq ($(ARCH), android_arm)
DEFINES += arm_neon=0 armv7=1
TOOLCHAIN_ARCH = arm-linux-androideabi-4.6
else
- ifeq ($(ARCH), android_ia32)
- DEFINES = target_arch=ia32 v8_target_arch=ia32 android_target_arch=x86
- TOOLCHAIN_ARCH = x86-4.6
+ ifeq ($(ARCH), android_mipsel)
+ DEFINES = target_arch=mipsel v8_target_arch=mipsel android_target_arch=mips
+ DEFINES += mips_arch_variant=mips32r2
+ TOOLCHAIN_ARCH = mipsel-linux-android-4.6
else
- $(error Target architecture "${ARCH}" is not supported)
+ ifeq ($(ARCH), android_ia32)
+ DEFINES = target_arch=ia32 v8_target_arch=ia32 android_target_arch=x86
+ TOOLCHAIN_ARCH = x86-4.6
+ else
+ $(error Target architecture "${ARCH}" is not supported)
+ endif
endif
endif
diff --git a/src/3rdparty/v8/SConstruct b/src/3rdparty/v8/SConstruct
index 5f8616a..21d1902 100644
--- a/src/3rdparty/v8/SConstruct
+++ b/src/3rdparty/v8/SConstruct
@@ -67,16 +67,9 @@ LIBRARY_FLAGS = {
'debuggersupport:on': {
'CPPDEFINES': ['ENABLE_DEBUGGER_SUPPORT'],
},
- 'inspector:on': {
- 'CPPDEFINES': ['INSPECTOR'],
- },
'fasttls:off': {
'CPPDEFINES': ['V8_NO_FAST_TLS'],
},
- 'liveobjectlist:on': {
- 'CPPDEFINES': ['ENABLE_DEBUGGER_SUPPORT', 'INSPECTOR',
- 'LIVE_OBJECT_LIST', 'OBJECT_PRINT'],
- }
},
'gcc': {
'all': {
@@ -1051,16 +1044,6 @@ SIMPLE_OPTIONS = {
'default': 'on',
'help': 'enable debugging of JavaScript code'
},
- 'inspector': {
- 'values': ['on', 'off'],
- 'default': 'off',
- 'help': 'enable inspector features'
- },
- 'liveobjectlist': {
- 'values': ['on', 'off'],
- 'default': 'off',
- 'help': 'enable live object list features in the debugger'
- },
'soname': {
'values': ['on', 'off'],
'default': 'off',
@@ -1418,13 +1401,6 @@ def PostprocessOptions(options, os):
options['msvcltcg'] = 'on'
if (options['mipsabi'] != 'none') and (options['arch'] != 'mips') and (options['simulator'] != 'mips'):
options['mipsabi'] = 'none'
- if options['liveobjectlist'] == 'on':
- if (options['debuggersupport'] != 'on') or (options['mode'] == 'release'):
- # Print a warning that liveobjectlist will implicitly enable the debugger
- print "Warning: forcing debuggersupport on for liveobjectlist"
- options['debuggersupport'] = 'on'
- options['inspector'] = 'on'
- options['objectprint'] = 'on'
def ParseEnvOverrides(arg, imports):
diff --git a/src/3rdparty/v8/build/android.gypi b/src/3rdparty/v8/build/android.gypi
index 67a9d35..2ee7cf9 100644
--- a/src/3rdparty/v8/build/android.gypi
+++ b/src/3rdparty/v8/build/android.gypi
@@ -181,6 +181,11 @@
'-L<(android_stlport_libs)/armeabi',
],
}],
+ ['target_arch=="mipsel"', {
+ 'ldflags': [
+ '-L<(android_stlport_libs)/mips',
+ ],
+ }],
['target_arch=="ia32"', {
'ldflags': [
'-L<(android_stlport_libs)/x86',
@@ -197,6 +202,16 @@
'-fno-stack-protector',
],
}],
+ ['target_arch=="mipsel"', {
+ # The mips toolchain currently has problems with stack-protector.
+ 'cflags!': [
+ '-fstack-protector',
+ '-U__linux__'
+ ],
+ 'cflags': [
+ '-fno-stack-protector',
+ ],
+ }],
],
'target_conditions': [
['_type=="executable"', {
diff --git a/src/3rdparty/v8/build/common.gypi b/src/3rdparty/v8/build/common.gypi
index 9559d98..6e12f26 100644
--- a/src/3rdparty/v8/build/common.gypi
+++ b/src/3rdparty/v8/build/common.gypi
@@ -32,6 +32,7 @@
'use_system_v8%': 0,
'msvs_use_common_release': 0,
'gcc_version%': 'unknown',
+ 'CXX%': '${CXX:-$(which g++)}', # Used to assemble a shell command.
'v8_compress_startup_data%': 'off',
'v8_target_arch%': '<(target_arch)',
@@ -51,6 +52,13 @@
'v8_can_use_vfp2_instructions%': 'false',
'v8_can_use_vfp3_instructions%': 'false',
+ # Setting 'v8_can_use_vfp32dregs' to 'true' will cause V8 to use the VFP
+ # registers d16-d31 in the generated code, both in the snapshot and for the
+ # ARM target. Leaving the default value of 'false' will avoid the use of
+ # these registers in the snapshot and use CPU feature probing when running
+ # on the target.
+ 'v8_can_use_vfp32dregs%': 'false',
+
# Similar to vfp but on MIPS.
'v8_can_use_fpu_instructions%': 'true',
@@ -66,12 +74,13 @@
# Default arch variant for MIPS.
'mips_arch_variant%': 'mips32r2',
+ 'v8_enable_latin_1%': 1,
+
'v8_enable_debugger_support%': 1,
- 'v8_enable_disassembler%': 0,
+ 'v8_enable_backtrace%': 0,
- # Enable extra checks in API functions and other strategic places.
- 'v8_enable_extra_checks%': 1,
+ 'v8_enable_disassembler%': 0,
'v8_enable_gdbjit%': 0,
@@ -91,7 +100,6 @@
'v8_use_snapshot%': 'true',
'host_os%': '<(OS)',
- 'v8_use_liveobjectlist%': 'false',
'werror%': '-Werror',
# With post mortem support enabled, metadata is embedded into libv8 that
@@ -108,15 +116,15 @@
},
'target_defaults': {
'conditions': [
+ ['v8_enable_latin_1==1', {
+ 'defines': ['ENABLE_LATIN_1',],
+ }],
['v8_enable_debugger_support==1', {
'defines': ['ENABLE_DEBUGGER_SUPPORT',],
}],
['v8_enable_disassembler==1', {
'defines': ['ENABLE_DISASSEMBLER',],
}],
- ['v8_enable_extra_checks==1', {
- 'defines': ['ENABLE_EXTRA_CHECKS',],
- }],
['v8_enable_gdbjit==1', {
'defines': ['ENABLE_GDB_JIT_INTERFACE',],
}],
@@ -149,12 +157,16 @@
'CAN_USE_UNALIGNED_ACCESSES=0',
],
}],
- [ 'v8_can_use_vfp2_instructions=="true"', {
+ # NEON implies VFP3 and VFP3 implies VFP2.
+ [ 'v8_can_use_vfp2_instructions=="true" or arm_neon==1 or \
+ arm_fpu=="vfpv3" or arm_fpu=="vfpv3-d16"', {
'defines': [
'CAN_USE_VFP2_INSTRUCTIONS',
],
}],
- [ 'v8_can_use_vfp3_instructions=="true"', {
+ # NEON implies VFP3.
+ [ 'v8_can_use_vfp3_instructions=="true" or arm_neon==1 or \
+ arm_fpu=="vfpv3" or arm_fpu=="vfpv3-d16"', {
'defines': [
'CAN_USE_VFP3_INSTRUCTIONS',
],
@@ -162,7 +174,7 @@
[ 'v8_use_arm_eabi_hardfloat=="true"', {
'defines': [
'USE_EABI_HARDFLOAT=1',
- 'CAN_USE_VFP3_INSTRUCTIONS',
+ 'CAN_USE_VFP2_INSTRUCTIONS',
],
'target_conditions': [
['_toolset=="target"', {
@@ -174,6 +186,11 @@
'USE_EABI_HARDFLOAT=0',
],
}],
+ [ 'v8_can_use_vfp32dregs=="true"', {
+ 'defines': [
+ 'CAN_USE_VFP32DREGS',
+ ],
+ }],
],
}], # v8_target_arch=="arm"
['v8_target_arch=="ia32"', {
@@ -186,7 +203,7 @@
'V8_TARGET_ARCH_MIPS',
],
'variables': {
- 'mipscompiler': '<!($(echo ${CXX:-$(which g++)}) -v 2>&1 | grep -q "^Target: mips" && echo "yes" || echo "no")',
+ 'mipscompiler': '<!($(echo <(CXX)) -v 2>&1 | grep -q "^Target: mips" && echo "yes" || echo "no")',
},
'conditions': [
['mipscompiler=="yes"', {
@@ -252,14 +269,6 @@
},
'msvs_configuration_platform': 'x64',
}], # v8_target_arch=="x64"
- ['v8_use_liveobjectlist=="true"', {
- 'defines': [
- 'ENABLE_DEBUGGER_SUPPORT',
- 'INSPECTOR',
- 'OBJECT_PRINT',
- 'LIVEOBJECTLIST',
- ],
- }],
['v8_compress_startup_data=="bz2"', {
'defines': [
'COMPRESS_STARTUP_DATA_BZ2',
@@ -312,7 +321,7 @@
}],
['_toolset=="target"', {
'variables': {
- 'm32flag': '<!((echo | $(echo ${CXX_target:-${CXX:-$(which g++)}}) -m32 -E - > /dev/null 2>&1) && echo "-m32" || true)',
+ 'm32flag': '<!((echo | $(echo ${CXX_target:-<(CXX)}) -m32 -E - > /dev/null 2>&1) && echo "-m32" || true)',
'clang%': 0,
},
'conditions': [
@@ -336,6 +345,9 @@
], # conditions
'configurations': {
'Debug': {
+ 'variables': {
+ 'v8_enable_extra_checks%': 1,
+ },
'defines': [
'DEBUG',
'ENABLE_DISASSEMBLER',
@@ -360,10 +372,17 @@
},
},
'conditions': [
+ ['v8_enable_extra_checks==1', {
+ 'defines': ['ENABLE_EXTRA_CHECKS',],
+ }],
['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd"', {
'cflags': [ '-Wall', '<(werror)', '-W', '-Wno-unused-parameter',
'-Wnon-virtual-dtor', '-Woverloaded-virtual' ],
}],
+ ['OS=="linux" and v8_enable_backtrace==1', {
+ # Support for backtrace_symbols.
+ 'ldflags': [ '-rdynamic' ],
+ }],
['OS=="android"', {
'variables': {
'android_full_debug%': 1,
@@ -378,10 +397,21 @@
}],
],
}],
+ ['OS=="mac"', {
+ 'xcode_settings': {
+ 'GCC_OPTIMIZATION_LEVEL': '0', # -O0
+ },
+ }],
],
}, # Debug
'Release': {
+ 'variables': {
+ 'v8_enable_extra_checks%': 0,
+ },
'conditions': [
+ ['v8_enable_extra_checks==1', {
+ 'defines': ['ENABLE_EXTRA_CHECKS',],
+ }],
['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd" \
or OS=="android"', {
'cflags!': [
diff --git a/src/3rdparty/v8/build/gyp_v8 b/src/3rdparty/v8/build/gyp_v8
index 345f777..bf81ad3 100755
--- a/src/3rdparty/v8/build/gyp_v8
+++ b/src/3rdparty/v8/build/gyp_v8
@@ -1,4 +1,4 @@
-#!/usr/bin/python
+#!/usr/bin/env python
#
# Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
diff --git a/src/3rdparty/v8/build/standalone.gypi b/src/3rdparty/v8/build/standalone.gypi
index 7145a16..125c5bf 100644
--- a/src/3rdparty/v8/build/standalone.gypi
+++ b/src/3rdparty/v8/build/standalone.gypi
@@ -32,6 +32,7 @@
'library%': 'static_library',
'component%': 'static_library',
'visibility%': 'hidden',
+ 'v8_enable_backtrace%': 0,
'msvs_multi_core_compile%': '1',
'mac_deployment_target%': '10.5',
'variables': {
@@ -86,6 +87,9 @@
'Debug': {
'cflags': [ '-g', '-O0' ],
},
+ 'Release': {
+ # Xcode insists on this empty entry.
+ },
},
},
'conditions': [
@@ -100,7 +104,7 @@
[ 'OS=="linux"', {
'cflags': [ '-ansi' ],
}],
- [ 'visibility=="hidden"', {
+ [ 'visibility=="hidden" and v8_enable_backtrace==0', {
'cflags': [ '-fvisibility=hidden' ],
}],
[ 'component=="shared_library"', {
@@ -191,7 +195,7 @@
'GCC_SYMBOLS_PRIVATE_EXTERN': 'YES', # -fvisibility=hidden
'GCC_THREADSAFE_STATICS': 'NO', # -fno-threadsafe-statics
'GCC_TREAT_WARNINGS_AS_ERRORS': 'YES', # -Werror
- 'GCC_VERSION': '4.2',
+ 'GCC_VERSION': 'com.apple.compilers.llvmgcc42',
'GCC_WARN_ABOUT_MISSING_NEWLINE': 'YES', # -Wnewline-eof
# MACOSX_DEPLOYMENT_TARGET maps to -mmacosx-version-min
'MACOSX_DEPLOYMENT_TARGET': '<(mac_deployment_target)',
diff --git a/src/3rdparty/v8/include/v8-profiler.h b/src/3rdparty/v8/include/v8-profiler.h
index c1e9a9e..4d3597a 100644
--- a/src/3rdparty/v8/include/v8-profiler.h
+++ b/src/3rdparty/v8/include/v8-profiler.h
@@ -407,13 +407,28 @@ class V8EXPORT HeapProfiler {
static const SnapshotObjectId kUnknownObjectId = 0;
/**
+ * Callback interface for retrieving user-friendly names of global objects.
+ */
+ class ObjectNameResolver {
+ public:
+ /**
+ * Returns the name to be used in the heap snapshot for the given node.
+ * The returned string must stay alive until snapshot collection is completed.
+ */
+ virtual const char* GetName(Handle<Object> object) = 0;
+ protected:
+ virtual ~ObjectNameResolver() {}
+ };
+
+ /**
* Takes a heap snapshot and returns it. Title may be an empty string.
* See HeapSnapshot::Type for types description.
*/
static const HeapSnapshot* TakeSnapshot(
Handle<String> title,
HeapSnapshot::Type type = HeapSnapshot::kFull,
- ActivityControl* control = NULL);
+ ActivityControl* control = NULL,
+ ObjectNameResolver* global_object_name_resolver = NULL);
/**
* Starts tracking of heap objects population statistics. After calling
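
Illustration (not part of the patch): a minimal resolver implementing the hook declared above. The class name and the returned label are invented for the example; the returned string is a static literal, so it outlives snapshot collection as the documentation requires.

    // Labels every global object with a fixed, statically-allocated name.
    class FixedNameResolver : public v8::HeapProfiler::ObjectNameResolver {
     public:
      virtual const char* GetName(v8::Handle<v8::Object> object) {
        return "my-global";
      }
    };

    FixedNameResolver resolver;
    const v8::HeapSnapshot* snapshot = v8::HeapProfiler::TakeSnapshot(
        v8::String::New("example"), v8::HeapSnapshot::kFull,
        NULL, &resolver);
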
diff --git a/src/3rdparty/v8/include/v8.h b/src/3rdparty/v8/include/v8.h
index 27c5855..2ad95c5 100644
--- a/src/3rdparty/v8/include/v8.h
+++ b/src/3rdparty/v8/include/v8.h
@@ -38,6 +38,11 @@
#ifndef V8_H_
#define V8_H_
+// TODO(svenpanne) Remove me when the Chrome bindings are adapted.
+#define V8_DISABLE_DEPRECATIONS 1
+// TODO(dcarney): Remove once Latin-1 transitions in WebKit have stuck.
+#define V8_ONE_BYTE_STRINGS_ENABLED 1
+
#include "v8stdint.h"
#ifdef _WIN32
@@ -76,6 +81,22 @@
#endif // _WIN32
+#if defined(__GNUC__) && !defined(__MINGW32__) && !defined(__MINGW64__) && !defined(DEBUG)
+#define V8_INLINE(declarator) inline __attribute__((always_inline)) declarator
+#elif defined(_MSC_VER) && !defined(DEBUG)
+#define V8_INLINE(declarator) __forceinline declarator
+#else
+#define V8_INLINE(declarator) inline declarator
+#endif
+
+#if defined(__GNUC__) && !V8_DISABLE_DEPRECATIONS
+#define V8_DEPRECATED(declarator) declarator __attribute__ ((deprecated))
+#elif defined(_MSC_VER) && !V8_DISABLE_DEPRECATIONS
+#define V8_DEPRECATED(declarator) __declspec(deprecated) declarator
+#else
+#define V8_DEPRECATED(declarator) declarator
+#endif
+
/**
* The v8 JavaScript engine.
*/
@@ -138,6 +159,10 @@ class Isolate;
typedef void (*WeakReferenceCallback)(Persistent<Value> object,
void* parameter);
+// TODO(svenpanne) Temporary definition until Chrome is in sync.
+typedef void (*NearDeathCallback)(Isolate* isolate,
+ Persistent<Value> object,
+ void* parameter);
// --- Handles ---
@@ -176,12 +201,12 @@ template <class T> class Handle {
/**
* Creates an empty handle.
*/
- inline Handle() : val_(0) {}
+ V8_INLINE(Handle()) : val_(0) {}
/**
* Creates a new handle for the specified value.
*/
- inline explicit Handle(T* val) : val_(val) {}
+ V8_INLINE(explicit Handle(T* val)) : val_(val) {}
/**
* Creates a handle for the contents of the specified handle. This
@@ -193,7 +218,7 @@ template <class T> class Handle {
* Handle<String> to a variable declared as Handle<Value>, is legal
* because String is a subclass of Value.
*/
- template <class S> inline Handle(Handle<S> that)
+ template <class S> V8_INLINE(Handle(Handle<S> that))
: val_(reinterpret_cast<T*>(*that)) {
/**
* This check fails when trying to convert between incompatible
@@ -206,16 +231,16 @@ template <class T> class Handle {
/**
* Returns true if the handle is empty.
*/
- inline bool IsEmpty() const { return val_ == 0; }
+ V8_INLINE(bool IsEmpty() const) { return val_ == 0; }
/**
* Sets the handle to be empty. IsEmpty() will then return true.
*/
- inline void Clear() { val_ = 0; }
+ V8_INLINE(void Clear()) { val_ = 0; }
- inline T* operator->() const { return val_; }
+ V8_INLINE(T* operator->() const) { return val_; }
- inline T* operator*() const { return val_; }
+ V8_INLINE(T* operator*() const) { return val_; }
/**
* Checks whether two handles are the same.
@@ -223,7 +248,7 @@ template <class T> class Handle {
* to which they refer are identical.
* The handles' references are not checked.
*/
- template <class S> inline bool operator==(Handle<S> that) const {
+ template <class S> V8_INLINE(bool operator==(Handle<S> that) const) {
internal::Object** a = reinterpret_cast<internal::Object**>(**this);
internal::Object** b = reinterpret_cast<internal::Object**>(*that);
if (a == 0) return b == 0;
@@ -237,11 +262,11 @@ template <class T> class Handle {
* the objects to which they refer are different.
* The handles' references are not checked.
*/
- template <class S> inline bool operator!=(Handle<S> that) const {
+ template <class S> V8_INLINE(bool operator!=(Handle<S> that) const) {
return !operator==(that);
}
- template <class S> static inline Handle<T> Cast(Handle<S> that) {
+ template <class S> V8_INLINE(static Handle<T> Cast(Handle<S> that)) {
#ifdef V8_ENABLE_CHECKS
// If we're going to perform the type check then we have to check
// that the handle isn't empty before doing the checked cast.
@@ -250,7 +275,7 @@ template <class T> class Handle {
return Handle<T>(T::Cast(*that));
}
- template <class S> inline Handle<S> As() {
+ template <class S> V8_INLINE(Handle<S> As()) {
return Handle<S>::Cast(*this);
}
@@ -268,8 +293,8 @@ template <class T> class Handle {
*/
template <class T> class Local : public Handle<T> {
public:
- inline Local();
- template <class S> inline Local(Local<S> that)
+ V8_INLINE(Local());
+ template <class S> V8_INLINE(Local(Local<S> that))
: Handle<T>(reinterpret_cast<T*>(*that)) {
/**
* This check fails when trying to convert between incompatible
@@ -278,8 +303,8 @@ template <class T> class Local : public Handle<T> {
*/
TYPE_CHECK(T, S);
}
- template <class S> inline Local(S* that) : Handle<T>(that) { }
- template <class S> static inline Local<T> Cast(Local<S> that) {
+ template <class S> V8_INLINE(Local(S* that) : Handle<T>(that)) { }
+ template <class S> V8_INLINE(static Local<T> Cast(Local<S> that)) {
#ifdef V8_ENABLE_CHECKS
// If we're going to perform the type check then we have to check
// that the handle isn't empty before doing the checked cast.
@@ -288,15 +313,17 @@ template <class T> class Local : public Handle<T> {
return Local<T>(T::Cast(*that));
}
- template <class S> inline Local<S> As() {
+ template <class S> V8_INLINE(Local<S> As()) {
return Local<S>::Cast(*this);
}
- /** Create a local handle for the content of another handle.
- * The referee is kept alive by the local handle even when
- * the original handle is destroyed/disposed.
+ /**
+ * Create a local handle for the content of another handle.
+ * The referee is kept alive by the local handle even when
+ * the original handle is destroyed/disposed.
*/
- inline static Local<T> New(Handle<T> that);
+ V8_INLINE(static Local<T> New(Handle<T> that));
+ V8_INLINE(static Local<T> New(Isolate* isolate, Handle<T> that));
};
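
Usage sketch (not part of the patch; assumes a live `isolate` and an existing handle `str`): the new Isolate-aware overload declared above copies a handle into that isolate's current HandleScope.

    // Create a fresh local handle in isolate's current HandleScope.
    v8::Local<v8::String> copy = v8::Local<v8::String>::New(isolate, str);
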
@@ -323,7 +350,7 @@ template <class T> class Persistent : public Handle<T> {
* Creates an empty persistent handle that doesn't point to any
* storage cell.
*/
- inline Persistent();
+ V8_INLINE(Persistent());
/**
* Creates a persistent handle for the same storage cell as the
@@ -336,7 +363,7 @@ template <class T> class Persistent : public Handle<T> {
* Persistent<String> to a variable declared as Persistent<Value>,
* is allowed as String is a subclass of Value.
*/
- template <class S> inline Persistent(Persistent<S> that)
+ template <class S> V8_INLINE(Persistent(Persistent<S> that))
: Handle<T>(reinterpret_cast<T*>(*that)) {
/**
* This check fails when trying to convert between incompatible
@@ -346,16 +373,16 @@ template <class T> class Persistent : public Handle<T> {
TYPE_CHECK(T, S);
}
- template <class S> inline Persistent(S* that) : Handle<T>(that) { }
+ template <class S> V8_INLINE(Persistent(S* that)) : Handle<T>(that) { }
/**
* "Casts" a plain handle which is known to be a persistent handle
* to a persistent handle.
*/
- template <class S> explicit inline Persistent(Handle<S> that)
+ template <class S> explicit V8_INLINE(Persistent(Handle<S> that))
: Handle<T>(*that) { }
- template <class S> static inline Persistent<T> Cast(Persistent<S> that) {
+ template <class S> V8_INLINE(static Persistent<T> Cast(Persistent<S> that)) {
#ifdef V8_ENABLE_CHECKS
// If we're going to perform the type check then we have to check
// that the handle isn't empty before doing the checked cast.
@@ -364,15 +391,20 @@ template <class T> class Persistent : public Handle<T> {
return Persistent<T>(T::Cast(*that));
}
- template <class S> inline Persistent<S> As() {
+ template <class S> V8_INLINE(Persistent<S> As()) {
return Persistent<S>::Cast(*this);
}
+ /** Deprecated. Use Isolate version instead. */
+ V8_DEPRECATED(static Persistent<T> New(Handle<T> that));
+
/**
- * Creates a new persistent handle for an existing local or
- * persistent handle.
+ * Creates a new persistent handle for an existing local or persistent handle.
*/
- inline static Persistent<T> New(Handle<T> that);
+ V8_INLINE(static Persistent<T> New(Isolate* isolate, Handle<T> that));
+
+ /** Deprecated. Use Isolate version instead. */
+ V8_DEPRECATED(void Dispose());
/**
* Releases the storage cell referenced by this persistent handle.
@@ -380,62 +412,87 @@ template <class T> class Persistent : public Handle<T> {
* This handle's reference, and any other references to the storage
* cell remain and IsEmpty will still return false.
*/
- inline void Dispose();
- inline void Dispose(Isolate* isolate);
+ V8_INLINE(void Dispose(Isolate* isolate));
+
+ /** Deprecated. Use Isolate version instead. */
+ V8_DEPRECATED(void MakeWeak(void* parameters,
+ WeakReferenceCallback callback));
/**
* Make the reference to this object weak. When only weak handles
* refer to the object, the garbage collector will perform a
- * callback to the given V8::WeakReferenceCallback function, passing
+ * callback to the given V8::NearDeathCallback function, passing
* it the object reference and the given parameters.
*/
- inline void MakeWeak(void* parameters, WeakReferenceCallback callback);
+ V8_INLINE(void MakeWeak(Isolate* isolate,
+ void* parameters,
+ NearDeathCallback callback));
+
+ /** Deprecated. Use Isolate version instead. */
+ V8_DEPRECATED(void ClearWeak());
/** Clears the weak reference to this object. */
- inline void ClearWeak();
+ V8_INLINE(void ClearWeak(Isolate* isolate));
+
+ /** Deprecated. Use Isolate version instead. */
+ V8_DEPRECATED(void MarkIndependent());
/**
- * Marks the reference to this object independent. Garbage collector
- * is free to ignore any object groups containing this object.
- * Weak callback for an independent handle should not
- * assume that it will be preceded by a global GC prologue callback
- * or followed by a global GC epilogue callback.
+ * Marks the reference to this object independent. The garbage collector is
+ * free to ignore any object groups containing this object. The weak callback
+ * for an independent handle should not assume that it will be preceded by a
+ * global GC prologue callback or followed by a global GC epilogue callback.
*/
- inline void MarkIndependent();
- inline void MarkIndependent(Isolate* isolate);
+ V8_INLINE(void MarkIndependent(Isolate* isolate));
+
+ /** Deprecated. Use Isolate version instead. */
+ V8_DEPRECATED(void MarkPartiallyDependent());
/**
- * Marks the reference to this object partially dependent. Partially
- * dependent handles only depend on other partially dependent handles and
- * these dependencies are provided through object groups. It provides a way
- * to build smaller object groups for young objects that represent only a
- * subset of all external dependencies. This mark is automatically cleared
- * after each garbage collection.
+ * Marks the reference to this object partially dependent. Partially dependent
+ * handles only depend on other partially dependent handles and these
+ * dependencies are provided through object groups. It provides a way to build
+ * smaller object groups for young objects that represent only a subset of all
+ * external dependencies. This mark is automatically cleared after each
+ * garbage collection.
*/
- inline void MarkPartiallyDependent();
- inline void MarkPartiallyDependent(Isolate* isolate);
+ V8_INLINE(void MarkPartiallyDependent(Isolate* isolate));
+
+ /** Deprecated. Use Isolate version instead. */
+ V8_DEPRECATED(bool IsIndependent() const);
/** Returns true if this handle was previously marked as independent. */
- inline bool IsIndependent() const;
- inline bool IsIndependent(Isolate* isolate) const;
+ V8_INLINE(bool IsIndependent(Isolate* isolate) const);
+
+ /** Deprecated. Use Isolate version instead. */
+ V8_DEPRECATED(bool IsNearDeath() const);
/** Checks if the handle holds the only reference to an object. */
- inline bool IsNearDeath() const;
+ V8_INLINE(bool IsNearDeath(Isolate* isolate) const);
+
+ /** Deprecated. Use Isolate version instead. */
+ V8_DEPRECATED(bool IsWeak() const);
/** Returns true if the handle's reference is weak. */
- inline bool IsWeak() const;
+ V8_INLINE(bool IsWeak(Isolate* isolate) const);
+
+ /** Deprecated. Use Isolate version instead. */
+ V8_DEPRECATED(void SetWrapperClassId(uint16_t class_id));
/**
- * Assigns a wrapper class ID to the handle. See RetainedObjectInfo
- * interface description in v8-profiler.h for details.
+ * Assigns a wrapper class ID to the handle. See RetainedObjectInfo interface
+ * description in v8-profiler.h for details.
*/
- inline void SetWrapperClassId(uint16_t class_id);
+ V8_INLINE(void SetWrapperClassId(Isolate* isolate, uint16_t class_id));
+
+ /** Deprecated. Use Isolate version instead. */
+ V8_DEPRECATED(uint16_t WrapperClassId() const);
/**
- * Returns the class ID previously assigned to this handle or 0 if no class
- * ID was previously assigned.
+ * Returns the class ID previously assigned to this handle or 0 if no class ID
+ * was previously assigned.
*/
- inline uint16_t WrapperClassId() const;
+ V8_INLINE(uint16_t WrapperClassId(Isolate* isolate) const);
private:
friend class ImplementationUtilities;
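
A sketch of the isolate-aware Persistent lifecycle introduced above; it assumes the NearDeathCallback typedef declared earlier in this header takes (Isolate*, Persistent<Value>, void*), and OnNearDeath/Track are hypothetical embedder functions:

    // Near-death callback: the object is about to be collected, so release
    // the storage cell.
    void OnNearDeath(v8::Isolate* isolate,
                     v8::Persistent<v8::Value> object,
                     void* parameter) {
      object.Dispose(isolate);
    }

    void Track(v8::Isolate* isolate, v8::Handle<v8::Object> obj) {
      v8::Persistent<v8::Object> handle =
          v8::Persistent<v8::Object>::New(isolate, obj);
      // Let the GC reclaim the object once only weak handles refer to it.
      handle.MakeWeak(isolate, NULL, OnNearDeath);
    }
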
@@ -478,12 +535,14 @@ class V8EXPORT HandleScope {
* Creates a new handle with the given value.
*/
static internal::Object** CreateHandle(internal::Object* value);
+ static internal::Object** CreateHandle(internal::Isolate* isolate,
+ internal::Object* value);
// Faster version, uses HeapObject to obtain the current Isolate.
static internal::Object** CreateHandle(internal::HeapObject* value);
private:
- // Make it impossible to create heap-allocated or illegal handle
- // scopes by disallowing certain operations.
+ // Make it hard to create heap-allocated or illegal handle scopes by
+ // disallowing certain operations.
HandleScope(const HandleScope&);
void operator=(const HandleScope&);
void* operator new(size_t size);
@@ -496,7 +555,7 @@ class V8EXPORT HandleScope {
internal::Object** next;
internal::Object** limit;
int level;
- inline void Initialize() {
+ V8_INLINE(void Initialize()) {
next = limit = NULL;
level = 0;
}
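
For context, the usual stack-allocated HandleScope pattern that CreateHandle() serves; Close() promotes one handle into the enclosing scope before the scope's local handles are deallocated:

    v8::Local<v8::String> MakeGreeting() {
      v8::HandleScope scope;
      v8::Local<v8::String> result = v8::String::New("hello world");
      return scope.Close(result);  // survives this scope's destruction
    }
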
@@ -589,16 +648,16 @@ class V8EXPORT ScriptData { // NOLINT
*/
class ScriptOrigin {
public:
- inline ScriptOrigin(
+ V8_INLINE(ScriptOrigin(
Handle<Value> resource_name,
Handle<Integer> resource_line_offset = Handle<Integer>(),
- Handle<Integer> resource_column_offset = Handle<Integer>())
+ Handle<Integer> resource_column_offset = Handle<Integer>()))
: resource_name_(resource_name),
resource_line_offset_(resource_line_offset),
resource_column_offset_(resource_column_offset) { }
- inline Handle<Value> ResourceName() const;
- inline Handle<Integer> ResourceLineOffset() const;
- inline Handle<Integer> ResourceColumnOffset() const;
+ V8_INLINE(Handle<Value> ResourceName() const);
+ V8_INLINE(Handle<Integer> ResourceLineOffset() const);
+ V8_INLINE(Handle<Integer> ResourceColumnOffset() const);
private:
Handle<Value> resource_name_;
Handle<Integer> resource_line_offset_;
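
A sketch of wiring a ScriptOrigin into compilation, so messages and stack traces can report the resource name and offsets; Script::Compile is declared elsewhere in this header, and the URL is illustrative:

    v8::ScriptOrigin origin(v8::String::New("http://example.com/app.js"),
                            v8::Integer::New(0),   // line offset
                            v8::Integer::New(0));  // column offset
    v8::Local<v8::Script> script =
        v8::Script::Compile(v8::String::New("6 * 7"), &origin);
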
@@ -891,183 +950,184 @@ class V8EXPORT StackFrame {
/**
* The superclass of all JavaScript values and objects.
*/
-class Value : public Data {
+class V8EXPORT Value : public Data {
public:
/**
* Returns true if this value is the undefined value. See ECMA-262
* 4.3.10.
*/
- inline bool IsUndefined() const;
+ V8_INLINE(bool IsUndefined() const);
/**
* Returns true if this value is the null value. See ECMA-262
* 4.3.11.
*/
- inline bool IsNull() const;
+ V8_INLINE(bool IsNull() const);
/**
* Returns true if this value is true.
*/
- V8EXPORT bool IsTrue() const;
+ bool IsTrue() const;
/**
* Returns true if this value is false.
*/
- V8EXPORT bool IsFalse() const;
+ bool IsFalse() const;
/**
* Returns true if this value is an instance of the String type.
* See ECMA-262 8.4.
*/
- inline bool IsString() const;
+ V8_INLINE(bool IsString() const);
/**
* Returns true if this value is a function.
*/
- V8EXPORT bool IsFunction() const;
+ bool IsFunction() const;
/**
* Returns true if this value is an array.
*/
- V8EXPORT bool IsArray() const;
+ bool IsArray() const;
/**
* Returns true if this value is an object.
*/
- V8EXPORT bool IsObject() const;
+ bool IsObject() const;
/**
* Returns true if this value is boolean.
*/
- V8EXPORT bool IsBoolean() const;
+ bool IsBoolean() const;
/**
* Returns true if this value is a number.
*/
- V8EXPORT bool IsNumber() const;
+ bool IsNumber() const;
/**
* Returns true if this value is external.
*/
- V8EXPORT bool IsExternal() const;
+ bool IsExternal() const;
/**
* Returns true if this value is a 32-bit signed integer.
*/
- V8EXPORT bool IsInt32() const;
+ bool IsInt32() const;
/**
* Returns true if this value is a 32-bit unsigned integer.
*/
- V8EXPORT bool IsUint32() const;
+ bool IsUint32() const;
/**
* Returns true if this value is a Date.
*/
- V8EXPORT bool IsDate() const;
+ bool IsDate() const;
/**
* Returns true if this value is a Boolean object.
*/
- V8EXPORT bool IsBooleanObject() const;
+ bool IsBooleanObject() const;
/**
* Returns true if this value is a Number object.
*/
- V8EXPORT bool IsNumberObject() const;
+ bool IsNumberObject() const;
/**
* Returns true if this value is a String object.
*/
- V8EXPORT bool IsStringObject() const;
+ bool IsStringObject() const;
/**
* Returns true if this value is a NativeError.
*/
- V8EXPORT bool IsNativeError() const;
+ bool IsNativeError() const;
/**
* Returns true if this value is a RegExp.
*/
- V8EXPORT bool IsRegExp() const;
+ bool IsRegExp() const;
/**
* Returns true if this value is an Error.
*/
- V8EXPORT bool IsError() const;
+ bool IsError() const;
- V8EXPORT Local<Boolean> ToBoolean() const;
- V8EXPORT Local<Number> ToNumber() const;
- V8EXPORT Local<String> ToString() const;
- V8EXPORT Local<String> ToDetailString() const;
- V8EXPORT Local<Object> ToObject() const;
- V8EXPORT Local<Integer> ToInteger() const;
- V8EXPORT Local<Uint32> ToUint32() const;
- V8EXPORT Local<Int32> ToInt32() const;
+ Local<Boolean> ToBoolean() const;
+ Local<Number> ToNumber() const;
+ Local<String> ToString() const;
+ Local<String> ToDetailString() const;
+ Local<Object> ToObject() const;
+ Local<Integer> ToInteger() const;
+ Local<Uint32> ToUint32() const;
+ Local<Int32> ToInt32() const;
/**
* Attempts to convert a string to an array index.
* Returns an empty handle if the conversion fails.
*/
- V8EXPORT Local<Uint32> ToArrayIndex() const;
+ Local<Uint32> ToArrayIndex() const;
- V8EXPORT bool BooleanValue() const;
- V8EXPORT double NumberValue() const;
- V8EXPORT int64_t IntegerValue() const;
- V8EXPORT uint32_t Uint32Value() const;
- V8EXPORT int32_t Int32Value() const;
+ bool BooleanValue() const;
+ double NumberValue() const;
+ int64_t IntegerValue() const;
+ uint32_t Uint32Value() const;
+ int32_t Int32Value() const;
/** JS == */
- V8EXPORT bool Equals(Handle<Value> that) const;
- V8EXPORT bool StrictEquals(Handle<Value> that) const;
+ bool Equals(Handle<Value> that) const;
+ bool StrictEquals(Handle<Value> that) const;
private:
- inline bool QuickIsUndefined() const;
- inline bool QuickIsNull() const;
- inline bool QuickIsString() const;
- V8EXPORT bool FullIsUndefined() const;
- V8EXPORT bool FullIsNull() const;
- V8EXPORT bool FullIsString() const;
+ V8_INLINE(bool QuickIsUndefined() const);
+ V8_INLINE(bool QuickIsNull() const);
+ V8_INLINE(bool QuickIsString() const);
+ bool FullIsUndefined() const;
+ bool FullIsNull() const;
+ bool FullIsString() const;
};
/**
* The superclass of primitive values. See ECMA-262 4.3.2.
*/
-class Primitive : public Value { };
+class V8EXPORT Primitive : public Value { };
/**
* A primitive boolean value (ECMA-262, 4.3.14). Either the true
* or false value.
*/
-class Boolean : public Primitive {
+class V8EXPORT Boolean : public Primitive {
public:
- V8EXPORT bool Value() const;
- static inline Handle<Boolean> New(bool value);
+ bool Value() const;
+ V8_INLINE(static Handle<Boolean> New(bool value));
};
/**
* A JavaScript string value (ECMA-262, 4.3.17).
*/
-class String : public Primitive {
+class V8EXPORT String : public Primitive {
public:
enum Encoding {
UNKNOWN_ENCODING = 0x1,
TWO_BYTE_ENCODING = 0x0,
- ASCII_ENCODING = 0x4
+ ASCII_ENCODING = 0x4,
+ ONE_BYTE_ENCODING = 0x4
};
/**
* Returns the number of characters in this string.
*/
- V8EXPORT int Length() const;
+ int Length() const;
/**
* Returns the number of bytes in the UTF-8 encoded
* representation of this string.
*/
- V8EXPORT int Utf8Length() const;
+ int Utf8Length() const;
/**
* A fast conservative check for non-ASCII characters. May
@@ -1075,12 +1135,17 @@ class String : public Primitive {
* false you can be sure that all characters are in the range
* 0-127.
*/
- V8EXPORT bool MayContainNonAscii() const;
+ bool MayContainNonAscii() const;
+
+ /**
+ * Returns whether this string contains only one-byte data.
+ */
+ bool IsOneByte() const;
/**
* Returns the hash of this string.
*/
- V8EXPORT uint32_t Hash() const;
+ uint32_t Hash() const;
struct CompleteHashData {
CompleteHashData() : length(0), hash(0), symbol_id(0) {}
@@ -1103,14 +1168,14 @@ class String : public Primitive {
* If the symbol ids are different the strings may still be
* identical, but an Equals() check must be performed.
*/
- V8EXPORT CompleteHashData CompleteHash() const;
+ CompleteHashData CompleteHash() const;
/**
* Compute a hash value for the passed UTF16 string
* data.
*/
- V8EXPORT static uint32_t ComputeHash(uint16_t *string, int length);
- V8EXPORT static uint32_t ComputeHash(char *string, int length);
+ static uint32_t ComputeHash(uint16_t *string, int length);
+ static uint32_t ComputeHash(char *string, int length);
/**
* Write the contents of the string to an external buffer.
@@ -1145,36 +1210,41 @@ class String : public Primitive {
};
// 16-bit character codes.
- V8EXPORT int Write(uint16_t* buffer,
- int start = 0,
- int length = -1,
- int options = NO_OPTIONS) const;
+ int Write(uint16_t* buffer,
+ int start = 0,
+ int length = -1,
+ int options = NO_OPTIONS) const;
// ASCII characters.
- V8EXPORT int WriteAscii(char* buffer,
- int start = 0,
- int length = -1,
- int options = NO_OPTIONS) const;
+ int WriteAscii(char* buffer,
+ int start = 0,
+ int length = -1,
+ int options = NO_OPTIONS) const;
+ // One byte characters.
+ int WriteOneByte(uint8_t* buffer,
+ int start = 0,
+ int length = -1,
+ int options = NO_OPTIONS) const;
// UTF-8 encoded characters.
- V8EXPORT int WriteUtf8(char* buffer,
- int length = -1,
- int* nchars_ref = NULL,
- int options = NO_OPTIONS) const;
+ int WriteUtf8(char* buffer,
+ int length = -1,
+ int* nchars_ref = NULL,
+ int options = NO_OPTIONS) const;
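
A sketch pairing the new WriteOneByte() with the existing UTF-8 path; buffer sizes follow the Length()/Utf8Length() contract, plus one byte for the NUL terminator:

    #include <vector>

    void CopyOut(v8::Handle<v8::String> str) {
      if (str->IsOneByte()) {
        std::vector<uint8_t> buf(str->Length() + 1);
        str->WriteOneByte(&buf[0]);  // start = 0, length = -1: whole string
      } else {
        std::vector<char> buf(str->Utf8Length() + 1);
        str->WriteUtf8(&buf[0]);
      }
    }
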
/**
* A zero length string.
*/
- V8EXPORT static v8::Local<v8::String> Empty();
- inline static v8::Local<v8::String> Empty(Isolate* isolate);
+ static v8::Local<v8::String> Empty();
+ V8_INLINE(static v8::Local<v8::String> Empty(Isolate* isolate));
/**
* Returns true if the string is external
*/
- V8EXPORT bool IsExternal() const;
+ bool IsExternal() const;
/**
* Returns true if the string is both external and ASCII
*/
- V8EXPORT bool IsExternalAscii() const;
+ bool IsExternalAscii() const;
class V8EXPORT ExternalStringResourceBase { // NOLINT
public:
@@ -1255,52 +1325,48 @@ class String : public Primitive {
ExternalAsciiStringResource() {}
};
+ typedef ExternalAsciiStringResource ExternalOneByteStringResource;
+
/**
* If the string is an external string, return the ExternalStringResourceBase
* regardless of the encoding, otherwise return NULL. The encoding of the
* string is returned in encoding_out.
*/
- inline ExternalStringResourceBase* GetExternalStringResourceBase(
- Encoding* encoding_out) const;
+ V8_INLINE(ExternalStringResourceBase* GetExternalStringResourceBase(
+ Encoding* encoding_out) const);
/**
* Get the ExternalStringResource for an external string. Returns
* NULL if IsExternal() doesn't return true.
*/
- inline ExternalStringResource* GetExternalStringResource() const;
+ V8_INLINE(ExternalStringResource* GetExternalStringResource() const);
/**
* Get the ExternalAsciiStringResource for an external ASCII string.
* Returns NULL if IsExternalAscii() doesn't return true.
*/
- V8EXPORT const ExternalAsciiStringResource* GetExternalAsciiStringResource()
- const;
+ const ExternalAsciiStringResource* GetExternalAsciiStringResource() const;
- static inline String* Cast(v8::Value* obj);
+ V8_INLINE(static String* Cast(v8::Value* obj));
/**
* Allocates a new string from either UTF-8 encoded or ASCII data.
- * The second parameter 'length' gives the buffer length.
- * If the data is UTF-8 encoded, the caller must
- * be careful to supply the length parameter.
- * If it is not given, the function calls
- * 'strlen' to determine the buffer length, it might be
- * wrong if 'data' contains a null character.
+ * The second parameter 'length' gives the buffer length. If omitted,
+ * the function calls 'strlen' to determine the buffer length.
*/
- V8EXPORT static Local<String> New(const char* data, int length = -1);
+ static Local<String> New(const char* data, int length = -1);
/** Allocates a new string from 16-bit character codes.*/
- V8EXPORT static Local<String> New(const uint16_t* data, int length = -1);
+ static Local<String> New(const uint16_t* data, int length = -1);
/** Creates a symbol. Returns one if it exists already.*/
- V8EXPORT static Local<String> NewSymbol(const char* data, int length = -1);
+ static Local<String> NewSymbol(const char* data, int length = -1);
/**
* Creates a new string by concatenating the left and the right strings
* passed in as parameters.
*/
- V8EXPORT static Local<String> Concat(Handle<String> left,
- Handle<String> right);
+ static Local<String> Concat(Handle<String> left, Handle<String> right);
/**
* Creates a new external string using the data defined in the given
@@ -1310,7 +1376,7 @@ class String : public Primitive {
* should the underlying buffer be deallocated or modified except through the
* destructor of the external string resource.
*/
- V8EXPORT static Local<String> NewExternal(ExternalStringResource* resource);
+ static Local<String> NewExternal(ExternalStringResource* resource);
/**
* Associate an external string resource with this string by transforming it
@@ -1321,7 +1387,7 @@ class String : public Primitive {
* The string is not modified if the operation fails. See NewExternal for
* information on the lifetime of the resource.
*/
- V8EXPORT bool MakeExternal(ExternalStringResource* resource);
+ bool MakeExternal(ExternalStringResource* resource);
/**
* Creates a new external string using the ASCII data defined in the given
@@ -1330,8 +1396,8 @@ class String : public Primitive {
* this function should not otherwise delete or modify the resource. Neither
* should the underlying buffer be deallocated or modified except through the
* destructor of the external string resource.
- */ V8EXPORT static Local<String> NewExternal(
- ExternalAsciiStringResource* resource);
+ */
+ static Local<String> NewExternal(ExternalAsciiStringResource* resource);
/**
* Associate an external string resource with this string by transforming it
@@ -1342,20 +1408,18 @@ class String : public Primitive {
* The string is not modified if the operation fails. See NewExternal for
* information on the lifetime of the resource.
*/
- V8EXPORT bool MakeExternal(ExternalAsciiStringResource* resource);
+ bool MakeExternal(ExternalAsciiStringResource* resource);
/**
* Returns true if this string can be made external.
*/
- V8EXPORT bool CanMakeExternal();
+ bool CanMakeExternal();
/** Creates an undetectable string from the supplied ASCII or UTF-8 data.*/
- V8EXPORT static Local<String> NewUndetectable(const char* data,
- int length = -1);
+ static Local<String> NewUndetectable(const char* data, int length = -1);
/** Creates an undetectable string from the supplied 16-bit character codes.*/
- V8EXPORT static Local<String> NewUndetectable(const uint16_t* data,
- int length = -1);
+ static Local<String> NewUndetectable(const uint16_t* data, int length = -1);
/**
* Converts an object to a UTF-8-encoded character array. Useful if
@@ -1426,63 +1490,63 @@ class String : public Primitive {
};
private:
- V8EXPORT void VerifyExternalStringResourceBase(ExternalStringResourceBase* v,
- Encoding encoding) const;
- V8EXPORT void VerifyExternalStringResource(ExternalStringResource* val) const;
- V8EXPORT static void CheckCast(v8::Value* obj);
+ void VerifyExternalStringResourceBase(ExternalStringResourceBase* v,
+ Encoding encoding) const;
+ void VerifyExternalStringResource(ExternalStringResource* val) const;
+ static void CheckCast(v8::Value* obj);
};
/**
* A JavaScript number value (ECMA-262, 4.3.20)
*/
-class Number : public Primitive {
+class V8EXPORT Number : public Primitive {
public:
- V8EXPORT double Value() const;
- V8EXPORT static Local<Number> New(double value);
- static inline Number* Cast(v8::Value* obj);
+ double Value() const;
+ static Local<Number> New(double value);
+ V8_INLINE(static Number* Cast(v8::Value* obj));
private:
- V8EXPORT Number();
- V8EXPORT static void CheckCast(v8::Value* obj);
+ Number();
+ static void CheckCast(v8::Value* obj);
};
/**
* A JavaScript value representing a signed integer.
*/
-class Integer : public Number {
+class V8EXPORT Integer : public Number {
public:
- V8EXPORT static Local<Integer> New(int32_t value);
- V8EXPORT static Local<Integer> NewFromUnsigned(uint32_t value);
- V8EXPORT static Local<Integer> New(int32_t value, Isolate*);
- V8EXPORT static Local<Integer> NewFromUnsigned(uint32_t value, Isolate*);
- V8EXPORT int64_t Value() const;
- static inline Integer* Cast(v8::Value* obj);
+ static Local<Integer> New(int32_t value);
+ static Local<Integer> NewFromUnsigned(uint32_t value);
+ static Local<Integer> New(int32_t value, Isolate*);
+ static Local<Integer> NewFromUnsigned(uint32_t value, Isolate*);
+ int64_t Value() const;
+ V8_INLINE(static Integer* Cast(v8::Value* obj));
private:
- V8EXPORT Integer();
- V8EXPORT static void CheckCast(v8::Value* obj);
+ Integer();
+ static void CheckCast(v8::Value* obj);
};
/**
* A JavaScript value representing a 32-bit signed integer.
*/
-class Int32 : public Integer {
+class V8EXPORT Int32 : public Integer {
public:
- V8EXPORT int32_t Value() const;
+ int32_t Value() const;
private:
- V8EXPORT Int32();
+ Int32();
};
/**
* A JavaScript value representing a 32-bit unsigned integer.
*/
-class Uint32 : public Integer {
+class V8EXPORT Uint32 : public Integer {
public:
- V8EXPORT uint32_t Value() const;
+ uint32_t Value() const;
private:
- V8EXPORT Uint32();
+ Uint32();
};
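
A short sketch of the Isolate-taking factories above; 'isolate' is assumed to be the currently entered isolate:

    v8::Local<v8::Integer> answer = v8::Integer::New(42, isolate);
    v8::Local<v8::Integer> big =
        v8::Integer::NewFromUnsigned(0xFFFFFFFFu, isolate);
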
@@ -1543,14 +1607,13 @@ enum AccessControl {
/**
* A JavaScript object (ECMA-262, 4.3.3)
*/
-class Object : public Value {
+class V8EXPORT Object : public Value {
public:
- V8EXPORT bool Set(Handle<Value> key,
- Handle<Value> value,
- PropertyAttribute attribs = None);
+ bool Set(Handle<Value> key,
+ Handle<Value> value,
+ PropertyAttribute attribs = None);
- V8EXPORT bool Set(uint32_t index,
- Handle<Value> value);
+ bool Set(uint32_t index, Handle<Value> value);
// Sets a local property on this object bypassing interceptors and
// overriding accessors or read-only properties.
@@ -1560,41 +1623,41 @@ class Object : public Value {
// will only be returned if the interceptor doesn't return a value.
//
// Note also that this only works for named properties.
- V8EXPORT bool ForceSet(Handle<Value> key,
- Handle<Value> value,
- PropertyAttribute attribs = None);
+ bool ForceSet(Handle<Value> key,
+ Handle<Value> value,
+ PropertyAttribute attribs = None);
- V8EXPORT Local<Value> Get(Handle<Value> key);
+ Local<Value> Get(Handle<Value> key);
- V8EXPORT Local<Value> Get(uint32_t index);
+ Local<Value> Get(uint32_t index);
/**
* Gets the property attributes of a property which can be None or
* any combination of ReadOnly, DontEnum and DontDelete. Returns
* None when the property doesn't exist.
*/
- V8EXPORT PropertyAttribute GetPropertyAttributes(Handle<Value> key);
+ PropertyAttribute GetPropertyAttributes(Handle<Value> key);
// TODO(1245389): Replace the type-specific versions of these
// functions with generic ones that accept a Handle<Value> key.
- V8EXPORT bool Has(Handle<String> key);
+ bool Has(Handle<String> key);
- V8EXPORT bool Delete(Handle<String> key);
+ bool Delete(Handle<String> key);
// Delete a property on this object bypassing interceptors and
// ignoring dont-delete attributes.
- V8EXPORT bool ForceDelete(Handle<Value> key);
+ bool ForceDelete(Handle<Value> key);
- V8EXPORT bool Has(uint32_t index);
+ bool Has(uint32_t index);
- V8EXPORT bool Delete(uint32_t index);
+ bool Delete(uint32_t index);
- V8EXPORT bool SetAccessor(Handle<String> name,
- AccessorGetter getter,
- AccessorSetter setter = 0,
- Handle<Value> data = Handle<Value>(),
- AccessControl settings = DEFAULT,
- PropertyAttribute attribute = None);
+ bool SetAccessor(Handle<String> name,
+ AccessorGetter getter,
+ AccessorSetter setter = 0,
+ Handle<Value> data = Handle<Value>(),
+ AccessControl settings = DEFAULT,
+ PropertyAttribute attribute = None);
/**
* Returns an array containing the names of the enumerable properties
@@ -1602,66 +1665,75 @@ class Object : public Value {
* array returned by this method contains the same values as would
* be enumerated by a for-in statement over this object.
*/
- V8EXPORT Local<Array> GetPropertyNames();
+ Local<Array> GetPropertyNames();
/**
* This function has the same functionality as GetPropertyNames but
* the returned array doesn't contain the names of properties from
* prototype objects.
*/
- V8EXPORT Local<Array> GetOwnPropertyNames();
+ Local<Array> GetOwnPropertyNames();
/**
* Get the prototype object. This does not skip objects marked to
* be skipped by __proto__ and it does not consult the security
* handler.
*/
- V8EXPORT Local<Value> GetPrototype();
+ Local<Value> GetPrototype();
/**
* Set the prototype object. This does not skip objects marked to
* be skipped by __proto__ and it does not consult the security
* handler.
*/
- V8EXPORT bool SetPrototype(Handle<Value> prototype);
+ bool SetPrototype(Handle<Value> prototype);
/**
* Finds an instance of the given function template in the prototype
* chain.
*/
- V8EXPORT Local<Object> FindInstanceInPrototypeChain(
- Handle<FunctionTemplate> tmpl);
+ Local<Object> FindInstanceInPrototypeChain(Handle<FunctionTemplate> tmpl);
/**
* Call builtin Object.prototype.toString on this object.
* This is different from Value::ToString() that may call
* user-defined toString function. This one does not.
*/
- V8EXPORT Local<String> ObjectProtoToString();
+ Local<String> ObjectProtoToString();
/**
* Returns the function invoked as a constructor for this object.
* May be the null value.
*/
- V8EXPORT Local<Value> GetConstructor();
+ Local<Value> GetConstructor();
/**
* Returns the name of the function invoked as a constructor for this object.
*/
- V8EXPORT Local<String> GetConstructorName();
+ Local<String> GetConstructorName();
/** Gets the number of internal fields for this Object. */
- V8EXPORT int InternalFieldCount();
- /** Gets the value in an internal field. */
- inline Local<Value> GetInternalField(int index);
+ int InternalFieldCount();
+
+ /** Gets the value from an internal field. */
+ V8_INLINE(Local<Value> GetInternalField(int index));
+
/** Sets the value in an internal field. */
- V8EXPORT void SetInternalField(int index, Handle<Value> value);
+ void SetInternalField(int index, Handle<Value> value);
- /** Gets a native pointer from an internal field. */
- inline void* GetPointerFromInternalField(int index);
+ /**
+ * Gets a 2-byte-aligned native pointer from an internal field. This field
+ * must have been set by SetAlignedPointerInInternalField; everything else
+ * leads to undefined behavior.
+ */
+ V8_INLINE(void* GetAlignedPointerFromInternalField(int index));
- /** Sets a native pointer in an internal field. */
- V8EXPORT void SetPointerInInternalField(int index, void* value);
+ /**
+ * Sets a 2-byte-aligned native pointer in an internal field. To retrieve such
+ * a field, GetAlignedPointerFromInternalField must be used; everything else
+ * leads to undefined behavior.
+ */
+ void SetAlignedPointerInInternalField(int index, void* value);
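
A sketch of round-tripping a native pointer through the new aligned-pointer API; it assumes the object came from an ObjectTemplate with SetInternalFieldCount(1), and MyData is a hypothetical embedder type (heap allocations satisfy the 2-byte alignment requirement):

    struct MyData { int value; };

    void Attach(v8::Handle<v8::Object> obj, MyData* data) {
      obj->SetAlignedPointerInInternalField(0, data);
    }

    MyData* Fetch(v8::Handle<v8::Object> obj) {
      return static_cast<MyData*>(obj->GetAlignedPointerFromInternalField(0));
    }
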
class V8EXPORT ExternalResource { // NOLINT
public:
@@ -1679,41 +1751,40 @@ class Object : public Value {
friend class v8::internal::Heap;
};
- V8EXPORT void SetExternalResource(ExternalResource *);
- V8EXPORT ExternalResource *GetExternalResource();
+ void SetExternalResource(ExternalResource* resource);
+ ExternalResource* GetExternalResource();
// Testers for local properties.
- V8EXPORT bool HasOwnProperty(Handle<String> key);
- V8EXPORT bool HasRealNamedProperty(Handle<String> key);
- V8EXPORT bool HasRealIndexedProperty(uint32_t index);
- V8EXPORT bool HasRealNamedCallbackProperty(Handle<String> key);
+ bool HasOwnProperty(Handle<String> key);
+ bool HasRealNamedProperty(Handle<String> key);
+ bool HasRealIndexedProperty(uint32_t index);
+ bool HasRealNamedCallbackProperty(Handle<String> key);
/**
* If result.IsEmpty() no real property was located in the prototype chain.
* This means interceptors in the prototype chain are not called.
*/
- V8EXPORT Local<Value> GetRealNamedPropertyInPrototypeChain(
- Handle<String> key);
+ Local<Value> GetRealNamedPropertyInPrototypeChain(Handle<String> key);
/**
* If result.IsEmpty() no real property was located on the object or
* in the prototype chain.
* This means interceptors in the prototype chain are not called.
*/
- V8EXPORT Local<Value> GetRealNamedProperty(Handle<String> key);
+ Local<Value> GetRealNamedProperty(Handle<String> key);
/** Tests for a named lookup interceptor.*/
- V8EXPORT bool HasNamedLookupInterceptor();
+ bool HasNamedLookupInterceptor();
/** Tests for an index lookup interceptor.*/
- V8EXPORT bool HasIndexedLookupInterceptor();
+ bool HasIndexedLookupInterceptor();
/**
* Turns on access check on the object if the object is an instance of
* a template that has access check callbacks. If an object has no
* access check info, the object cannot be accessed by anyone.
*/
- V8EXPORT void TurnOnAccessCheck();
+ void TurnOnAccessCheck();
/**
* Returns the identity hash for this object. The current implementation
@@ -1722,7 +1793,7 @@ class Object : public Value {
* The return value will never be 0. Also, it is not guaranteed to be
* unique.
*/
- V8EXPORT int GetIdentityHash();
+ int GetIdentityHash();
/**
* Access hidden properties on JavaScript objects. These properties are
@@ -1730,9 +1801,9 @@ class Object : public Value {
* C++ API. Hidden properties introduced by V8 internally (for example the
* identity hash) are prefixed with "v8::".
*/
- V8EXPORT bool SetHiddenValue(Handle<String> key, Handle<Value> value);
- V8EXPORT Local<Value> GetHiddenValue(Handle<String> key);
- V8EXPORT bool DeleteHiddenValue(Handle<String> key);
+ bool SetHiddenValue(Handle<String> key, Handle<Value> value);
+ Local<Value> GetHiddenValue(Handle<String> key);
+ bool DeleteHiddenValue(Handle<String> key);
/**
* Returns true if this is an instance of an api function (one
@@ -1741,18 +1812,18 @@ class Object : public Value {
* conservative and may return true for objects that haven't actually
* been modified.
*/
- V8EXPORT bool IsDirty();
+ bool IsDirty();
/**
* Clone this object with a fast but shallow copy. Values will point
* to the same values as the original object.
*/
- V8EXPORT Local<Object> Clone();
+ Local<Object> Clone();
/**
* Returns the context in which the object was created.
*/
- V8EXPORT Local<Context> CreationContext();
+ Local<Context> CreationContext();
/**
* Set the backing store of the indexed properties to be managed by the
@@ -1761,10 +1832,10 @@ class Object : public Value {
* Note: The embedding program still owns the data and needs to ensure that
* the backing store is preserved while V8 has a reference.
*/
- V8EXPORT void SetIndexedPropertiesToPixelData(uint8_t* data, int length);
- V8EXPORT bool HasIndexedPropertiesInPixelData();
- V8EXPORT uint8_t* GetIndexedPropertiesPixelData();
- V8EXPORT int GetIndexedPropertiesPixelDataLength();
+ void SetIndexedPropertiesToPixelData(uint8_t* data, int length);
+ bool HasIndexedPropertiesInPixelData();
+ uint8_t* GetIndexedPropertiesPixelData();
+ int GetIndexedPropertiesPixelDataLength();
/**
* Set the backing store of the indexed properties to be managed by the
@@ -1773,93 +1844,83 @@ class Object : public Value {
* Note: The embedding program still owns the data and needs to ensure that
* the backing store is preserved while V8 has a reference.
*/
- V8EXPORT void SetIndexedPropertiesToExternalArrayData(
- void* data,
- ExternalArrayType array_type,
- int number_of_elements);
- V8EXPORT bool HasIndexedPropertiesInExternalArrayData();
- V8EXPORT void* GetIndexedPropertiesExternalArrayData();
- V8EXPORT ExternalArrayType GetIndexedPropertiesExternalArrayDataType();
- V8EXPORT int GetIndexedPropertiesExternalArrayDataLength();
+ void SetIndexedPropertiesToExternalArrayData(void* data,
+ ExternalArrayType array_type,
+ int number_of_elements);
+ bool HasIndexedPropertiesInExternalArrayData();
+ void* GetIndexedPropertiesExternalArrayData();
+ ExternalArrayType GetIndexedPropertiesExternalArrayDataType();
+ int GetIndexedPropertiesExternalArrayDataLength();
/**
* Checks whether a callback is set by the
* ObjectTemplate::SetCallAsFunctionHandler method.
* When an Object is callable this method returns true.
*/
- V8EXPORT bool IsCallable();
+ bool IsCallable();
/**
* Call an Object as a function if a callback is set by the
* ObjectTemplate::SetCallAsFunctionHandler method.
*/
- V8EXPORT Local<Value> CallAsFunction(Handle<Object> recv,
- int argc,
- Handle<Value> argv[]);
+ Local<Value> CallAsFunction(Handle<Object> recv,
+ int argc,
+ Handle<Value> argv[]);
/**
* Call an Object as a constructor if a callback is set by the
* ObjectTemplate::SetCallAsFunctionHandler method.
* Note: This method behaves like the Function::NewInstance method.
*/
- V8EXPORT Local<Value> CallAsConstructor(int argc,
- Handle<Value> argv[]);
+ Local<Value> CallAsConstructor(int argc, Handle<Value> argv[]);
- V8EXPORT static Local<Object> New();
- static inline Object* Cast(Value* obj);
+ static Local<Object> New();
+ V8_INLINE(static Object* Cast(Value* obj));
private:
- V8EXPORT Object();
- V8EXPORT static void CheckCast(Value* obj);
- V8EXPORT Local<Value> CheckedGetInternalField(int index);
- V8EXPORT void* SlowGetPointerFromInternalField(int index);
-
- /**
- * If quick access to the internal field is possible this method
- * returns the value. Otherwise an empty handle is returned.
- */
- inline Local<Value> UncheckedGetInternalField(int index);
+ Object();
+ static void CheckCast(Value* obj);
+ Local<Value> SlowGetInternalField(int index);
+ void* SlowGetAlignedPointerFromInternalField(int index);
};
/**
* An instance of the built-in array constructor (ECMA-262, 15.4.2).
*/
-class Array : public Object {
+class V8EXPORT Array : public Object {
public:
- V8EXPORT uint32_t Length() const;
+ uint32_t Length() const;
/**
* Clones an element at index |index|. Returns an empty
* handle if cloning fails (for any reason).
*/
- V8EXPORT Local<Object> CloneElementAt(uint32_t index);
+ Local<Object> CloneElementAt(uint32_t index);
/**
* Creates a JavaScript array with the given length. If the length
* is negative the returned array will have length 0.
*/
- V8EXPORT static Local<Array> New(int length = 0);
+ static Local<Array> New(int length = 0);
- static inline Array* Cast(Value* obj);
+ V8_INLINE(static Array* Cast(Value* obj));
private:
- V8EXPORT Array();
- V8EXPORT static void CheckCast(Value* obj);
+ Array();
+ static void CheckCast(Value* obj);
};
/**
* A JavaScript function object (ECMA-262, 15.3).
*/
-class Function : public Object {
+class V8EXPORT Function : public Object {
public:
- V8EXPORT Local<Object> NewInstance() const;
- V8EXPORT Local<Object> NewInstance(int argc, Handle<Value> argv[]) const;
- V8EXPORT Local<Value> Call(Handle<Object> recv,
- int argc,
- Handle<Value> argv[]);
- V8EXPORT void SetName(Handle<String> name);
- V8EXPORT Handle<Value> GetName() const;
+ Local<Object> NewInstance() const;
+ Local<Object> NewInstance(int argc, Handle<Value> argv[]) const;
+ Local<Value> Call(Handle<Object> recv, int argc, Handle<Value> argv[]);
+ void SetName(Handle<String> name);
+ Handle<Value> GetName() const;
/**
* Name inferred from variable or property assignment of this function.
@@ -1867,43 +1928,43 @@ class Function : public Object {
* in an OO style, where many functions are anonymous but are assigned
* to object properties.
*/
- V8EXPORT Handle<Value> GetInferredName() const;
+ Handle<Value> GetInferredName() const;
/**
* Returns zero based line number of function body and
* kLineOffsetNotFound if no information available.
*/
- V8EXPORT int GetScriptLineNumber() const;
+ int GetScriptLineNumber() const;
/**
* Returns zero based column number of function body and
* kLineOffsetNotFound if no information available.
*/
- V8EXPORT int GetScriptColumnNumber() const;
- V8EXPORT Handle<Value> GetScriptId() const;
- V8EXPORT ScriptOrigin GetScriptOrigin() const;
- static inline Function* Cast(Value* obj);
- V8EXPORT static const int kLineOffsetNotFound;
+ int GetScriptColumnNumber() const;
+ Handle<Value> GetScriptId() const;
+ ScriptOrigin GetScriptOrigin() const;
+ V8_INLINE(static Function* Cast(Value* obj));
+ static const int kLineOffsetNotFound;
private:
- V8EXPORT Function();
- V8EXPORT static void CheckCast(Value* obj);
+ Function();
+ static void CheckCast(Value* obj);
};
/**
* An instance of the built-in Date constructor (ECMA-262, 15.9).
*/
-class Date : public Object {
+class V8EXPORT Date : public Object {
public:
- V8EXPORT static Local<Value> New(double time);
+ static Local<Value> New(double time);
/**
* A specialization of Value::NumberValue that is more efficient
* because we know the structure of this object.
*/
- V8EXPORT double NumberValue() const;
+ double NumberValue() const;
- static inline Date* Cast(v8::Value* obj);
+ V8_INLINE(static Date* Cast(v8::Value* obj));
/**
* Notification that the embedder has changed the time zone,
@@ -1917,74 +1978,74 @@ class Date : public Object {
* This API should not be called more than needed as it will
* negatively impact the performance of date operations.
*/
- V8EXPORT static void DateTimeConfigurationChangeNotification();
+ static void DateTimeConfigurationChangeNotification();
private:
- V8EXPORT static void CheckCast(v8::Value* obj);
+ static void CheckCast(v8::Value* obj);
};
/**
* A Number object (ECMA-262, 4.3.21).
*/
-class NumberObject : public Object {
+class V8EXPORT NumberObject : public Object {
public:
- V8EXPORT static Local<Value> New(double value);
+ static Local<Value> New(double value);
/**
* Returns the Number held by the object.
*/
- V8EXPORT double NumberValue() const;
+ double NumberValue() const;
- static inline NumberObject* Cast(v8::Value* obj);
+ V8_INLINE(static NumberObject* Cast(v8::Value* obj));
private:
- V8EXPORT static void CheckCast(v8::Value* obj);
+ static void CheckCast(v8::Value* obj);
};
/**
* A Boolean object (ECMA-262, 4.3.15).
*/
-class BooleanObject : public Object {
+class V8EXPORT BooleanObject : public Object {
public:
- V8EXPORT static Local<Value> New(bool value);
+ static Local<Value> New(bool value);
/**
* Returns the Boolean held by the object.
*/
- V8EXPORT bool BooleanValue() const;
+ bool BooleanValue() const;
- static inline BooleanObject* Cast(v8::Value* obj);
+ V8_INLINE(static BooleanObject* Cast(v8::Value* obj));
private:
- V8EXPORT static void CheckCast(v8::Value* obj);
+ static void CheckCast(v8::Value* obj);
};
/**
* A String object (ECMA-262, 4.3.18).
*/
-class StringObject : public Object {
+class V8EXPORT StringObject : public Object {
public:
- V8EXPORT static Local<Value> New(Handle<String> value);
+ static Local<Value> New(Handle<String> value);
/**
* Returns the String held by the object.
*/
- V8EXPORT Local<String> StringValue() const;
+ Local<String> StringValue() const;
- static inline StringObject* Cast(v8::Value* obj);
+ V8_INLINE(static StringObject* Cast(v8::Value* obj));
private:
- V8EXPORT static void CheckCast(v8::Value* obj);
+ static void CheckCast(v8::Value* obj);
};
/**
* An instance of the built-in RegExp constructor (ECMA-262, 15.10).
*/
-class RegExp : public Object {
+class V8EXPORT RegExp : public Object {
public:
/**
* Regular expression flag bits. They can be or'ed to enable a set
@@ -2007,51 +2068,37 @@ class RegExp : public Object {
* static_cast<RegExp::Flags>(kGlobal | kMultiline))
* is equivalent to evaluating "/foo/gm".
*/
- V8EXPORT static Local<RegExp> New(Handle<String> pattern,
- Flags flags);
+ static Local<RegExp> New(Handle<String> pattern, Flags flags);
/**
* Returns the value of the source property: a string representing
* the regular expression.
*/
- V8EXPORT Local<String> GetSource() const;
+ Local<String> GetSource() const;
/**
* Returns the flags bit field.
*/
- V8EXPORT Flags GetFlags() const;
+ Flags GetFlags() const;
- static inline RegExp* Cast(v8::Value* obj);
+ V8_INLINE(static RegExp* Cast(v8::Value* obj));
private:
- V8EXPORT static void CheckCast(v8::Value* obj);
+ static void CheckCast(v8::Value* obj);
};
/**
- * A JavaScript value that wraps a C++ void*. This type of value is
- * mainly used to associate C++ data structures with JavaScript
- * objects.
- *
- * The Wrap function V8 will return the most optimal Value object wrapping the
- * C++ void*. The type of the value is not guaranteed to be an External object
- * and no assumptions about its type should be made. To access the wrapped
- * value Unwrap should be used, all other operations on that object will lead
- * to unpredictable results.
+ * A JavaScript value that wraps a C++ void*. This type of value is mainly used
+ * to associate C++ data structures with JavaScript objects.
*/
-class External : public Value {
+class V8EXPORT External : public Value {
public:
- V8EXPORT static Local<Value> Wrap(void* data);
- static inline void* Unwrap(Handle<Value> obj);
-
- V8EXPORT static Local<External> New(void* value);
- static inline External* Cast(Value* obj);
- V8EXPORT void* Value() const;
+ static Local<External> New(void* value);
+ V8_INLINE(static External* Cast(Value* obj));
+ void* Value() const;
private:
- V8EXPORT External();
- V8EXPORT static void CheckCast(v8::Value* obj);
- static inline void* QuickUnwrap(Handle<v8::Value> obj);
- V8EXPORT static void* FullUnwrap(Handle<v8::Value> obj);
+ static void CheckCast(v8::Value* obj);
};
@@ -2066,7 +2113,7 @@ class V8EXPORT Template : public Data {
/** Adds a property to each instance created by this template.*/
void Set(Handle<String> name, Handle<Data> value,
PropertyAttribute attributes = None);
- inline void Set(const char* name, Handle<Data> value);
+ V8_INLINE(void Set(const char* name, Handle<Data> value));
private:
Template();
@@ -2081,16 +2128,16 @@ class V8EXPORT Template : public Data {
* including the receiver, the number and values of arguments, and
* the holder of the function.
*/
-class Arguments {
+class V8EXPORT Arguments {
public:
- inline int Length() const;
- inline Local<Value> operator[](int i) const;
- inline Local<Function> Callee() const;
- inline Local<Object> This() const;
- inline Local<Object> Holder() const;
- inline bool IsConstructCall() const;
- inline Local<Value> Data() const;
- inline Isolate* GetIsolate() const;
+ V8_INLINE(int Length() const);
+ V8_INLINE(Local<Value> operator[](int i) const);
+ V8_INLINE(Local<Function> Callee() const);
+ V8_INLINE(Local<Object> This() const);
+ V8_INLINE(Local<Object> Holder() const);
+ V8_INLINE(bool IsConstructCall() const);
+ V8_INLINE(Local<Value> Data() const);
+ V8_INLINE(Isolate* GetIsolate() const);
private:
static const int kIsolateIndex = 0;
@@ -2099,10 +2146,10 @@ class Arguments {
static const int kHolderIndex = -3;
friend class ImplementationUtilities;
- inline Arguments(internal::Object** implicit_args,
+ V8_INLINE(Arguments(internal::Object** implicit_args,
internal::Object** values,
int length,
- bool is_construct_call);
+ bool is_construct_call));
internal::Object** implicit_args_;
internal::Object** values_;
int length_;
@@ -2116,12 +2163,12 @@ class Arguments {
*/
class V8EXPORT AccessorInfo {
public:
- inline AccessorInfo(internal::Object** args)
+ V8_INLINE(AccessorInfo(internal::Object** args))
: args_(args) { }
- inline Isolate* GetIsolate() const;
- inline Local<Value> Data() const;
- inline Local<Object> This() const;
- inline Local<Object> Holder() const;
+ V8_INLINE(Isolate* GetIsolate() const);
+ V8_INLINE(Local<Value> Data() const);
+ V8_INLINE(Local<Object> This() const);
+ V8_INLINE(Local<Object> Holder() const);
private:
internal::Object** args_;
@@ -2339,7 +2386,8 @@ class V8EXPORT FunctionTemplate : public Template {
static Local<FunctionTemplate> New(
InvocationCallback callback = 0,
Handle<Value> data = Handle<Value>(),
- Handle<Signature> signature = Handle<Signature>());
+ Handle<Signature> signature = Handle<Signature>(),
+ int length = 0);
/** Returns the unique function instance in the current execution context.*/
Local<Function> GetFunction();
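
A sketch of the new 'length' parameter, which presets the .length property of the created function; Add is a hypothetical InvocationCallback:

    v8::Handle<v8::Value> Add(const v8::Arguments& args) {
      return v8::Number::New(args[0]->NumberValue() + args[1]->NumberValue());
    }

    v8::Local<v8::FunctionTemplate> tmpl = v8::FunctionTemplate::New(
        Add, v8::Handle<v8::Value>(), v8::Handle<v8::Signature>(), 2);
    // The function returned by tmpl->GetFunction() now has .length == 2.
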
@@ -2351,6 +2399,9 @@ class V8EXPORT FunctionTemplate : public Template {
void SetCallHandler(InvocationCallback callback,
Handle<Value> data = Handle<Value>());
+ /** Set the predefined length property for the FunctionTemplate. */
+ void SetLength(int length);
+
/** Get the InstanceTemplate. */
Local<ObjectTemplate> InstanceTemplate();
@@ -2363,7 +2414,6 @@ class V8EXPORT FunctionTemplate : public Template {
*/
Local<ObjectTemplate> PrototypeTemplate();
-
/**
* Set the class name of the FunctionTemplate. This is used for
* printing objects created with the function created from the
@@ -2705,7 +2755,7 @@ void V8EXPORT RegisterExtension(Extension* extension);
*/
class V8EXPORT DeclareExtension {
public:
- inline DeclareExtension(Extension* extension) {
+ V8_INLINE(DeclareExtension(Extension* extension)) {
RegisterExtension(extension);
}
};
@@ -2719,10 +2769,10 @@ Handle<Primitive> V8EXPORT Null();
Handle<Boolean> V8EXPORT True();
Handle<Boolean> V8EXPORT False();
-inline Handle<Primitive> Undefined(Isolate* isolate);
-inline Handle<Primitive> Null(Isolate* isolate);
-inline Handle<Boolean> True(Isolate* isolate);
-inline Handle<Boolean> False(Isolate* isolate);
+V8_INLINE(Handle<Primitive> Undefined(Isolate* isolate));
+V8_INLINE(Handle<Primitive> Null(Isolate* isolate));
+V8_INLINE(Handle<Boolean> True(Isolate* isolate));
+V8_INLINE(Handle<Boolean> False(Isolate* isolate));
/**
@@ -2885,16 +2935,6 @@ class V8EXPORT HeapStatistics {
size_t heap_size_limit() { return heap_size_limit_; }
private:
- void set_total_heap_size(size_t size) { total_heap_size_ = size; }
- void set_total_heap_size_executable(size_t size) {
- total_heap_size_executable_ = size;
- }
- void set_total_physical_size(size_t size) {
- total_physical_size_ = size;
- }
- void set_used_heap_size(size_t size) { used_heap_size_ = size; }
- void set_heap_size_limit(size_t size) { heap_size_limit_ = size; }
-
size_t total_heap_size_;
size_t total_heap_size_executable_;
size_t total_physical_size_;
@@ -2902,6 +2942,7 @@ class V8EXPORT HeapStatistics {
size_t heap_size_limit_;
friend class V8;
+ friend class Isolate;
};
@@ -2983,13 +3024,18 @@ class V8EXPORT Isolate {
/**
* Associate embedder-specific data with the isolate
*/
- inline void SetData(void* data);
+ V8_INLINE(void SetData(void* data));
/**
* Retrieve embedder-specific data from the isolate.
* Returns NULL if SetData has never been called.
*/
- inline void* GetData();
+ V8_INLINE(void* GetData());
+
+ /**
+ * Get statistics about the heap memory usage.
+ */
+ void GetHeapStatistics(HeapStatistics* heap_statistics);
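
A sketch of the per-isolate replacement for the deprecated static V8::GetHeapStatistics():

    v8::HeapStatistics stats;
    isolate->GetHeapStatistics(&stats);
    size_t used = stats.used_heap_size();
    size_t limit = stats.heap_size_limit();
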
private:
Isolate();
@@ -3001,7 +3047,7 @@ class V8EXPORT Isolate {
};
-class StartupData {
+class V8EXPORT StartupData {
public:
enum CompressionAlgorithm {
kUncompressed,
@@ -3087,7 +3133,19 @@ struct JitCodeEvent {
enum EventType {
CODE_ADDED,
CODE_MOVED,
- CODE_REMOVED
+ CODE_REMOVED,
+ CODE_ADD_LINE_POS_INFO,
+ CODE_START_LINE_INFO_RECORDING,
+ CODE_END_LINE_INFO_RECORDING
+ };
+ // Definition of the code position type. The "POSITION" type means a place
+ // in the source code that is of interest when making stack traces, used to
+ // pinpoint the source location of a stack frame as closely as possible.
+ // The "STATEMENT_POSITION" type means the place at the beginning of each
+ // statement, and is used to indicate possible break locations.
+ enum PositionType {
+ POSITION,
+ STATEMENT_POSITION
};
// Type of event.
@@ -3096,6 +3154,13 @@ struct JitCodeEvent {
void* code_start;
// Size of the instructions.
size_t code_len;
+ // Script info for CODE_ADDED event.
+ Handle<Script> script;
+ // User-defined data for *_LINE_INFO_* events. It holds the source code
+ // line information returned from the CODE_START_LINE_INFO_RECORDING event
+ // and is passed on to the subsequent CODE_ADD_LINE_POS_INFO and
+ // CODE_END_LINE_INFO_RECORDING events.
+ void* user_data;
union {
// Only valid for CODE_ADDED.
@@ -3106,6 +3171,17 @@ struct JitCodeEvent {
// Number of chars in str.
size_t len;
} name;
+
+ // Only valid for CODE_ADD_LINE_POS_INFO
+ struct {
+ // PC offset
+ size_t offset;
+ // Code position
+ size_t pos;
+ // The position type.
+ PositionType position_type;
+ } line_info;
+
// New location of instructions. Only valid for CODE_MOVED.
void* new_code_start;
};
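
A sketch of an event handler consuming the new line-info events; registration through V8::SetJitCodeEventHandler is assumed from its declaration elsewhere in this header:

    void OnJitEvent(const v8::JitCodeEvent* event) {
      switch (event->type) {
        case v8::JitCodeEvent::CODE_ADDED:
          // event->name.str / event->name.len describe the new code object.
          break;
        case v8::JitCodeEvent::CODE_ADD_LINE_POS_INFO:
          // event->user_data came from CODE_START_LINE_INFO_RECORDING;
          // line_info maps the PC offset to a source position.
          break;
        default:
          break;
      }
    }
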
@@ -3214,8 +3290,12 @@ class V8EXPORT V8 {
*
* The same message listener can be added more than once and in that
* case it will be called more than once for each message.
+ *
+ * If data is specified, it will be passed to the callback when it is called.
+ * Otherwise, the exception object will be passed to the callback instead.
*/
- static bool AddMessageListener(MessageCallback that);
+ static bool AddMessageListener(MessageCallback that,
+ Handle<Value> data = Handle<Value>());
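
A sketch of a listener receiving the optional data value added here; the MessageCallback signature (Handle<Message>, Handle<Value>) is assumed from its typedef earlier in this header:

    void OnMessage(v8::Handle<v8::Message> message,
                   v8::Handle<v8::Value> data) {
      v8::String::Utf8Value text(message->Get());
      // *text is the NUL-terminated UTF-8 message; 'data' is the value
      // passed to AddMessageListener, or the exception object if none was.
    }

    // v8::V8::AddMessageListener(OnMessage, v8::String::New("tag"));
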
/**
* Remove all message listeners from the specified callback function.
@@ -3261,17 +3341,12 @@ class V8EXPORT V8 {
static void SetCreateHistogramFunction(CreateHistogramCallback);
static void SetAddHistogramSampleFunction(AddHistogramSampleCallback);
- /**
- * Enables the computation of a sliding window of states. The sliding
- * window information is recorded in statistics counters.
- */
- static void EnableSlidingStateWindow();
-
/** Callback function for reporting failed access checks.*/
static void SetFailedAccessCheckCallbackFunction(FailedAccessCheckCallback);
/** Callback for user object comparisons */
- static void SetUserObjectComparisonCallbackFunction(UserObjectComparisonCallback);
+ static void SetUserObjectComparisonCallbackFunction(
+ UserObjectComparisonCallback);
/**
* Enables the host application to receive a notification before a
@@ -3300,7 +3375,7 @@ class V8EXPORT V8 {
* or delete properties for example) since it is possible such
* operations will result in the allocation of objects.
*/
- static void SetGlobalGCPrologueCallback(GCCallback);
+ V8_DEPRECATED(static void SetGlobalGCPrologueCallback(GCCallback));
/**
* Enables the host application to receive a notification after a
@@ -3329,7 +3404,7 @@ class V8EXPORT V8 {
* or delete properties for example) since it is possible such
* operations will result in the allocation of objects.
*/
- static void SetGlobalGCEpilogueCallback(GCCallback);
+ V8_DEPRECATED(static void SetGlobalGCEpilogueCallback(GCCallback));
/**
* Enables the host application to provide a mechanism to be notified
@@ -3373,6 +3448,10 @@ class V8EXPORT V8 {
static void AddObjectGroup(Persistent<Value>* objects,
size_t length,
RetainedObjectInfo* info = NULL);
+ static void AddObjectGroup(Isolate* isolate,
+ Persistent<Value>* objects,
+ size_t length,
+ RetainedObjectInfo* info = NULL);
/**
* Allows the host application to declare implicit references between
@@ -3550,10 +3629,8 @@ class V8EXPORT V8 {
*/
static bool Dispose();
- /**
- * Get statistics about the heap memory usage.
- */
- static void GetHeapStatistics(HeapStatistics* heap_statistics);
+ /** Deprecated. Use Isolate::GetHeapStatistics instead. */
+ V8_DEPRECATED(static void GetHeapStatistics(HeapStatistics* heap_statistics));
/**
* Iterates through all external resources referenced from current isolate
@@ -3569,6 +3646,16 @@ class V8EXPORT V8 {
static void VisitHandlesWithClassIds(PersistentHandleVisitor* visitor);
/**
+ * Iterates through all the persistent handles in the current isolate's heap
+ * that have class_ids and are candidates to be marked as partially dependent
+ * handles. This will visit handles to young objects created since the last
+ * garbage collection but is free to visit an arbitrary superset of these
+ * objects.
+ */
+ static void VisitHandlesForPartialDependence(
+ Isolate* isolate, PersistentHandleVisitor* visitor);
+
+ /**
* Optional notification that the embedder is idle.
* V8 uses the notification to reduce memory footprint.
* This call can be used repeatedly if the embedder remains idle.
@@ -3599,28 +3686,17 @@ class V8EXPORT V8 {
private:
V8();
- static internal::Object** GlobalizeReference(internal::Object** handle);
- static void DisposeGlobal(internal::Object** global_handle);
+ static internal::Object** GlobalizeReference(internal::Isolate* isolate,
+ internal::Object** handle);
static void DisposeGlobal(internal::Isolate* isolate,
internal::Object** global_handle);
- static void MakeWeak(internal::Object** global_handle,
+ static void MakeWeak(internal::Isolate* isolate,
+ internal::Object** global_handle,
void* data,
- WeakReferenceCallback);
- static void ClearWeak(internal::Object** global_handle);
- static void MarkIndependent(internal::Object** global_handle);
- static void MarkIndependent(internal::Isolate* isolate,
- internal::Object** global_handle);
- static void MarkPartiallyDependent(internal::Object** global_handle);
- static void MarkPartiallyDependent(internal::Isolate* isolate,
- internal::Object** global_handle);
- static bool IsGlobalIndependent(internal::Object** global_handle);
- static bool IsGlobalIndependent(internal::Isolate* isolate,
- internal::Object** global_handle);
- static bool IsGlobalNearDeath(internal::Object** global_handle);
- static bool IsGlobalWeak(internal::Object** global_handle);
- static void SetWrapperClassId(internal::Object** global_handle,
- uint16_t class_id);
- static uint16_t GetWrapperClassId(internal::Object** global_handle);
+ WeakReferenceCallback weak_reference_callback,
+ NearDeathCallback near_death_callback);
+ static void ClearWeak(internal::Isolate* isolate,
+ internal::Object** global_handle);
template <class T> friend class Handle;
template <class T> friend class Local;
@@ -3635,7 +3711,9 @@ class V8EXPORT V8 {
class V8EXPORT TryCatch {
public:
/**
- * Creates a new try/catch block and registers it with v8.
+ * Creates a new try/catch block and registers it with v8. Note that
+ * all TryCatch blocks should be stack allocated because the memory
+ * location itself is compared against JavaScript try/catch blocks.
*/
TryCatch();
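
A sketch of the stack-allocated usage the new comment mandates; 'script' is assumed to be a previously compiled v8::Script:

    v8::TryCatch try_catch;
    v8::Local<v8::Value> result = script->Run();
    if (result.IsEmpty() && try_catch.HasCaught()) {
      v8::String::Utf8Value error(try_catch.Exception());
      // report *error ...
    }
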
@@ -3725,6 +3803,12 @@ class V8EXPORT TryCatch {
void SetCaptureMessage(bool value);
private:
+ // Make it hard to create heap-allocated TryCatch blocks.
+ TryCatch(const TryCatch&);
+ void operator=(const TryCatch&);
+ void* operator new(size_t size);
+ void operator delete(void*, size_t);
+
v8::internal::Isolate* isolate_;
void* next_;
void* exception_;
@@ -3867,13 +3951,37 @@ class V8EXPORT Context {
/** Returns true if V8 has a current context. */
static bool InContext();
+ /** Returns an isolate associated with a current context. */
+ v8::Isolate* GetIsolate();
+
+ /**
+ * Gets the embedder data with the given index, which must have been set by a
+ * previous call to SetEmbedderData with the same index. Note that index 0
+ * currently has a special meaning for Chrome's debugger.
+ */
+ V8_INLINE(Local<Value> GetEmbedderData(int index));
+
+ /**
+ * Sets the embedder data with the given index, growing the data as
+ * needed. Note that index 0 currently has a special meaning for Chrome's
+ * debugger.
+ */
+ void SetEmbedderData(int index, Handle<Value> value);
+
+ /**
+ * Gets a 2-byte-aligned native pointer from the embedder data with the given
+ * index, which must have been set by a previous call to
+ * SetAlignedPointerInEmbedderData with the same index. Note that index 0
+ * currently has a special meaning for Chrome's debugger.
+ */
+ V8_INLINE(void* GetAlignedPointerFromEmbedderData(int index));
+
/**
- * Associate an additional data object with the context. This is mainly used
- * with the debugger to provide additional information on the context through
- * the debugger API.
+ * Sets a 2-byte-aligned native pointer in the embedder data with the given
+ * index, growing the data as needed. Note that index 0 currently has a
+ * special meaning for Chrome's debugger.
*/
- void SetData(Handle<Value> data);
- Local<Value> GetData();
+ void SetAlignedPointerInEmbedderData(int index, void* value);
/**
* Control whether code generation from strings is allowed. Calling
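
A sketch of how the indexed embedder-data API above replaces the removed SetData()/GetData() pair. The names AttachData, ReadData, and kMyDataIndex are hypothetical, and the slot choice assumes index 0 stays reserved for Chrome's debugger:

    // Sketch: indexed embedder data; embedders typically start at index 1.
    static const int kMyDataIndex = 1;       // assumed free slot

    void AttachData(v8::Handle<v8::Context> context,
                    v8::Handle<v8::Value> value, void* native) {
      context->SetEmbedderData(kMyDataIndex, value);
      // Pointers must be at least 2-byte aligned (stored as a Smi).
      context->SetAlignedPointerInEmbedderData(kMyDataIndex + 1, native);
    }

    v8::Local<v8::Value> ReadData(v8::Handle<v8::Context> context) {
      return context->GetEmbedderData(kMyDataIndex);
    }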
@@ -3909,10 +4017,10 @@ class V8EXPORT Context {
*/
class Scope {
public:
- explicit inline Scope(Handle<Context> context) : context_(context) {
+ explicit V8_INLINE(Scope(Handle<Context> context)) : context_(context) {
context_->Enter();
}
- inline ~Scope() { context_->Exit(); }
+ V8_INLINE(~Scope()) { context_->Exit(); }
private:
Handle<Context> context_;
};
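
The Scope helper pairs Enter()/Exit() with a C++ block; a minimal usage sketch, assuming `context` is a live Handle<Context>:

    {
      v8::Context::Scope scope(context);   // calls context->Enter()
      // ... run scripts against 'context' here ...
    }                                      // context->Exit() in ~Scope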
@@ -3922,25 +4030,26 @@ class V8EXPORT Context {
friend class Script;
friend class Object;
friend class Function;
+
+ Local<Value> SlowGetEmbedderData(int index);
+ void* SlowGetAlignedPointerFromEmbedderData(int index);
};
/**
- * Multiple threads in V8 are allowed, but only one thread at a time
- * is allowed to use any given V8 isolate. See Isolate class
- * comments. The definition of 'using V8 isolate' includes
- * accessing handles or holding onto object pointers obtained
- * from V8 handles while in the particular V8 isolate. It is up
- * to the user of V8 to ensure (perhaps with locking) that this
- * constraint is not violated. In addition to any other synchronization
- * mechanism that may be used, the v8::Locker and v8::Unlocker classes
- * must be used to signal thead switches to V8.
+ * Multiple threads in V8 are allowed, but only one thread at a time is allowed
+ * to use any given V8 isolate; see the comments in the Isolate class. The
+ * definition of 'using a V8 isolate' includes accessing handles or holding onto
+ * object pointers obtained from V8 handles while in the particular V8 isolate.
+ * It is up to the user of V8 to ensure, perhaps with locking, that this
+ * constraint is not violated. In addition to any other synchronization
+ * mechanism that may be used, the v8::Locker and v8::Unlocker classes must be
+ * used to signal thread switches to V8.
*
- * v8::Locker is a scoped lock object. While it's
- * active (i.e. between its construction and destruction) the current thread is
- * allowed to use the locked isolate. V8 guarantees that an isolate can be
- * locked by at most one thread at any time. In other words, the scope of a
- * v8::Locker is a critical section.
+ * v8::Locker is a scoped lock object. While it's active, i.e. between its
+ * construction and destruction, the current thread is allowed to use the locked
+ * isolate. V8 guarantees that an isolate can be locked by at most one thread at
+ * any time. In other words, the scope of a v8::Locker is a critical section.
*
* Sample usage:
* \code
@@ -3954,9 +4063,9 @@ class V8EXPORT Context {
* } // Destructor called here
* \endcode
*
- * If you wish to stop using V8 in a thread A you can do this either
- * by destroying the v8::Locker object as above or by constructing a
- * v8::Unlocker object:
+ * If you wish to stop using V8 in a thread A you can do this either by
+ * destroying the v8::Locker object as above or by constructing a v8::Unlocker
+ * object:
*
* \code
* {
@@ -3969,19 +4078,17 @@ class V8EXPORT Context {
* isolate->Enter();
* \endcode
*
- * The Unlocker object is intended for use in a long-running callback
- * from V8, where you want to release the V8 lock for other threads to
- * use.
+ * The Unlocker object is intended for use in a long-running callback from V8,
+ * where you want to release the V8 lock for other threads to use.
*
- * The v8::Locker is a recursive lock. That is, you can lock more than
- * once in a given thread. This can be useful if you have code that can
- * be called either from code that holds the lock or from code that does
- * not. The Unlocker is not recursive so you can not have several
- * Unlockers on the stack at once, and you can not use an Unlocker in a
- * thread that is not inside a Locker's scope.
+ * The v8::Locker is a recursive lock, i.e. you can lock more than once in a
+ * given thread. This can be useful if you have code that can be called either
+ * from code that holds the lock or from code that does not. The Unlocker is
+ * not recursive so you cannot have several Unlockers on the stack at once, and
+ * you cannot use an Unlocker in a thread that is not inside a Locker's scope.
*
- * An unlocker will unlock several lockers if it has to and reinstate
- * the correct depth of locking on its destruction. eg.:
+ * An unlocker will unlock several lockers if it has to and reinstate the
+ * correct depth of locking on its destruction, e.g.:
*
* \code
* // V8 not locked.
@@ -4004,17 +4111,21 @@ class V8EXPORT Context {
* }
* // V8 Now no longer locked.
* \endcode
- *
- *
*/
class V8EXPORT Unlocker {
public:
/**
- * Initialize Unlocker for a given Isolate. NULL means default isolate.
+ * Initialize Unlocker for a given Isolate.
*/
- explicit Unlocker(Isolate* isolate = NULL);
+ V8_INLINE(explicit Unlocker(Isolate* isolate)) { Initialize(isolate); }
+
+ /** Deprecated. Use Isolate version instead. */
+ V8_DEPRECATED(Unlocker());
+
~Unlocker();
private:
+ void Initialize(Isolate* isolate);
+
internal::Isolate* isolate_;
};
@@ -4022,9 +4133,13 @@ class V8EXPORT Unlocker {
class V8EXPORT Locker {
public:
/**
- * Initialize Locker for a given Isolate. NULL means default isolate.
+ * Initialize Locker for a given Isolate.
*/
- explicit Locker(Isolate* isolate = NULL);
+ V8_INLINE(explicit Locker(Isolate* isolate)) { Initialize(isolate); }
+
+ /** Deprecated. Use Isolate version instead. */
+ V8_DEPRECATED(Locker());
+
~Locker();
/**
@@ -4042,10 +4157,10 @@ class V8EXPORT Locker {
static void StopPreemption();
/**
- * Returns whether or not the locker for a given isolate, or default isolate
- * if NULL is given, is locked by the current thread.
+ * Returns whether or not the locker for a given isolate is locked by the
+ * current thread.
*/
- static bool IsLocked(Isolate* isolate = NULL);
+ static bool IsLocked(Isolate* isolate);
/**
* Returns whether v8::Locker is being used by this V8 instance.
@@ -4053,6 +4168,8 @@ class V8EXPORT Locker {
static bool IsActive();
private:
+ void Initialize(Isolate* isolate);
+
bool has_lock_;
bool top_level_;
internal::Isolate* isolate_;
@@ -4153,47 +4270,27 @@ template <size_t ptr_size> struct SmiTagging;
template <> struct SmiTagging<4> {
static const int kSmiShiftSize = 0;
static const int kSmiValueSize = 31;
- static inline int SmiToInt(internal::Object* value) {
+ V8_INLINE(static int SmiToInt(internal::Object* value)) {
int shift_bits = kSmiTagSize + kSmiShiftSize;
// Throw away top 32 bits and shift down (requires >> to be sign extending).
return static_cast<int>(reinterpret_cast<intptr_t>(value)) >> shift_bits;
}
-
- // For 32-bit systems any 2 bytes aligned pointer can be encoded as smi
- // with a plain reinterpret_cast.
- static const uintptr_t kEncodablePointerMask = 0x1;
- static const int kPointerToSmiShift = 0;
};
// Smi constants for 64-bit systems.
template <> struct SmiTagging<8> {
static const int kSmiShiftSize = 31;
static const int kSmiValueSize = 32;
- static inline int SmiToInt(internal::Object* value) {
+ V8_INLINE(static int SmiToInt(internal::Object* value)) {
int shift_bits = kSmiTagSize + kSmiShiftSize;
// Shift down and throw away top 32 bits.
return static_cast<int>(reinterpret_cast<intptr_t>(value) >> shift_bits);
}
-
- // To maximize the range of pointers that can be encoded
- // in the available 32 bits, we require them to be 8 bytes aligned.
- // This gives 2 ^ (32 + 3) = 32G address space covered.
- // It might be not enough to cover stack allocated objects on some platforms.
- static const int kPointerAlignment = 3;
-
- static const uintptr_t kEncodablePointerMask =
- ~(uintptr_t(0xffffffff) << kPointerAlignment);
-
- static const int kPointerToSmiShift =
- kSmiTagSize + kSmiShiftSize - kPointerAlignment;
};
typedef SmiTagging<kApiPointerSize> PlatformSmiTagging;
const int kSmiShiftSize = PlatformSmiTagging::kSmiShiftSize;
const int kSmiValueSize = PlatformSmiTagging::kSmiValueSize;
-const uintptr_t kEncodablePointerMask =
- PlatformSmiTagging::kEncodablePointerMask;
-const int kPointerToSmiShift = PlatformSmiTagging::kPointerToSmiShift;
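
A worked example of the two SmiToInt layouts above may help; the constants are taken from the structs just shown, with kSmiTagSize == 1 assumed from the surrounding header:

    // 32-bit layout (kSmiShiftSize == 0): the integer 5 is stored as the
    // tagged word (5 << 1) == 0xA, and SmiToInt recovers it with a
    // sign-extending shift: (intptr_t)0xA >> 1 == 5.
    // 64-bit layout (kSmiShiftSize == 31): the payload sits in the upper
    // 32 bits, so 5 is stored as (5LL << 32) and recovered by an
    // arithmetic shift right of kSmiTagSize + kSmiShiftSize == 32 bits.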
/**
* This class exports constants and functionality from within v8 that
@@ -4211,6 +4308,9 @@ class Internals {
static const int kOddballKindOffset = 3 * kApiPointerSize;
static const int kForeignAddressOffset = kApiPointerSize;
static const int kJSObjectHeaderSize = 3 * kApiPointerSize;
+ static const int kFixedArrayHeaderSize = 2 * kApiPointerSize;
+ static const int kContextHeaderSize = 2 * kApiPointerSize;
+ static const int kContextEmbedderDataIndex = 55;
static const int kFullStringRepresentationMask = 0x07;
static const int kStringEncodingMask = 0x4;
static const int kExternalTwoByteRepresentationTag = 0x02;
@@ -4223,95 +4323,121 @@ class Internals {
static const int kNullValueRootIndex = 7;
static const int kTrueValueRootIndex = 8;
static const int kFalseValueRootIndex = 9;
- static const int kEmptySymbolRootIndex = 118;
+ static const int kEmptyStringRootIndex = 119;
- static const int kJSObjectType = 0xaa;
+ static const int kNodeClassIdOffset = 1 * kApiPointerSize;
+ static const int kNodeFlagsOffset = 1 * kApiPointerSize + 3;
+ static const int kNodeStateMask = 0xf;
+ static const int kNodeStateIsWeakValue = 2;
+ static const int kNodeStateIsNearDeathValue = 4;
+ static const int kNodeIsIndependentShift = 4;
+ static const int kNodeIsPartiallyDependentShift = 5;
+
+ static const int kJSObjectType = 0xae;
static const int kFirstNonstringType = 0x80;
- static const int kOddballType = 0x82;
- static const int kForeignType = 0x85;
+ static const int kOddballType = 0x83;
+ static const int kForeignType = 0x86;
static const int kUndefinedOddballKind = 5;
static const int kNullOddballKind = 3;
- static inline bool HasHeapObjectTag(internal::Object* value) {
+ V8_INLINE(static bool HasHeapObjectTag(internal::Object* value)) {
return ((reinterpret_cast<intptr_t>(value) & kHeapObjectTagMask) ==
kHeapObjectTag);
}
- static inline bool HasSmiTag(internal::Object* value) {
- return ((reinterpret_cast<intptr_t>(value) & kSmiTagMask) == kSmiTag);
- }
-
- static inline int SmiValue(internal::Object* value) {
+ V8_INLINE(static int SmiValue(internal::Object* value)) {
return PlatformSmiTagging::SmiToInt(value);
}
- static inline int GetInstanceType(internal::Object* obj) {
+ V8_INLINE(static int GetInstanceType(internal::Object* obj)) {
typedef internal::Object O;
O* map = ReadField<O*>(obj, kHeapObjectMapOffset);
return ReadField<uint8_t>(map, kMapInstanceTypeOffset);
}
- static inline int GetOddballKind(internal::Object* obj) {
+ V8_INLINE(static int GetOddballKind(internal::Object* obj)) {
typedef internal::Object O;
return SmiValue(ReadField<O*>(obj, kOddballKindOffset));
}
- static inline void* GetExternalPointerFromSmi(internal::Object* value) {
- const uintptr_t address = reinterpret_cast<uintptr_t>(value);
- return reinterpret_cast<void*>(address >> kPointerToSmiShift);
- }
-
- static inline void* GetExternalPointer(internal::Object* obj) {
- if (HasSmiTag(obj)) {
- return GetExternalPointerFromSmi(obj);
- } else if (GetInstanceType(obj) == kForeignType) {
- return ReadField<void*>(obj, kForeignAddressOffset);
- } else {
- return NULL;
- }
- }
-
- static inline bool IsExternalTwoByteString(int instance_type) {
+ V8_INLINE(static bool IsExternalTwoByteString(int instance_type)) {
int representation = (instance_type & kFullStringRepresentationMask);
return representation == kExternalTwoByteRepresentationTag;
}
- static inline bool IsInitialized(v8::Isolate* isolate) {
+ V8_INLINE(static bool IsInitialized(v8::Isolate* isolate)) {
uint8_t* addr = reinterpret_cast<uint8_t*>(isolate) + kIsolateStateOffset;
return *reinterpret_cast<int*>(addr) == 1;
}
- static inline void SetEmbedderData(v8::Isolate* isolate, void* data) {
+ V8_INLINE(static uint8_t GetNodeFlag(internal::Object** obj, int shift)) {
+ uint8_t* addr = reinterpret_cast<uint8_t*>(obj) + kNodeFlagsOffset;
+ return *addr & (1 << shift);
+ }
+
+ V8_INLINE(static void UpdateNodeFlag(internal::Object** obj,
+ bool value, int shift)) {
+ uint8_t* addr = reinterpret_cast<uint8_t*>(obj) + kNodeFlagsOffset;
+ uint8_t mask = 1 << shift;
+ *addr = (*addr & ~mask) | (value << shift);
+ }
+
+ V8_INLINE(static uint8_t GetNodeState(internal::Object** obj)) {
+ uint8_t* addr = reinterpret_cast<uint8_t*>(obj) + kNodeFlagsOffset;
+ return *addr & kNodeStateMask;
+ }
+
+ V8_INLINE(static void UpdateNodeState(internal::Object** obj,
+ uint8_t value)) {
+ uint8_t* addr = reinterpret_cast<uint8_t*>(obj) + kNodeFlagsOffset;
+ *addr = (*addr & ~kNodeStateMask) | value;
+ }
+
+ V8_INLINE(static void SetEmbedderData(v8::Isolate* isolate, void* data)) {
uint8_t* addr = reinterpret_cast<uint8_t*>(isolate) +
kIsolateEmbedderDataOffset;
*reinterpret_cast<void**>(addr) = data;
}
- static inline void* GetEmbedderData(v8::Isolate* isolate) {
+ V8_INLINE(static void* GetEmbedderData(v8::Isolate* isolate)) {
uint8_t* addr = reinterpret_cast<uint8_t*>(isolate) +
kIsolateEmbedderDataOffset;
return *reinterpret_cast<void**>(addr);
}
- static inline internal::Object** GetRoot(v8::Isolate* isolate, int index) {
+ V8_INLINE(static internal::Object** GetRoot(v8::Isolate* isolate,
+ int index)) {
uint8_t* addr = reinterpret_cast<uint8_t*>(isolate) + kIsolateRootsOffset;
return reinterpret_cast<internal::Object**>(addr + index * kApiPointerSize);
}
template <typename T>
- static inline T ReadField(Object* ptr, int offset) {
+ V8_INLINE(static T ReadField(Object* ptr, int offset)) {
uint8_t* addr = reinterpret_cast<uint8_t*>(ptr) + offset - kHeapObjectTag;
return *reinterpret_cast<T*>(addr);
}
- static inline bool CanCastToHeapObject(void*) { return false; }
- static inline bool CanCastToHeapObject(Context*) { return true; }
- static inline bool CanCastToHeapObject(String*) { return true; }
- static inline bool CanCastToHeapObject(Object*) { return true; }
- static inline bool CanCastToHeapObject(Message*) { return true; }
- static inline bool CanCastToHeapObject(StackTrace*) { return true; }
- static inline bool CanCastToHeapObject(StackFrame*) { return true; }
+ template <typename T>
+ V8_INLINE(static T ReadEmbedderData(Context* context, int index)) {
+ typedef internal::Object O;
+ typedef internal::Internals I;
+ O* ctx = *reinterpret_cast<O**>(context);
+ int embedder_data_offset = I::kContextHeaderSize +
+ (internal::kApiPointerSize * I::kContextEmbedderDataIndex);
+ O* embedder_data = I::ReadField<O*>(ctx, embedder_data_offset);
+ int value_offset =
+ I::kFixedArrayHeaderSize + (internal::kApiPointerSize * index);
+ return I::ReadField<T>(embedder_data, value_offset);
+ }
+
+ V8_INLINE(static bool CanCastToHeapObject(void*)) { return false; }
+ V8_INLINE(static bool CanCastToHeapObject(Context*)) { return true; }
+ V8_INLINE(static bool CanCastToHeapObject(String*)) { return true; }
+ V8_INLINE(static bool CanCastToHeapObject(Object*)) { return true; }
+ V8_INLINE(static bool CanCastToHeapObject(Message*)) { return true; }
+ V8_INLINE(static bool CanCastToHeapObject(StackTrace*)) { return true; }
+ V8_INLINE(static bool CanCastToHeapObject(StackFrame*)) { return true; }
};
} // namespace internal
@@ -4335,46 +4461,82 @@ Local<T> Local<T>::New(Handle<T> that) {
template <class T>
+Local<T> Local<T>::New(Isolate* isolate, Handle<T> that) {
+ if (that.IsEmpty()) return Local<T>();
+ T* that_ptr = *that;
+ internal::Object** p = reinterpret_cast<internal::Object**>(that_ptr);
+ return Local<T>(reinterpret_cast<T*>(HandleScope::CreateHandle(
+ reinterpret_cast<internal::Isolate*>(isolate), *p)));
+}
+
+
+template <class T>
Persistent<T> Persistent<T>::New(Handle<T> that) {
+ return New(Isolate::GetCurrent(), that);
+}
+
+
+template <class T>
+Persistent<T> Persistent<T>::New(Isolate* isolate, Handle<T> that) {
if (that.IsEmpty()) return Persistent<T>();
internal::Object** p = reinterpret_cast<internal::Object**>(*that);
- return Persistent<T>(reinterpret_cast<T*>(V8::GlobalizeReference(p)));
+ return Persistent<T>(reinterpret_cast<T*>(
+ V8::GlobalizeReference(reinterpret_cast<internal::Isolate*>(isolate),
+ p)));
}
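
A usage sketch of the new isolate-aware overload, which skips the Isolate::GetCurrent() lookup performed by the one-argument form; MakePersistent is a hypothetical helper:

    v8::Persistent<v8::Object> MakePersistent(v8::Isolate* isolate,
                                              v8::Handle<v8::Object> obj) {
      return v8::Persistent<v8::Object>::New(isolate, obj);
    }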
template <class T>
bool Persistent<T>::IsIndependent() const {
- if (this->IsEmpty()) return false;
- return V8::IsGlobalIndependent(reinterpret_cast<internal::Object**>(**this));
+ return IsIndependent(Isolate::GetCurrent());
}
template <class T>
bool Persistent<T>::IsIndependent(Isolate* isolate) const {
+ typedef internal::Internals I;
if (this->IsEmpty()) return false;
- return V8::IsGlobalIndependent(reinterpret_cast<internal::Isolate*>(isolate),
- reinterpret_cast<internal::Object**>(**this));
+ if (!I::IsInitialized(isolate)) return false;
+ return I::GetNodeFlag(reinterpret_cast<internal::Object**>(**this),
+ I::kNodeIsIndependentShift);
}
template <class T>
bool Persistent<T>::IsNearDeath() const {
+ return IsNearDeath(Isolate::GetCurrent());
+}
+
+
+template <class T>
+bool Persistent<T>::IsNearDeath(Isolate* isolate) const {
+ typedef internal::Internals I;
if (this->IsEmpty()) return false;
- return V8::IsGlobalNearDeath(reinterpret_cast<internal::Object**>(**this));
+ if (!I::IsInitialized(isolate)) return false;
+ return I::GetNodeState(reinterpret_cast<internal::Object**>(**this)) ==
+ I::kNodeStateIsNearDeathValue;
}
template <class T>
bool Persistent<T>::IsWeak() const {
+ return IsWeak(Isolate::GetCurrent());
+}
+
+
+template <class T>
+bool Persistent<T>::IsWeak(Isolate* isolate) const {
+ typedef internal::Internals I;
if (this->IsEmpty()) return false;
- return V8::IsGlobalWeak(reinterpret_cast<internal::Object**>(**this));
+ if (!I::IsInitialized(isolate)) return false;
+ return I::GetNodeState(reinterpret_cast<internal::Object**>(**this)) ==
+ I::kNodeStateIsWeakValue;
}
template <class T>
void Persistent<T>::Dispose() {
- if (this->IsEmpty()) return;
- V8::DisposeGlobal(reinterpret_cast<internal::Object**>(**this));
+ Dispose(Isolate::GetCurrent());
}
@@ -4391,46 +4553,94 @@ Persistent<T>::Persistent() : Handle<T>() { }
template <class T>
void Persistent<T>::MakeWeak(void* parameters, WeakReferenceCallback callback) {
- V8::MakeWeak(reinterpret_cast<internal::Object**>(**this),
+ Isolate* isolate = Isolate::GetCurrent();
+ V8::MakeWeak(reinterpret_cast<internal::Isolate*>(isolate),
+ reinterpret_cast<internal::Object**>(**this),
+ parameters,
+ callback,
+ NULL);
+}
+
+template <class T>
+void Persistent<T>::MakeWeak(Isolate* isolate,
+ void* parameters,
+ NearDeathCallback callback) {
+ V8::MakeWeak(reinterpret_cast<internal::Isolate*>(isolate),
+ reinterpret_cast<internal::Object**>(**this),
parameters,
+ NULL,
callback);
}
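
A sketch of the near-death path introduced here, assuming the NearDeathCallback signature void(v8::Isolate*, v8::Persistent<v8::Value>, void*) from elsewhere in this header; MyWrapper, OnNearDeath, and Watch are hypothetical:

    struct MyWrapper { /* native state tied to a JS object */ };

    void OnNearDeath(v8::Isolate* isolate,
                     v8::Persistent<v8::Value> object,
                     void* parameter) {
      delete static_cast<MyWrapper*>(parameter);  // reclaim native state
      object.Dispose(isolate);                    // release the handle
    }

    void Watch(v8::Isolate* isolate, v8::Persistent<v8::Object> handle,
               MyWrapper* wrapper) {
      handle.MakeWeak(isolate, wrapper, OnNearDeath);
    }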
template <class T>
void Persistent<T>::ClearWeak() {
- V8::ClearWeak(reinterpret_cast<internal::Object**>(**this));
+ ClearWeak(Isolate::GetCurrent());
+}
+
+template <class T>
+void Persistent<T>::ClearWeak(Isolate* isolate) {
+ V8::ClearWeak(reinterpret_cast<internal::Isolate*>(isolate),
+ reinterpret_cast<internal::Object**>(**this));
}
template <class T>
void Persistent<T>::MarkIndependent() {
- V8::MarkIndependent(reinterpret_cast<internal::Object**>(**this));
+ MarkIndependent(Isolate::GetCurrent());
}
template <class T>
void Persistent<T>::MarkIndependent(Isolate* isolate) {
- V8::MarkIndependent(reinterpret_cast<internal::Isolate*>(isolate),
- reinterpret_cast<internal::Object**>(**this));
+ typedef internal::Internals I;
+ if (this->IsEmpty()) return;
+ if (!I::IsInitialized(isolate)) return;
+ I::UpdateNodeFlag(reinterpret_cast<internal::Object**>(**this),
+ true,
+ I::kNodeIsIndependentShift);
}
template <class T>
void Persistent<T>::MarkPartiallyDependent() {
- V8::MarkPartiallyDependent(reinterpret_cast<internal::Object**>(**this));
+ MarkPartiallyDependent(Isolate::GetCurrent());
}
template <class T>
void Persistent<T>::MarkPartiallyDependent(Isolate* isolate) {
- V8::MarkPartiallyDependent(reinterpret_cast<internal::Isolate*>(isolate),
- reinterpret_cast<internal::Object**>(**this));
+ typedef internal::Internals I;
+ if (this->IsEmpty()) return;
+ if (!I::IsInitialized(isolate)) return;
+ I::UpdateNodeFlag(reinterpret_cast<internal::Object**>(**this),
+ true,
+ I::kNodeIsPartiallyDependentShift);
}
template <class T>
void Persistent<T>::SetWrapperClassId(uint16_t class_id) {
- V8::SetWrapperClassId(reinterpret_cast<internal::Object**>(**this), class_id);
+ SetWrapperClassId(Isolate::GetCurrent(), class_id);
+}
+
+template <class T>
+void Persistent<T>::SetWrapperClassId(Isolate* isolate, uint16_t class_id) {
+ typedef internal::Internals I;
+ if (this->IsEmpty()) return;
+ if (!I::IsInitialized(isolate)) return;
+ internal::Object** obj = reinterpret_cast<internal::Object**>(**this);
+ uint8_t* addr = reinterpret_cast<uint8_t*>(obj) + I::kNodeClassIdOffset;
+ *reinterpret_cast<uint16_t*>(addr) = class_id;
}
template <class T>
uint16_t Persistent<T>::WrapperClassId() const {
- return V8::GetWrapperClassId(reinterpret_cast<internal::Object**>(**this));
+ return WrapperClassId(Isolate::GetCurrent());
+}
+
+template <class T>
+uint16_t Persistent<T>::WrapperClassId(Isolate* isolate) const {
+ typedef internal::Internals I;
+ if (this->IsEmpty()) return 0;
+ if (!I::IsInitialized(isolate)) return 0;
+ internal::Object** obj = reinterpret_cast<internal::Object**>(**this);
+ uint8_t* addr = reinterpret_cast<uint8_t*>(obj) + I::kNodeClassIdOffset;
+ return *reinterpret_cast<uint16_t*>(addr);
}
Arguments::Arguments(internal::Object** implicit_args,
@@ -4519,63 +4729,35 @@ void Template::Set(const char* name, v8::Handle<Data> value) {
Local<Value> Object::GetInternalField(int index) {
#ifndef V8_ENABLE_CHECKS
- Local<Value> quick_result = UncheckedGetInternalField(index);
- if (!quick_result.IsEmpty()) return quick_result;
-#endif
- return CheckedGetInternalField(index);
-}
-
-
-Local<Value> Object::UncheckedGetInternalField(int index) {
typedef internal::Object O;
typedef internal::Internals I;
O* obj = *reinterpret_cast<O**>(this);
+ // Fast path: If the object is a plain JSObject, which is the common case, we
+ // know where to find the internal fields and can return the value directly.
if (I::GetInstanceType(obj) == I::kJSObjectType) {
- // If the object is a plain JSObject, which is the common case,
- // we know where to find the internal fields and can return the
- // value directly.
int offset = I::kJSObjectHeaderSize + (internal::kApiPointerSize * index);
O* value = I::ReadField<O*>(obj, offset);
O** result = HandleScope::CreateHandle(value);
return Local<Value>(reinterpret_cast<Value*>(result));
- } else {
- return Local<Value>();
}
-}
-
-
-void* External::Unwrap(Handle<v8::Value> obj) {
-#ifdef V8_ENABLE_CHECKS
- return FullUnwrap(obj);
-#else
- return QuickUnwrap(obj);
#endif
+ return SlowGetInternalField(index);
}
-void* External::QuickUnwrap(Handle<v8::Value> wrapper) {
- typedef internal::Object O;
- O* obj = *reinterpret_cast<O**>(const_cast<v8::Value*>(*wrapper));
- return internal::Internals::GetExternalPointer(obj);
-}
-
-
-void* Object::GetPointerFromInternalField(int index) {
+void* Object::GetAlignedPointerFromInternalField(int index) {
+#ifndef V8_ENABLE_CHECKS
typedef internal::Object O;
typedef internal::Internals I;
-
O* obj = *reinterpret_cast<O**>(this);
-
+ // Fast path: If the object is a plain JSObject, which is the common case, we
+ // know where to find the internal fields and can return the value directly.
if (I::GetInstanceType(obj) == I::kJSObjectType) {
- // If the object is a plain JSObject, which is the common case,
- // we know where to find the internal fields and can return the
- // value directly.
int offset = I::kJSObjectHeaderSize + (internal::kApiPointerSize * index);
- O* value = I::ReadField<O*>(obj, offset);
- return I::GetExternalPointer(value);
+ return I::ReadField<void*>(obj, offset);
}
-
- return SlowGetPointerFromInternalField(index);
+#endif
+ return SlowGetAlignedPointerFromInternalField(index);
}
@@ -4591,7 +4773,7 @@ Local<String> String::Empty(Isolate* isolate) {
typedef internal::Object* S;
typedef internal::Internals I;
if (!I::IsInitialized(isolate)) return Empty();
- S* slot = I::GetRoot(isolate, I::kEmptySymbolRootIndex);
+ S* slot = I::GetRoot(isolate, I::kEmptyStringRootIndex);
return Local<String>(reinterpret_cast<String*>(slot));
}
@@ -4843,6 +5025,28 @@ void* Isolate::GetData() {
}
+Local<Value> Context::GetEmbedderData(int index) {
+#ifndef V8_ENABLE_CHECKS
+ typedef internal::Object O;
+ typedef internal::Internals I;
+ O** result = HandleScope::CreateHandle(I::ReadEmbedderData<O*>(this, index));
+ return Local<Value>(reinterpret_cast<Value*>(result));
+#else
+ return SlowGetEmbedderData(index);
+#endif
+}
+
+
+void* Context::GetAlignedPointerFromEmbedderData(int index) {
+#ifndef V8_ENABLE_CHECKS
+ typedef internal::Internals I;
+ return I::ReadEmbedderData<void*>(this, index);
+#else
+ return SlowGetAlignedPointerFromEmbedderData(index);
+#endif
+}
+
+
/**
* \example shell.cc
* A simple shell that takes a list of expressions on the
diff --git a/src/3rdparty/v8/samples/lineprocessor.cc b/src/3rdparty/v8/samples/lineprocessor.cc
index 26e787f..6549f4c 100644
--- a/src/3rdparty/v8/samples/lineprocessor.cc
+++ b/src/3rdparty/v8/samples/lineprocessor.cc
@@ -212,9 +212,10 @@ int RunMain(int argc, char* argv[]) {
v8::Context::Scope context_scope(context);
#ifdef ENABLE_DEBUGGER_SUPPORT
- debug_message_context = v8::Persistent<v8::Context>::New(context);
+ debug_message_context =
+ v8::Persistent<v8::Context>::New(context->GetIsolate(), context);
- v8::Locker locker;
+ v8::Locker locker(context->GetIsolate());
if (support_callback) {
v8::Debug::SetDebugMessageDispatchHandler(DispatchDebugMessages, true);
@@ -265,7 +266,7 @@ int RunMain(int argc, char* argv[]) {
bool RunCppCycle(v8::Handle<v8::Script> script, v8::Local<v8::Context> context,
bool report_exceptions) {
#ifdef ENABLE_DEBUGGER_SUPPORT
- v8::Locker lock;
+ v8::Locker lock(v8::Isolate::GetCurrent());
#endif // ENABLE_DEBUGGER_SUPPORT
v8::Handle<v8::String> fun_name = v8::String::New("ProcessLine");
@@ -420,7 +421,7 @@ v8::Handle<v8::String> ReadLine() {
char* res;
{
#ifdef ENABLE_DEBUGGER_SUPPORT
- v8::Unlocker unlocker;
+ v8::Unlocker unlocker(v8::Isolate::GetCurrent());
#endif // ENABLE_DEBUGGER_SUPPORT
res = fgets(buffer, kBufferSize, stdin);
}
diff --git a/src/3rdparty/v8/samples/process.cc b/src/3rdparty/v8/samples/process.cc
index ae6a550..c3d1773 100644
--- a/src/3rdparty/v8/samples/process.cc
+++ b/src/3rdparty/v8/samples/process.cc
@@ -116,11 +116,13 @@ class JsHttpRequestProcessor : public HttpRequestProcessor {
// Utility methods for wrapping C++ objects as JavaScript objects,
// and going back again.
- static Handle<Object> WrapMap(map<string, string>* obj);
+ Handle<Object> WrapMap(map<string, string>* obj);
static map<string, string>* UnwrapMap(Handle<Object> obj);
- static Handle<Object> WrapRequest(HttpRequest* obj);
+ Handle<Object> WrapRequest(HttpRequest* obj);
static HttpRequest* UnwrapRequest(Handle<Object> obj);
+ Isolate* GetIsolate() { return context_->GetIsolate(); }
+
Handle<String> script_;
Persistent<Context> context_;
Persistent<Function> process_;
@@ -187,7 +189,7 @@ bool JsHttpRequestProcessor::Initialize(map<string, string>* opts,
// Store the function in a Persistent handle, since we also want
// that to remain after this call returns
- process_ = Persistent<Function>::New(process_fun);
+ process_ = Persistent<Function>::New(GetIsolate(), process_fun);
// All done; all went well
return true;
@@ -273,8 +275,9 @@ JsHttpRequestProcessor::~JsHttpRequestProcessor() {
// Dispose the persistent handles. When no one else has any
// references to the objects stored in the handles, they will be
// automatically reclaimed.
- context_.Dispose();
- process_.Dispose();
+ v8::Isolate* isolate = GetIsolate();
+ context_.Dispose(isolate);
+ process_.Dispose(isolate);
}
@@ -296,7 +299,7 @@ Handle<Object> JsHttpRequestProcessor::WrapMap(map<string, string>* obj) {
// It only has to be created once, which we do on demand.
if (map_template_.IsEmpty()) {
Handle<ObjectTemplate> raw_template = MakeMapTemplate();
- map_template_ = Persistent<ObjectTemplate>::New(raw_template);
+ map_template_ = Persistent<ObjectTemplate>::New(GetIsolate(), raw_template);
}
Handle<ObjectTemplate> templ = map_template_;
@@ -401,7 +404,8 @@ Handle<Object> JsHttpRequestProcessor::WrapRequest(HttpRequest* request) {
// It only has to be created once, which we do on demand.
if (request_template_.IsEmpty()) {
Handle<ObjectTemplate> raw_template = MakeRequestTemplate();
- request_template_ = Persistent<ObjectTemplate>::New(raw_template);
+ request_template_ =
+ Persistent<ObjectTemplate>::New(GetIsolate(), raw_template);
}
Handle<ObjectTemplate> templ = request_template_;
diff --git a/src/3rdparty/v8/samples/shell.cc b/src/3rdparty/v8/samples/shell.cc
index 821ef75..e9057f9 100644
--- a/src/3rdparty/v8/samples/shell.cc
+++ b/src/3rdparty/v8/samples/shell.cc
@@ -72,14 +72,14 @@ int main(int argc, char* argv[]) {
v8::HandleScope handle_scope;
v8::Persistent<v8::Context> context = CreateShellContext();
if (context.IsEmpty()) {
- printf("Error creating context\n");
+ fprintf(stderr, "Error creating context\n");
return 1;
}
context->Enter();
result = RunMain(argc, argv);
if (run_shell) RunShell(context);
context->Exit();
- context.Dispose();
+ context.Dispose(context->GetIsolate());
}
v8::V8::Dispose();
return result;
@@ -226,7 +226,8 @@ int RunMain(int argc, char* argv[]) {
// alone JavaScript engines.
continue;
} else if (strncmp(str, "--", 2) == 0) {
- printf("Warning: unknown flag %s.\nTry --help for options\n", str);
+ fprintf(stderr,
+ "Warning: unknown flag %s.\nTry --help for options\n", str);
} else if (strcmp(str, "-e") == 0 && i + 1 < argc) {
// Execute argument given to -e option directly.
v8::Handle<v8::String> file_name = v8::String::New("unnamed");
@@ -237,7 +238,7 @@ int RunMain(int argc, char* argv[]) {
v8::Handle<v8::String> file_name = v8::String::New(str);
v8::Handle<v8::String> source = ReadFile(str);
if (source.IsEmpty()) {
- printf("Error reading '%s'\n", str);
+ fprintf(stderr, "Error reading '%s'\n", str);
continue;
}
if (!ExecuteString(source, file_name, false, true)) return 1;
@@ -249,20 +250,20 @@ int RunMain(int argc, char* argv[]) {
// The read-eval-execute loop of the shell.
void RunShell(v8::Handle<v8::Context> context) {
- printf("V8 version %s [sample shell]\n", v8::V8::GetVersion());
+ fprintf(stderr, "V8 version %s [sample shell]\n", v8::V8::GetVersion());
static const int kBufferSize = 256;
// Enter the execution environment before evaluating any code.
v8::Context::Scope context_scope(context);
v8::Local<v8::String> name(v8::String::New("(shell)"));
while (true) {
char buffer[kBufferSize];
- printf("> ");
+ fprintf(stderr, "> ");
char* str = fgets(buffer, kBufferSize, stdin);
if (str == NULL) break;
v8::HandleScope handle_scope;
ExecuteString(v8::String::New(str), name, true, true);
}
- printf("\n");
+ fprintf(stderr, "\n");
}
@@ -310,31 +311,31 @@ void ReportException(v8::TryCatch* try_catch) {
if (message.IsEmpty()) {
// V8 didn't provide any extra information about this error; just
// print the exception.
- printf("%s\n", exception_string);
+ fprintf(stderr, "%s\n", exception_string);
} else {
// Print (filename):(line number): (message).
v8::String::Utf8Value filename(message->GetScriptResourceName());
const char* filename_string = ToCString(filename);
int linenum = message->GetLineNumber();
- printf("%s:%i: %s\n", filename_string, linenum, exception_string);
+ fprintf(stderr, "%s:%i: %s\n", filename_string, linenum, exception_string);
// Print line of source code.
v8::String::Utf8Value sourceline(message->GetSourceLine());
const char* sourceline_string = ToCString(sourceline);
- printf("%s\n", sourceline_string);
+ fprintf(stderr, "%s\n", sourceline_string);
// Print wavy underline (GetUnderline is deprecated).
int start = message->GetStartColumn();
for (int i = 0; i < start; i++) {
- printf(" ");
+ fprintf(stderr, " ");
}
int end = message->GetEndColumn();
for (int i = start; i < end; i++) {
- printf("^");
+ fprintf(stderr, "^");
}
- printf("\n");
+ fprintf(stderr, "\n");
v8::String::Utf8Value stack_trace(try_catch->StackTrace());
if (stack_trace.length() > 0) {
const char* stack_trace_string = ToCString(stack_trace);
- printf("%s\n", stack_trace_string);
+ fprintf(stderr, "%s\n", stack_trace_string);
}
}
}
diff --git a/src/3rdparty/v8/src/SConscript b/src/3rdparty/v8/src/SConscript
index 16bfb55..772ac4e 100755
--- a/src/3rdparty/v8/src/SConscript
+++ b/src/3rdparty/v8/src/SConscript
@@ -84,12 +84,12 @@ SOURCES = {
global-handles.cc
handles.cc
heap-profiler.cc
+ heap-snapshot-generator.cc
heap.cc
hydrogen-instructions.cc
hydrogen.cc
ic.cc
incremental-marking.cc
- inspector.cc
interface.cc
interpreter-irregexp.cc
isolate.cc
@@ -97,7 +97,6 @@ SOURCES = {
lithium-allocator.cc
lithium.cc
liveedit.cc
- liveobjectlist.cc
log-utils.cc
log.cc
mark-compact.cc
@@ -328,6 +327,7 @@ debug-debugger.js
EXPERIMENTAL_LIBRARY_FILES = '''
+symbol.js
proxy.js
collection.js
'''.split()
diff --git a/src/3rdparty/v8/src/accessors.cc b/src/3rdparty/v8/src/accessors.cc
index c2f245c..57062be 100644
--- a/src/3rdparty/v8/src/accessors.cc
+++ b/src/3rdparty/v8/src/accessors.cc
@@ -42,8 +42,8 @@ namespace internal {
template <class C>
-static C* FindInstanceOf(Object* obj) {
- for (Object* cur = obj; !cur->IsNull(); cur = cur->GetPrototype()) {
+static C* FindInstanceOf(Isolate* isolate, Object* obj) {
+ for (Object* cur = obj; !cur->IsNull(); cur = cur->GetPrototype(isolate)) {
if (Is<C>(cur)) return C::cast(cur);
}
return NULL;
@@ -77,7 +77,7 @@ MaybeObject* Accessors::ReadOnlySetAccessor(JSObject*, Object* value, void*) {
MaybeObject* Accessors::ArrayGetLength(Object* object, void*) {
// Traverse the prototype chain until we reach an array.
- JSArray* holder = FindInstanceOf<JSArray>(object);
+ JSArray* holder = FindInstanceOf<JSArray>(Isolate::Current(), object);
return holder == NULL ? Smi::FromInt(0) : holder->length();
}
@@ -95,47 +95,6 @@ Object* Accessors::FlattenNumber(Object* value) {
}
-static MaybeObject* ArraySetLengthObserved(Isolate* isolate,
- Handle<JSArray> array,
- Handle<Object> new_length_handle) {
- List<Handle<String> > indices;
- List<Handle<Object> > old_values;
- Handle<Object> old_length_handle(array->length(), isolate);
- uint32_t old_length = 0;
- CHECK(old_length_handle->ToArrayIndex(&old_length));
- uint32_t new_length = 0;
- CHECK(new_length_handle->ToArrayIndex(&new_length));
- // TODO(adamk): This loop can be very slow for arrays in dictionary mode.
- // Find another way to iterate over arrays with dictionary elements.
- for (uint32_t i = old_length - 1; i + 1 > new_length; --i) {
- PropertyAttributes attributes = array->GetLocalElementAttribute(i);
- if (attributes == ABSENT) continue;
- // A non-configurable property will cause the truncation operation to
- // stop at this index.
- if (attributes == DONT_DELETE) break;
- // TODO(adamk): Don't fetch the old value if it's an accessor.
- old_values.Add(Object::GetElement(array, i));
- indices.Add(isolate->factory()->Uint32ToString(i));
- }
-
- MaybeObject* result = array->SetElementsLength(*new_length_handle);
- Handle<Object> hresult;
- if (!result->ToHandle(&hresult)) return result;
-
- CHECK(array->length()->ToArrayIndex(&new_length));
- if (old_length != new_length) {
- for (int i = 0; i < indices.length(); ++i) {
- JSObject::EnqueueChangeRecord(
- array, "deleted", indices[i], old_values[i]);
- }
- JSObject::EnqueueChangeRecord(
- array, "updated", isolate->factory()->length_symbol(),
- old_length_handle);
- }
- return *hresult;
-}
-
-
MaybeObject* Accessors::ArraySetLength(JSObject* object, Object* value, void*) {
Isolate* isolate = object->GetIsolate();
@@ -144,7 +103,7 @@ MaybeObject* Accessors::ArraySetLength(JSObject* object, Object* value, void*) {
// causes an infinite loop.
if (!object->IsJSArray()) {
return object->SetLocalPropertyIgnoreAttributes(
- isolate->heap()->length_symbol(), value, NONE);
+ isolate->heap()->length_string(), value, NONE);
}
value = FlattenNumber(value);
@@ -163,11 +122,7 @@ MaybeObject* Accessors::ArraySetLength(JSObject* object, Object* value, void*) {
if (has_exception) return Failure::Exception();
if (uint32_v->Number() == number_v->Number()) {
- if (FLAG_harmony_observation && array_handle->map()->is_observed()) {
- return ArraySetLengthObserved(isolate, array_handle, uint32_v);
- } else {
- return array_handle->SetElementsLength(*uint32_v);
- }
+ return array_handle->SetElementsLength(*uint32_v);
}
return isolate->Throw(
*isolate->factory()->NewRangeError("invalid_array_length",
@@ -428,13 +383,14 @@ const AccessorDescriptor Accessors::ScriptEvalFromScript = {
MaybeObject* Accessors::ScriptGetEvalFromScriptPosition(Object* object, void*) {
- HandleScope scope;
- Handle<Script> script(Script::cast(JSValue::cast(object)->value()));
+ Script* raw_script = Script::cast(JSValue::cast(object)->value());
+ HandleScope scope(raw_script->GetIsolate());
+ Handle<Script> script(raw_script);
// If this is not a script compiled through eval there is no eval position.
int compilation_type = Smi::cast(script->compilation_type())->value();
if (compilation_type != Script::COMPILATION_TYPE_EVAL) {
- return HEAP->undefined_value();
+ return script->GetHeap()->undefined_value();
}
// Get the function from where eval was called and find the source position
@@ -486,18 +442,19 @@ const AccessorDescriptor Accessors::ScriptEvalFromFunctionName = {
MaybeObject* Accessors::FunctionGetPrototype(Object* object, void*) {
- Heap* heap = Isolate::Current()->heap();
- JSFunction* function = FindInstanceOf<JSFunction>(object);
- if (function == NULL) return heap->undefined_value();
+ Isolate* isolate = Isolate::Current();
+ JSFunction* function = FindInstanceOf<JSFunction>(isolate, object);
+ if (function == NULL) return isolate->heap()->undefined_value();
while (!function->should_have_prototype()) {
- function = FindInstanceOf<JSFunction>(function->GetPrototype());
+ function = FindInstanceOf<JSFunction>(isolate, function->GetPrototype());
// There has to be one because we hit the getter.
ASSERT(function != NULL);
}
if (!function->has_prototype()) {
Object* prototype;
- { MaybeObject* maybe_prototype = heap->AllocateFunctionPrototype(function);
+ { MaybeObject* maybe_prototype
+ = isolate->heap()->AllocateFunctionPrototype(function);
if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
}
Object* result;
@@ -510,24 +467,46 @@ MaybeObject* Accessors::FunctionGetPrototype(Object* object, void*) {
MaybeObject* Accessors::FunctionSetPrototype(JSObject* object,
- Object* value,
+ Object* value_raw,
void*) {
- Heap* heap = object->GetHeap();
- JSFunction* function = FindInstanceOf<JSFunction>(object);
- if (function == NULL) return heap->undefined_value();
- if (!function->should_have_prototype()) {
+ Isolate* isolate = object->GetIsolate();
+ Heap* heap = isolate->heap();
+ JSFunction* function_raw = FindInstanceOf<JSFunction>(isolate, object);
+ if (function_raw == NULL) return heap->undefined_value();
+ if (!function_raw->should_have_prototype()) {
// Since we hit this accessor, object will have no prototype property.
- return object->SetLocalPropertyIgnoreAttributes(heap->prototype_symbol(),
- value,
+ return object->SetLocalPropertyIgnoreAttributes(heap->prototype_string(),
+ value_raw,
NONE);
}
- Object* prototype;
- { MaybeObject* maybe_prototype = function->SetPrototype(value);
- if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
+ HandleScope scope(isolate);
+ Handle<JSFunction> function(function_raw, isolate);
+ Handle<Object> value(value_raw, isolate);
+
+ Handle<Object> old_value;
+ bool is_observed =
+ FLAG_harmony_observation &&
+ *function == object &&
+ function->map()->is_observed();
+ if (is_observed) {
+ if (function->has_prototype())
+ old_value = handle(function->prototype(), isolate);
+ else
+ old_value = isolate->factory()->NewFunctionPrototype(function);
}
- ASSERT(function->prototype() == value);
- return function;
+
+ Handle<Object> result;
+ MaybeObject* maybe_result = function->SetPrototype(*value);
+ if (!maybe_result->ToHandle(&result, isolate)) return maybe_result;
+ ASSERT(function->prototype() == *value);
+
+ if (is_observed && !old_value->SameValue(*value)) {
+ JSObject::EnqueueChangeRecord(
+ function, "updated", isolate->factory()->prototype_string(), old_value);
+ }
+
+ return *function;
}
@@ -544,7 +523,8 @@ const AccessorDescriptor Accessors::FunctionPrototype = {
MaybeObject* Accessors::FunctionGetLength(Object* object, void*) {
- JSFunction* function = FindInstanceOf<JSFunction>(object);
+ Isolate* isolate = Isolate::Current();
+ JSFunction* function = FindInstanceOf<JSFunction>(isolate, object);
if (function == NULL) return Smi::FromInt(0);
// Check if already compiled.
if (function->shared()->is_compiled()) {
@@ -552,7 +532,7 @@ MaybeObject* Accessors::FunctionGetLength(Object* object, void*) {
}
// If the function isn't compiled yet, the length is not computed correctly
// yet. Compile it now and return the right length.
- HandleScope scope;
+ HandleScope scope(isolate);
Handle<JSFunction> handle(function);
if (JSFunction::CompileLazy(handle, KEEP_EXCEPTION)) {
return Smi::FromInt(handle->shared()->length());
@@ -574,8 +554,11 @@ const AccessorDescriptor Accessors::FunctionLength = {
MaybeObject* Accessors::FunctionGetName(Object* object, void*) {
- JSFunction* holder = FindInstanceOf<JSFunction>(object);
- return holder == NULL ? HEAP->undefined_value() : holder->shared()->name();
+ Isolate* isolate = Isolate::Current();
+ JSFunction* holder = FindInstanceOf<JSFunction>(isolate, object);
+ return holder == NULL
+ ? isolate->heap()->undefined_value()
+ : holder->shared()->name();
}
@@ -595,7 +578,8 @@ static MaybeObject* ConstructArgumentsObjectForInlinedFunction(
JavaScriptFrame* frame,
Handle<JSFunction> inlined_function,
int inlined_frame_index) {
- Factory* factory = Isolate::Current()->factory();
+ Isolate* isolate = inlined_function->GetIsolate();
+ Factory* factory = isolate->factory();
Vector<SlotRef> args_slots =
SlotRef::ComputeSlotMappingForArguments(
frame,
@@ -606,7 +590,7 @@ static MaybeObject* ConstructArgumentsObjectForInlinedFunction(
factory->NewArgumentsObject(inlined_function, args_count);
Handle<FixedArray> array = factory->NewFixedArray(args_count);
for (int i = 0; i < args_count; ++i) {
- Handle<Object> value = args_slots[i].GetValue();
+ Handle<Object> value = args_slots[i].GetValue(isolate);
array->set(i, *value);
}
arguments->set_elements(*array);
@@ -620,7 +604,7 @@ static MaybeObject* ConstructArgumentsObjectForInlinedFunction(
MaybeObject* Accessors::FunctionGetArguments(Object* object, void*) {
Isolate* isolate = Isolate::Current();
HandleScope scope(isolate);
- JSFunction* holder = FindInstanceOf<JSFunction>(object);
+ JSFunction* holder = FindInstanceOf<JSFunction>(isolate, object);
if (holder == NULL) return isolate->heap()->undefined_value();
Handle<JSFunction> function(holder, isolate);
@@ -646,7 +630,7 @@ MaybeObject* Accessors::FunctionGetArguments(Object* object, void*) {
// If there is an arguments variable in the stack, we return that.
Handle<ScopeInfo> scope_info(function->shared()->scope_info());
int index = scope_info->StackSlotIndex(
- isolate->heap()->arguments_symbol());
+ isolate->heap()->arguments_string());
if (index >= 0) {
Handle<Object> arguments(frame->GetExpression(index), isolate);
if (!arguments->IsArgumentsMarker()) return *arguments;
@@ -694,19 +678,6 @@ const AccessorDescriptor Accessors::FunctionArguments = {
//
-static MaybeObject* CheckNonStrictCallerOrThrow(
- Isolate* isolate,
- JSFunction* caller) {
- DisableAssertNoAllocation enable_allocation;
- if (!caller->shared()->is_classic_mode()) {
- return isolate->Throw(
- *isolate->factory()->NewTypeError("strict_caller",
- HandleVector<Object>(NULL, 0)));
- }
- return caller;
-}
-
-
class FrameFunctionIterator {
public:
FrameFunctionIterator(Isolate* isolate, const AssertNoAllocation& promise)
@@ -757,7 +728,7 @@ MaybeObject* Accessors::FunctionGetCaller(Object* object, void*) {
Isolate* isolate = Isolate::Current();
HandleScope scope(isolate);
AssertNoAllocation no_alloc;
- JSFunction* holder = FindInstanceOf<JSFunction>(object);
+ JSFunction* holder = FindInstanceOf<JSFunction>(isolate, object);
if (holder == NULL) return isolate->heap()->undefined_value();
if (holder->shared()->native()) return isolate->heap()->null_value();
Handle<JSFunction> function(holder, isolate);
@@ -793,7 +764,14 @@ MaybeObject* Accessors::FunctionGetCaller(Object* object, void*) {
if (caller->shared()->bound()) {
return isolate->heap()->null_value();
}
- return CheckNonStrictCallerOrThrow(isolate, caller);
+ // Censor if the caller is not a classic mode function.
+ // Change from ES5, which used to throw, see:
+ // https://bugs.ecmascript.org/show_bug.cgi?id=310
+ if (!caller->shared()->is_classic_mode()) {
+ return isolate->heap()->null_value();
+ }
+
+ return caller;
}
@@ -809,22 +787,49 @@ const AccessorDescriptor Accessors::FunctionCaller = {
//
-MaybeObject* Accessors::ObjectGetPrototype(Object* receiver, void*) {
- Object* current = receiver->GetPrototype();
+static inline Object* GetPrototypeSkipHiddenPrototypes(Isolate* isolate,
+ Object* receiver) {
+ Object* current = receiver->GetPrototype(isolate);
while (current->IsJSObject() &&
JSObject::cast(current)->map()->is_hidden_prototype()) {
- current = current->GetPrototype();
+ current = current->GetPrototype(isolate);
}
return current;
}
-MaybeObject* Accessors::ObjectSetPrototype(JSObject* receiver,
- Object* value,
+MaybeObject* Accessors::ObjectGetPrototype(Object* receiver, void*) {
+ return GetPrototypeSkipHiddenPrototypes(Isolate::Current(), receiver);
+}
+
+
+MaybeObject* Accessors::ObjectSetPrototype(JSObject* receiver_raw,
+ Object* value_raw,
void*) {
- const bool skip_hidden_prototypes = true;
+ const bool kSkipHiddenPrototypes = true;
// To be consistent with other Set functions, return the value.
- return receiver->SetPrototype(value, skip_hidden_prototypes);
+ if (!(FLAG_harmony_observation && receiver_raw->map()->is_observed()))
+ return receiver_raw->SetPrototype(value_raw, kSkipHiddenPrototypes);
+
+ Isolate* isolate = receiver_raw->GetIsolate();
+ HandleScope scope(isolate);
+ Handle<JSObject> receiver(receiver_raw);
+ Handle<Object> value(value_raw, isolate);
+ Handle<Object> old_value(GetPrototypeSkipHiddenPrototypes(isolate, *receiver),
+ isolate);
+
+ MaybeObject* result = receiver->SetPrototype(*value, kSkipHiddenPrototypes);
+ Handle<Object> hresult;
+ if (!result->ToHandle(&hresult, isolate)) return result;
+
+ Handle<Object> new_value(GetPrototypeSkipHiddenPrototypes(isolate, *receiver),
+ isolate);
+ if (!new_value->SameValue(*old_value)) {
+ JSObject::EnqueueChangeRecord(receiver, "prototype",
+ isolate->factory()->proto_string(),
+ old_value);
+ }
+ return *hresult;
}
@@ -847,15 +852,15 @@ static v8::Handle<v8::Value> ModuleGetExport(
ASSERT(context->IsModuleContext());
int slot = info.Data()->Int32Value();
Object* value = context->get(slot);
+ Isolate* isolate = instance->GetIsolate();
if (value->IsTheHole()) {
Handle<String> name = v8::Utils::OpenHandle(*property);
- Isolate* isolate = instance->GetIsolate();
isolate->ScheduleThrow(
*isolate->factory()->NewReferenceError("not_defined",
HandleVector(&name, 1)));
return v8::Handle<v8::Value>();
}
- return v8::Utils::ToLocal(Handle<Object>(value));
+ return v8::Utils::ToLocal(Handle<Object>(value, isolate));
}
@@ -885,7 +890,7 @@ Handle<AccessorInfo> Accessors::MakeModuleExport(
int index,
PropertyAttributes attributes) {
Factory* factory = name->GetIsolate()->factory();
- Handle<AccessorInfo> info = factory->NewAccessorInfo();
+ Handle<ExecutableAccessorInfo> info = factory->NewExecutableAccessorInfo();
info->set_property_attributes(attributes);
info->set_all_can_read(true);
info->set_all_can_write(true);
diff --git a/src/3rdparty/v8/src/api.cc b/src/3rdparty/v8/src/api.cc
index cbb3a04..1804a50 100644
--- a/src/3rdparty/v8/src/api.cc
+++ b/src/3rdparty/v8/src/api.cc
@@ -42,6 +42,7 @@
#include "execution.h"
#include "global-handles.h"
#include "heap-profiler.h"
+#include "heap-snapshot-generator-inl.h"
#include "messages.h"
#ifdef COMPRESS_STARTUP_DATA_BZ2
#include "natives.h"
@@ -128,8 +129,13 @@ namespace v8 {
static void DefaultFatalErrorHandler(const char* location,
const char* message) {
- i::VMState __state__(i::Isolate::Current(), i::OTHER);
- API_Fatal(location, message);
+ i::Isolate* isolate = i::Isolate::Current();
+ if (isolate->IsInitialized()) {
+ i::VMState __state__(isolate, i::OTHER);
+ API_Fatal(location, message);
+ } else {
+ API_Fatal(location, message);
+ }
}
@@ -202,15 +208,21 @@ void i::V8::FatalProcessOutOfMemory(const char* location, bool take_snapshot) {
int end_marker;
heap_stats.end_marker = &end_marker;
i::Isolate* isolate = i::Isolate::Current();
- // BUG(1718):
- // Don't use the take_snapshot since we don't support HeapIterator here
- // without doing a special GC.
- isolate->heap()->RecordStats(&heap_stats, false);
+ if (isolate->heap()->HasBeenSetUp()) {
+ // BUG(1718): Don't use the take_snapshot since we don't support
+ // HeapIterator here without doing a special GC.
+ isolate->heap()->RecordStats(&heap_stats, false);
+ }
i::V8::SetFatalError();
FatalErrorCallback callback = GetFatalErrorHandler();
+ const char* message = "Allocation failed - process out of memory";
{
- LEAVE_V8(isolate);
- callback(location, "Allocation failed - process out of memory");
+ if (isolate->IsInitialized()) {
+ LEAVE_V8(isolate);
+ callback(location, message);
+ } else {
+ callback(location, message);
+ }
}
// If the callback returns, we stop execution.
UNREACHABLE();
@@ -615,100 +627,34 @@ bool SetResourceConstraints(ResourceConstraints* constraints) {
}
-i::Object** V8::GlobalizeReference(i::Object** obj) {
- i::Isolate* isolate = i::Isolate::Current();
+i::Object** V8::GlobalizeReference(i::Isolate* isolate, i::Object** obj) {
if (IsDeadCheck(isolate, "V8::Persistent::New")) return NULL;
LOG_API(isolate, "Persistent::New");
- i::Handle<i::Object> result =
- isolate->global_handles()->Create(*obj);
+ i::Handle<i::Object> result = isolate->global_handles()->Create(*obj);
return result.location();
}
-void V8::MakeWeak(i::Object** object, void* parameters,
- WeakReferenceCallback callback) {
- i::Isolate* isolate = i::Isolate::Current();
+void V8::MakeWeak(i::Isolate* isolate,
+ i::Object** object,
+ void* parameters,
+ WeakReferenceCallback weak_reference_callback,
+ NearDeathCallback near_death_callback) {
+ ASSERT(isolate == i::Isolate::Current());
LOG_API(isolate, "MakeWeak");
- isolate->global_handles()->MakeWeak(object, parameters,
- callback);
+ isolate->global_handles()->MakeWeak(object,
+ parameters,
+ weak_reference_callback,
+ near_death_callback);
}
-void V8::ClearWeak(i::Object** obj) {
- i::Isolate* isolate = i::Isolate::Current();
+void V8::ClearWeak(i::Isolate* isolate, i::Object** obj) {
LOG_API(isolate, "ClearWeak");
isolate->global_handles()->ClearWeakness(obj);
}
-void V8::MarkIndependent(i::Object** object) {
- i::Isolate* isolate = i::Isolate::Current();
- LOG_API(isolate, "MarkIndependent");
- isolate->global_handles()->MarkIndependent(object);
-}
-
-
-void V8::MarkIndependent(i::Isolate* isolate, i::Object** object) {
- ASSERT(isolate == i::Isolate::Current());
- LOG_API(isolate, "MarkIndependent");
- isolate->global_handles()->MarkIndependent(object);
-}
-
-
-void V8::MarkPartiallyDependent(i::Object** object) {
- i::Isolate* isolate = i::Isolate::Current();
- LOG_API(isolate, "MarkPartiallyDependent");
- isolate->global_handles()->MarkPartiallyDependent(object);
-}
-
-
-void V8::MarkPartiallyDependent(i::Isolate* isolate, i::Object** object) {
- ASSERT(isolate == i::Isolate::Current());
- LOG_API(isolate, "MarkPartiallyDependent");
- isolate->global_handles()->MarkPartiallyDependent(object);
-}
-
-
-bool V8::IsGlobalIndependent(i::Object** obj) {
- i::Isolate* isolate = i::Isolate::Current();
- LOG_API(isolate, "IsGlobalIndependent");
- if (!isolate->IsInitialized()) return false;
- return i::GlobalHandles::IsIndependent(obj);
-}
-
-
-bool V8::IsGlobalIndependent(i::Isolate* isolate, i::Object** obj) {
- ASSERT(isolate == i::Isolate::Current());
- LOG_API(isolate, "IsGlobalIndependent");
- if (!isolate->IsInitialized()) return false;
- return i::GlobalHandles::IsIndependent(obj);
-}
-
-
-bool V8::IsGlobalNearDeath(i::Object** obj) {
- i::Isolate* isolate = i::Isolate::Current();
- LOG_API(isolate, "IsGlobalNearDeath");
- if (!isolate->IsInitialized()) return false;
- return i::GlobalHandles::IsNearDeath(obj);
-}
-
-
-bool V8::IsGlobalWeak(i::Object** obj) {
- i::Isolate* isolate = i::Isolate::Current();
- LOG_API(isolate, "IsGlobalWeak");
- if (!isolate->IsInitialized()) return false;
- return i::GlobalHandles::IsWeak(obj);
-}
-
-
-void V8::DisposeGlobal(i::Object** obj) {
- i::Isolate* isolate = i::Isolate::Current();
- LOG_API(isolate, "DisposeGlobal");
- if (!isolate->IsInitialized()) return;
- isolate->global_handles()->Destroy(obj);
-}
-
-
void V8::DisposeGlobal(i::Isolate* isolate, i::Object** obj) {
ASSERT(isolate == i::Isolate::Current());
LOG_API(isolate, "DisposeGlobal");
@@ -758,21 +704,29 @@ void HandleScope::Leave() {
int HandleScope::NumberOfHandles() {
- EnsureInitializedForIsolate(
- i::Isolate::Current(), "HandleScope::NumberOfHandles");
- return i::HandleScope::NumberOfHandles();
+ i::Isolate* isolate = i::Isolate::Current();
+ if (!EnsureInitializedForIsolate(isolate, "HandleScope::NumberOfHandles")) {
+ return 0;
+ }
+ return i::HandleScope::NumberOfHandles(isolate);
}
i::Object** HandleScope::CreateHandle(i::Object* value) {
- return i::HandleScope::CreateHandle(value, i::Isolate::Current());
+ return i::HandleScope::CreateHandle(i::Isolate::Current(), value);
+}
+
+
+i::Object** HandleScope::CreateHandle(i::Isolate* isolate, i::Object* value) {
+ ASSERT(isolate == i::Isolate::Current());
+ return i::HandleScope::CreateHandle(isolate, value);
}
i::Object** HandleScope::CreateHandle(i::HeapObject* value) {
ASSERT(value->IsHeapObject());
return reinterpret_cast<i::Object**>(
- i::HandleScope::CreateHandle(value, value->GetIsolate()));
+ i::HandleScope::CreateHandle(value->GetIsolate(), value));
}
@@ -810,33 +764,77 @@ void Context::Exit() {
}
-void Context::SetData(v8::Handle<Value> data) {
- i::Handle<i::Context> env = Utils::OpenHandle(this);
- i::Isolate* isolate = env->GetIsolate();
- if (IsDeadCheck(isolate, "v8::Context::SetData()")) return;
- i::Handle<i::Object> raw_data = Utils::OpenHandle(*data);
- ASSERT(env->IsNativeContext());
- if (env->IsNativeContext()) {
- env->set_data(*raw_data);
- }
+static void* DecodeSmiToAligned(i::Object* value, const char* location) {
+ ApiCheck(value->IsSmi(), location, "Not a Smi");
+ return reinterpret_cast<void*>(value);
}
-v8::Local<v8::Value> Context::GetData() {
- i::Handle<i::Context> env = Utils::OpenHandle(this);
- i::Isolate* isolate = env->GetIsolate();
- if (IsDeadCheck(isolate, "v8::Context::GetData()")) {
- return Local<Value>();
- }
- ASSERT(env->IsNativeContext());
- if (!env->IsNativeContext()) {
- return Local<Value>();
+static i::Smi* EncodeAlignedAsSmi(void* value, const char* location) {
+ i::Smi* smi = reinterpret_cast<i::Smi*>(value);
+ ApiCheck(smi->IsSmi(), location, "Pointer is not aligned");
+ return smi;
+}
+
+
+static i::Handle<i::FixedArray> EmbedderDataFor(Context* context,
+ int index,
+ bool can_grow,
+ const char* location) {
+ i::Handle<i::Context> env = Utils::OpenHandle(context);
+ bool ok = !IsDeadCheck(env->GetIsolate(), location) &&
+ ApiCheck(env->IsNativeContext(), location, "Not a native context") &&
+ ApiCheck(index >= 0, location, "Negative index");
+ if (!ok) return i::Handle<i::FixedArray>();
+ i::Handle<i::FixedArray> data(env->embedder_data());
+ if (index < data->length()) return data;
+ if (!can_grow) {
+ Utils::ReportApiFailure(location, "Index too large");
+ return i::Handle<i::FixedArray>();
}
- i::Handle<i::Object> result(env->data(), isolate);
+ int new_size = i::Max(index, data->length() << 1) + 1;
+ data = env->GetIsolate()->factory()->CopySizeFixedArray(data, new_size);
+ env->set_embedder_data(*data);
+ return data;
+}
+
+
+v8::Local<v8::Value> Context::SlowGetEmbedderData(int index) {
+ const char* location = "v8::Context::GetEmbedderData()";
+ i::Handle<i::FixedArray> data = EmbedderDataFor(this, index, false, location);
+ if (data.is_null()) return Local<Value>();
+ i::Handle<i::Object> result(data->get(index), data->GetIsolate());
return Utils::ToLocal(result);
}
+void Context::SetEmbedderData(int index, v8::Handle<Value> value) {
+ const char* location = "v8::Context::SetEmbedderData()";
+ i::Handle<i::FixedArray> data = EmbedderDataFor(this, index, true, location);
+ if (data.is_null()) return;
+ i::Handle<i::Object> val = Utils::OpenHandle(*value);
+ data->set(index, *val);
+ ASSERT_EQ(*Utils::OpenHandle(*value),
+ *Utils::OpenHandle(*GetEmbedderData(index)));
+}
+
+
+void* Context::SlowGetAlignedPointerFromEmbedderData(int index) {
+ const char* location = "v8::Context::GetAlignedPointerFromEmbedderData()";
+ i::Handle<i::FixedArray> data = EmbedderDataFor(this, index, false, location);
+ if (data.is_null()) return NULL;
+ return DecodeSmiToAligned(data->get(index), location);
+}
+
+
+void Context::SetAlignedPointerInEmbedderData(int index, void* value) {
+ const char* location = "v8::Context::SetAlignedPointerInEmbedderData()";
+ i::Handle<i::FixedArray> data = EmbedderDataFor(this, index, true, location);
+ data->set(index, EncodeAlignedAsSmi(value, location));
+ ASSERT_EQ(value, GetAlignedPointerFromEmbedderData(index));
+}
+
+
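+ (Together these four functions replace the old single-slot
+ Context::SetData()/GetData() pair with an indexed embedder-data array that
+ grows on demand; EmbedderDataFor doubles the backing FixedArray. A minimal
+ usage sketch, not part of this diff, assuming the matching declarations
+ this change adds to include/v8.h:
+
+     #include <v8.h>
+
+     void UseEmbedderData() {
+       v8::HandleScope scope;
+       v8::Persistent<v8::Context> context = v8::Context::New();
+       v8::Context::Scope context_scope(context);
+       // Any V8 value can go in a slot; the array grows as needed.
+       context->SetEmbedderData(0, v8::String::New("per-context state"));
+       // 2-byte-aligned C++ pointers round-trip through the same slots
+       // as Smis, with no heap allocation.
+       static int my_state = 42;
+       context->SetAlignedPointerInEmbedderData(1, &my_state);
+       int* back = static_cast<int*>(
+           context->GetAlignedPointerFromEmbedderData(1));
+       (void)back;  // == &my_state
+     }
+ )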
i::Object** v8::HandleScope::RawClose(i::Object** value) {
if (!ApiCheck(!is_closed_,
"v8::HandleScope::Close()",
@@ -858,7 +856,7 @@ i::Object** v8::HandleScope::RawClose(i::Object** value) {
}
// Allocate a new handle on the previous handle block.
- i::Handle<i::Object> handle(result);
+ i::Handle<i::Object> handle(result, isolate_);
return handle.location();
}
@@ -942,7 +940,7 @@ void Template::Set(v8::Handle<String> name, v8::Handle<Data> value,
if (IsDeadCheck(isolate, "v8::Template::Set()")) return;
ENTER_V8(isolate);
i::HandleScope scope(isolate);
- i::Handle<i::Object> list(Utils::OpenHandle(this)->property_list());
+ i::Handle<i::Object> list(Utils::OpenHandle(this)->property_list(), isolate);
if (list->IsUndefined()) {
list = NeanderArray().value();
Utils::OpenHandle(this)->set_property_list(*list);
@@ -968,7 +966,8 @@ Local<ObjectTemplate> FunctionTemplate::PrototypeTemplate() {
return Local<ObjectTemplate>();
}
ENTER_V8(isolate);
- i::Handle<i::Object> result(Utils::OpenHandle(this)->prototype_template());
+ i::Handle<i::Object> result(Utils::OpenHandle(this)->prototype_template(),
+ isolate);
if (result->IsUndefined()) {
result = Utils::OpenHandle(*ObjectTemplate::New());
Utils::OpenHandle(this)->set_prototype_template(*result);
@@ -986,7 +985,7 @@ void FunctionTemplate::Inherit(v8::Handle<FunctionTemplate> value) {
Local<FunctionTemplate> FunctionTemplate::New(InvocationCallback callback,
- v8::Handle<Value> data, v8::Handle<Signature> signature) {
+ v8::Handle<Value> data, v8::Handle<Signature> signature, int length) {
i::Isolate* isolate = i::Isolate::Current();
EnsureInitializedForIsolate(isolate, "v8::FunctionTemplate::New()");
LOG_API(isolate, "FunctionTemplate::New");
@@ -1003,6 +1002,7 @@ Local<FunctionTemplate> FunctionTemplate::New(InvocationCallback callback,
if (data.IsEmpty()) data = v8::Undefined();
Utils::ToLocal(obj)->SetCallHandler(callback, data);
}
+ obj->set_length(length);
obj->set_undetectable(false);
obj->set_needs_access_check(false);
@@ -1110,7 +1110,8 @@ static i::Handle<i::AccessorInfo> MakeAccessorInfo(
v8::AccessControl settings,
v8::PropertyAttribute attributes,
v8::Handle<AccessorSignature> signature) {
- i::Handle<i::AccessorInfo> obj = FACTORY->NewAccessorInfo();
+ i::Handle<i::ExecutableAccessorInfo> obj =
+ FACTORY->NewExecutableAccessorInfo();
SET_FIELD_WRAPPED(obj, set_getter, getter);
SET_FIELD_WRAPPED(obj, set_setter, setter);
if (data.IsEmpty()) data = v8::Undefined();
@@ -1146,7 +1147,8 @@ void FunctionTemplate::AddInstancePropertyAccessor(
i::Handle<i::AccessorInfo> obj = MakeAccessorInfo(name, getter, setter, data,
settings, attributes,
signature);
- i::Handle<i::Object> list(Utils::OpenHandle(this)->property_accessors());
+ i::Handle<i::Object> list(Utils::OpenHandle(this)->property_accessors(),
+ isolate);
if (list->IsUndefined()) {
list = NeanderArray().value();
Utils::OpenHandle(this)->set_property_accessors(*list);
@@ -1173,6 +1175,14 @@ Local<ObjectTemplate> FunctionTemplate::InstanceTemplate() {
}
+void FunctionTemplate::SetLength(int length) {
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ if (IsDeadCheck(isolate, "v8::FunctionTemplate::SetLength()")) return;
+ ENTER_V8(isolate);
+ Utils::OpenHandle(this)->set_length(length);
+}
+
+
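+ (SetLength() and the new `length` parameter on FunctionTemplate::New()
+ above control the `length` property of functions created from the
+ template. A sketch, assuming the corresponding v8.h declarations,
+ including default arguments on New():
+
+     #include <v8.h>
+
+     v8::Handle<v8::Value> Callback(const v8::Arguments& args) {
+       return v8::Undefined();
+     }
+
+     void MakeFunction() {  // requires an entered context
+       v8::HandleScope scope;
+       v8::Local<v8::FunctionTemplate> t =
+           v8::FunctionTemplate::New(Callback);
+       t->SetLength(2);
+       v8::Local<v8::Function> f = t->GetFunction();
+       // Script code now observes f.length === 2.
+     }
+ )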
void FunctionTemplate::SetClassName(Handle<String> name) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
if (IsDeadCheck(isolate, "v8::FunctionTemplate::SetClassName()")) return;
@@ -1193,7 +1203,7 @@ void FunctionTemplate::SetHiddenPrototype(bool value) {
void FunctionTemplate::ReadOnlyPrototype() {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::FunctionTemplate::SetPrototypeAttributes()")) {
+ if (IsDeadCheck(isolate, "v8::FunctionTemplate::ReadOnlyPrototype()")) {
return;
}
ENTER_V8(isolate);
@@ -1352,12 +1362,13 @@ void ObjectTemplate::SetAccessor(v8::Handle<String> name,
}
-void ObjectTemplate::SetNamedPropertyHandler(NamedPropertyGetter getter,
- NamedPropertySetter setter,
- NamedPropertyQuery query,
- NamedPropertyDeleter remover,
- NamedPropertyEnumerator enumerator,
- Handle<Value> data) {
+void ObjectTemplate::SetNamedPropertyHandler(
+ NamedPropertyGetter getter,
+ NamedPropertySetter setter,
+ NamedPropertyQuery query,
+ NamedPropertyDeleter remover,
+ NamedPropertyEnumerator enumerator,
+ Handle<Value> data) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
if (IsDeadCheck(isolate, "v8::ObjectTemplate::SetNamedPropertyHandler()")) {
return;
@@ -1378,14 +1389,16 @@ void ObjectTemplate::SetNamedPropertyHandler(NamedPropertyGetter getter,
}
-void ObjectTemplate::SetFallbackPropertyHandler(NamedPropertyGetter getter,
- NamedPropertySetter setter,
- NamedPropertyQuery query,
- NamedPropertyDeleter remover,
- NamedPropertyEnumerator enumerator,
- Handle<Value> data) {
+void ObjectTemplate::SetFallbackPropertyHandler(
+ NamedPropertyGetter getter,
+ NamedPropertySetter setter,
+ NamedPropertyQuery query,
+ NamedPropertyDeleter remover,
+ NamedPropertyEnumerator enumerator,
+ Handle<Value> data) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::ObjectTemplate::SetNamedPropertyHandler()")) {
+ if (IsDeadCheck(isolate,
+ "v8::ObjectTemplate::SetFallbackPropertyHandler()")) {
return;
}
ENTER_V8(isolate);
@@ -1522,8 +1535,7 @@ void ObjectTemplate::SetInternalFieldCount(int value) {
}
-bool ObjectTemplate::HasExternalResource()
-{
+bool ObjectTemplate::HasExternalResource() {
if (IsDeadCheck(Utils::OpenHandle(this)->GetIsolate(),
"v8::ObjectTemplate::HasExternalResource()")) {
return 0;
@@ -1532,8 +1544,7 @@ bool ObjectTemplate::HasExternalResource()
}
-void ObjectTemplate::SetHasExternalResource(bool value)
-{
+void ObjectTemplate::SetHasExternalResource(bool value) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
if (IsDeadCheck(isolate, "v8::ObjectTemplate::SetHasExternalResource()")) {
return;
@@ -1541,19 +1552,18 @@ void ObjectTemplate::SetHasExternalResource(bool value)
ENTER_V8(isolate);
if (value) {
EnsureConstructor(this);
- }
- if (value) {
- Utils::OpenHandle(this)->set_has_external_resource(i::Smi::FromInt(1));
+ Utils::OpenHandle(this)->set_has_external_resource(i::Smi::FromInt(1));
} else {
- Utils::OpenHandle(this)->set_has_external_resource(Utils::OpenHandle(this)->GetHeap()->undefined_value());
+ Utils::OpenHandle(this)->set_has_external_resource(
+ Utils::OpenHandle(this)->GetHeap()->undefined_value());
}
}
-void ObjectTemplate::MarkAsUseUserObjectComparison()
-{
+void ObjectTemplate::MarkAsUseUserObjectComparison() {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::ObjectTemplate::MarkAsUseUserObjectComparison()")) {
+ if (IsDeadCheck(isolate,
+ "v8::ObjectTemplate::MarkAsUseUserObjectComparison()")) {
return;
}
ENTER_V8(isolate);
@@ -1682,7 +1692,11 @@ Local<Script> Script::Compile(v8::Handle<String> source,
ON_BAILOUT(isolate, "v8::Script::Compile()", return Local<Script>());
LOG_API(isolate, "Script::Compile");
ENTER_V8(isolate);
- Local<Script> generic = New(source, origin, pre_data, script_data, compile_flags);
+ Local<Script> generic = New(source,
+ origin,
+ pre_data,
+ script_data,
+ compile_flags);
if (generic.IsEmpty())
return generic;
i::Handle<i::Object> obj = Utils::OpenHandle(*generic);
@@ -1714,6 +1728,8 @@ Local<Value> Script::Run(Handle<Object> qml) {
ON_BAILOUT(isolate, "v8::Script::Run()", return Local<Value>());
LOG_API(isolate, "Script::Run");
ENTER_V8(isolate);
+ i::Logger::TimerEventScope timer_scope(
+ isolate, i::Logger::TimerEventScope::v8_execute);
i::Object* raw_result = NULL;
{
i::HandleScope scope(isolate);
@@ -1731,8 +1747,13 @@ Local<Value> Script::Run(Handle<Object> qml) {
i::Handle<i::Object> qmlglobal = Utils::OpenHandle(*qml, true);
i::Handle<i::Object> receiver(
isolate->context()->global_proxy(), isolate);
- i::Handle<i::Object> result =
- i::Execution::Call(fun, receiver, 0, NULL, &has_pending_exception, false, qmlglobal);
+ i::Handle<i::Object> result = i::Execution::Call(fun,
+ receiver,
+ 0,
+ NULL,
+ &has_pending_exception,
+ false,
+ qmlglobal);
EXCEPTION_BAILOUT_CHECK_DO_CALLBACK(isolate, Local<Value>());
raw_result = *result;
}
@@ -1764,10 +1785,10 @@ Local<Value> Script::Id() {
i::HandleScope scope(isolate);
i::Handle<i::SharedFunctionInfo> function_info = OpenScript(this);
i::Handle<i::Script> script(i::Script::cast(function_info->script()));
- i::Handle<i::Object> id(script->id());
+ i::Handle<i::Object> id(script->id(), isolate);
raw_id = *id;
}
- i::Handle<i::Object> id(raw_id);
+ i::Handle<i::Object> id(raw_id, isolate);
return Utils::ToLocal(id);
}
@@ -1851,9 +1872,9 @@ v8::Local<Value> v8::TryCatch::StackTrace() const {
if (!raw_obj->IsJSObject()) return v8::Local<Value>();
i::HandleScope scope(isolate_);
i::Handle<i::JSObject> obj(i::JSObject::cast(raw_obj), isolate_);
- i::Handle<i::String> name = isolate_->factory()->LookupAsciiSymbol("stack");
+ i::Handle<i::String> name = isolate_->factory()->stack_string();
if (!obj->HasProperty(*name)) return v8::Local<Value>();
- i::Handle<i::Object> value = i::GetProperty(obj, name);
+ i::Handle<i::Object> value = i::GetProperty(isolate_, obj, name);
if (value.is_null()) return v8::Local<Value>();
return v8::Utils::ToLocal(scope.CloseAndEscape(value));
} else {
@@ -1899,7 +1920,7 @@ Local<String> Message::Get() const {
ENTER_V8(isolate);
HandleScope scope;
i::Handle<i::Object> obj = Utils::OpenHandle(this);
- i::Handle<i::String> raw_result = i::MessageHandler::GetMessage(obj);
+ i::Handle<i::String> raw_result = i::MessageHandler::GetMessage(isolate, obj);
Local<String> result = Utils::ToLocal(raw_result);
return scope.Close(result);
}
@@ -1916,8 +1937,10 @@ v8::Handle<Value> Message::GetScriptResourceName() const {
i::Handle<i::JSMessageObject>::cast(Utils::OpenHandle(this));
// Return this.script.name.
i::Handle<i::JSValue> script =
- i::Handle<i::JSValue>::cast(i::Handle<i::Object>(message->script()));
- i::Handle<i::Object> resource_name(i::Script::cast(script->value())->name());
+ i::Handle<i::JSValue>::cast(i::Handle<i::Object>(message->script(),
+ isolate));
+ i::Handle<i::Object> resource_name(i::Script::cast(script->value())->name(),
+ isolate);
return scope.Close(Utils::ToLocal(resource_name));
}
@@ -1933,8 +1956,9 @@ v8::Handle<Value> Message::GetScriptData() const {
i::Handle<i::JSMessageObject>::cast(Utils::OpenHandle(this));
// Return this.script.data.
i::Handle<i::JSValue> script =
- i::Handle<i::JSValue>::cast(i::Handle<i::Object>(message->script()));
- i::Handle<i::Object> data(i::Script::cast(script->value())->data());
+ i::Handle<i::JSValue>::cast(i::Handle<i::Object>(message->script(),
+ isolate));
+ i::Handle<i::Object> data(i::Script::cast(script->value())->data(), isolate);
return scope.Close(Utils::ToLocal(data));
}
@@ -1948,7 +1972,7 @@ v8::Handle<v8::StackTrace> Message::GetStackTrace() const {
HandleScope scope;
i::Handle<i::JSMessageObject> message =
i::Handle<i::JSMessageObject>::cast(Utils::OpenHandle(this));
- i::Handle<i::Object> stackFramesObj(message->stack_frames());
+ i::Handle<i::Object> stackFramesObj(message->stack_frames(), isolate);
if (!stackFramesObj->IsJSArray()) return v8::Handle<v8::StackTrace>();
i::Handle<i::JSArray> stackTrace =
i::Handle<i::JSArray>::cast(stackFramesObj);
@@ -1962,7 +1986,8 @@ static i::Handle<i::Object> CallV8HeapFunction(const char* name,
i::Handle<i::Object> argv[],
bool* has_pending_exception) {
i::Isolate* isolate = i::Isolate::Current();
- i::Handle<i::String> fmt_str = isolate->factory()->LookupAsciiSymbol(name);
+ i::Handle<i::String> fmt_str =
+ isolate->factory()->InternalizeUtf8String(name);
i::Object* object_fun =
isolate->js_builtins_object()->GetPropertyNoExceptionThrown(*fmt_str);
i::Handle<i::JSFunction> fun =
@@ -2313,7 +2338,7 @@ bool Value::IsExternal() const {
if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsExternal()")) {
return false;
}
- return Utils::OpenHandle(this)->IsForeign();
+ return Utils::OpenHandle(this)->IsExternal();
}
@@ -2355,7 +2380,7 @@ bool Value::IsDate() const {
i::Isolate* isolate = i::Isolate::Current();
if (IsDeadCheck(isolate, "v8::Value::IsDate()")) return false;
i::Handle<i::Object> obj = Utils::OpenHandle(this);
- return obj->HasSpecificClassOf(isolate->heap()->Date_symbol());
+ return obj->HasSpecificClassOf(isolate->heap()->Date_string());
}
@@ -2363,7 +2388,7 @@ bool Value::IsStringObject() const {
i::Isolate* isolate = i::Isolate::Current();
if (IsDeadCheck(isolate, "v8::Value::IsStringObject()")) return false;
i::Handle<i::Object> obj = Utils::OpenHandle(this);
- return obj->HasSpecificClassOf(isolate->heap()->String_symbol());
+ return obj->HasSpecificClassOf(isolate->heap()->String_string());
}
@@ -2371,23 +2396,27 @@ bool Value::IsNumberObject() const {
i::Isolate* isolate = i::Isolate::Current();
if (IsDeadCheck(isolate, "v8::Value::IsNumberObject()")) return false;
i::Handle<i::Object> obj = Utils::OpenHandle(this);
- return obj->HasSpecificClassOf(isolate->heap()->Number_symbol());
+ return obj->HasSpecificClassOf(isolate->heap()->Number_string());
}
static i::Object* LookupBuiltin(i::Isolate* isolate,
const char* builtin_name) {
- i::Handle<i::String> symbol =
- isolate->factory()->LookupAsciiSymbol(builtin_name);
+ i::Handle<i::String> string =
+ isolate->factory()->InternalizeUtf8String(builtin_name);
i::Handle<i::JSBuiltinsObject> builtins = isolate->js_builtins_object();
- return builtins->GetPropertyNoExceptionThrown(*symbol);
+ return builtins->GetPropertyNoExceptionThrown(*string);
}
static bool CheckConstructor(i::Isolate* isolate,
i::Handle<i::JSObject> obj,
const char* class_name) {
- return obj->map()->constructor() == LookupBuiltin(isolate, class_name);
+ i::Object* constr = obj->map()->constructor();
+ if (!constr->IsJSFunction()) return false;
+ i::JSFunction* func = i::JSFunction::cast(constr);
+ return func->shared()->native() &&
+ constr == LookupBuiltin(isolate, class_name);
}
@@ -2414,7 +2443,7 @@ bool Value::IsBooleanObject() const {
i::Isolate* isolate = i::Isolate::Current();
if (IsDeadCheck(isolate, "v8::Value::IsBooleanObject()")) return false;
i::Handle<i::Object> obj = Utils::OpenHandle(this);
- return obj->HasSpecificClassOf(isolate->heap()->Boolean_symbol());
+ return obj->HasSpecificClassOf(isolate->heap()->Boolean_string());
}
@@ -2427,7 +2456,7 @@ bool Value::IsRegExp() const {
bool Value::IsError() const {
if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsError()")) return false;
i::Handle<i::Object> obj = Utils::OpenHandle(this);
- return obj->HasSpecificClassOf(HEAP->Error_symbol());
+ return obj->HasSpecificClassOf(HEAP->Error_string());
}
@@ -2502,7 +2531,7 @@ Local<Boolean> Value::ToBoolean() const {
}
LOG_API(isolate, "ToBoolean");
ENTER_V8(isolate);
- i::Handle<i::Object> val = i::Execution::ToBoolean(obj);
+ i::Handle<i::Object> val = i::Execution::ToBoolean(isolate, obj);
return Local<Boolean>(ToApi<Boolean>(val));
}
}
@@ -2548,8 +2577,7 @@ Local<Integer> Value::ToInteger() const {
void External::CheckCast(v8::Value* that) {
if (IsDeadCheck(i::Isolate::Current(), "v8::External::Cast()")) return;
- i::Handle<i::Object> obj = Utils::OpenHandle(that);
- ApiCheck(obj->IsForeign(),
+ ApiCheck(Utils::OpenHandle(that)->IsExternal(),
"v8::External::Cast()",
"Could not convert to external");
}
@@ -2613,7 +2641,7 @@ void v8::Date::CheckCast(v8::Value* that) {
i::Isolate* isolate = i::Isolate::Current();
if (IsDeadCheck(isolate, "v8::Date::Cast()")) return;
i::Handle<i::Object> obj = Utils::OpenHandle(that);
- ApiCheck(obj->HasSpecificClassOf(isolate->heap()->Date_symbol()),
+ ApiCheck(obj->HasSpecificClassOf(isolate->heap()->Date_string()),
"v8::Date::Cast()",
"Could not convert to date");
}
@@ -2623,7 +2651,7 @@ void v8::StringObject::CheckCast(v8::Value* that) {
i::Isolate* isolate = i::Isolate::Current();
if (IsDeadCheck(isolate, "v8::StringObject::Cast()")) return;
i::Handle<i::Object> obj = Utils::OpenHandle(that);
- ApiCheck(obj->HasSpecificClassOf(isolate->heap()->String_symbol()),
+ ApiCheck(obj->HasSpecificClassOf(isolate->heap()->String_string()),
"v8::StringObject::Cast()",
"Could not convert to StringObject");
}
@@ -2633,7 +2661,7 @@ void v8::NumberObject::CheckCast(v8::Value* that) {
i::Isolate* isolate = i::Isolate::Current();
if (IsDeadCheck(isolate, "v8::NumberObject::Cast()")) return;
i::Handle<i::Object> obj = Utils::OpenHandle(that);
- ApiCheck(obj->HasSpecificClassOf(isolate->heap()->Number_symbol()),
+ ApiCheck(obj->HasSpecificClassOf(isolate->heap()->Number_string()),
"v8::NumberObject::Cast()",
"Could not convert to NumberObject");
}
@@ -2643,7 +2671,7 @@ void v8::BooleanObject::CheckCast(v8::Value* that) {
i::Isolate* isolate = i::Isolate::Current();
if (IsDeadCheck(isolate, "v8::BooleanObject::Cast()")) return;
i::Handle<i::Object> obj = Utils::OpenHandle(that);
- ApiCheck(obj->HasSpecificClassOf(isolate->heap()->Boolean_symbol()),
+ ApiCheck(obj->HasSpecificClassOf(isolate->heap()->Boolean_string()),
"v8::BooleanObject::Cast()",
"Could not convert to BooleanObject");
}
@@ -2667,7 +2695,7 @@ bool Value::BooleanValue() const {
if (IsDeadCheck(isolate, "v8::Value::BooleanValue()")) return false;
LOG_API(isolate, "BooleanValue");
ENTER_V8(isolate);
- i::Handle<i::Object> value = i::Execution::ToBoolean(obj);
+ i::Handle<i::Object> value = i::Execution::ToBoolean(isolate, obj);
return value->IsTrue();
}
}
@@ -2770,7 +2798,7 @@ Local<Uint32> Value::ToArrayIndex() const {
if (str->AsArrayIndex(&index)) {
i::Handle<i::Object> value;
if (index <= static_cast<uint32_t>(i::Smi::kMaxValue)) {
- value = i::Handle<i::Object>(i::Smi::FromInt(index));
+ value = i::Handle<i::Object>(i::Smi::FromInt(index), isolate);
} else {
value = isolate->factory()->NewNumber(index);
}
@@ -2894,6 +2922,7 @@ bool v8::Object::Set(v8::Handle<Value> key, v8::Handle<Value> value,
i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
EXCEPTION_PREAMBLE(isolate);
i::Handle<i::Object> obj = i::SetProperty(
+ isolate,
self,
key_obj,
value_obj,
@@ -2978,7 +3007,7 @@ Local<Value> v8::Object::Get(v8::Handle<Value> key) {
i::Handle<i::Object> self = Utils::OpenHandle(this);
i::Handle<i::Object> key_obj = Utils::OpenHandle(*key);
EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> result = i::GetProperty(self, key_obj);
+ i::Handle<i::Object> result = i::GetProperty(isolate, self, key_obj);
has_pending_exception = result.is_null();
EXCEPTION_BAILOUT_CHECK(isolate, Local<Value>());
return Utils::ToLocal(result);
@@ -3024,7 +3053,7 @@ Local<Value> v8::Object::GetPrototype() {
return Local<v8::Value>());
ENTER_V8(isolate);
i::Handle<i::Object> self = Utils::OpenHandle(this);
- i::Handle<i::Object> result(self->GetPrototype());
+ i::Handle<i::Object> result(self->GetPrototype(isolate), isolate);
return Utils::ToLocal(result);
}
@@ -3113,7 +3142,7 @@ Local<String> v8::Object::ObjectProtoToString() {
ENTER_V8(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- i::Handle<i::Object> name(self->class_name());
+ i::Handle<i::Object> name(self->class_name(), isolate);
// Native implementation of Object.prototype.toString (v8natives.js):
// var c = %ClassOf(this);
@@ -3125,7 +3154,7 @@ Local<String> v8::Object::ObjectProtoToString() {
} else {
i::Handle<i::String> class_name = i::Handle<i::String>::cast(name);
- if (class_name->IsEqualTo(i::CStrVector("Arguments"))) {
+ if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Arguments"))) {
return v8::String::New("[object Object]");
} else {
@@ -3166,7 +3195,7 @@ Local<Value> v8::Object::GetConstructor() {
return Local<v8::Function>());
ENTER_V8(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- i::Handle<i::Object> constructor(self->GetConstructor());
+ i::Handle<i::Object> constructor(self->GetConstructor(), isolate);
return Utils::ToLocal(constructor);
}
@@ -3432,10 +3461,11 @@ bool v8::Object::SetHiddenValue(v8::Handle<v8::String> key,
i::HandleScope scope(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
- i::Handle<i::String> key_symbol = FACTORY->LookupSymbol(key_obj);
+ i::Handle<i::String> key_string =
+ isolate->factory()->InternalizeString(key_obj);
i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
i::Handle<i::Object> result =
- i::JSObject::SetHiddenProperty(self, key_symbol, value_obj);
+ i::JSObject::SetHiddenProperty(self, key_string, value_obj);
return *result == *self;
}
@@ -3447,8 +3477,8 @@ v8::Local<v8::Value> v8::Object::GetHiddenValue(v8::Handle<v8::String> key) {
ENTER_V8(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
- i::Handle<i::String> key_symbol = FACTORY->LookupSymbol(key_obj);
- i::Handle<i::Object> result(self->GetHiddenProperty(*key_symbol));
+ i::Handle<i::String> key_string = FACTORY->InternalizeString(key_obj);
+ i::Handle<i::Object> result(self->GetHiddenProperty(*key_string), isolate);
if (result->IsUndefined()) return v8::Local<v8::Value>();
return Utils::ToLocal(result);
}
@@ -3461,8 +3491,8 @@ bool v8::Object::DeleteHiddenValue(v8::Handle<v8::String> key) {
i::HandleScope scope(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
- i::Handle<i::String> key_symbol = FACTORY->LookupSymbol(key_obj);
- self->DeleteHiddenProperty(*key_symbol);
+ i::Handle<i::String> key_string = FACTORY->InternalizeString(key_obj);
+ self->DeleteHiddenProperty(*key_string);
return true;
}
@@ -3685,6 +3715,8 @@ Local<v8::Value> Object::CallAsFunction(v8::Handle<v8::Object> recv,
return Local<v8::Value>());
LOG_API(isolate, "Object::CallAsFunction");
ENTER_V8(isolate);
+ i::Logger::TimerEventScope timer_scope(
+ isolate, i::Logger::TimerEventScope::v8_execute);
i::HandleScope scope(isolate);
i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
i::Handle<i::Object> recv_obj = Utils::OpenHandle(*recv);
@@ -3716,6 +3748,8 @@ Local<v8::Value> Object::CallAsConstructor(int argc,
return Local<v8::Object>());
LOG_API(isolate, "Object::CallAsConstructor");
ENTER_V8(isolate);
+ i::Logger::TimerEventScope timer_scope(
+ isolate, i::Logger::TimerEventScope::v8_execute);
i::HandleScope scope(isolate);
i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
STATIC_ASSERT(sizeof(v8::Handle<v8::Value>) == sizeof(i::Object**));
@@ -3758,6 +3792,8 @@ Local<v8::Object> Function::NewInstance(int argc,
return Local<v8::Object>());
LOG_API(isolate, "Function::NewInstance");
ENTER_V8(isolate);
+ i::Logger::TimerEventScope timer_scope(
+ isolate, i::Logger::TimerEventScope::v8_execute);
HandleScope scope;
i::Handle<i::JSFunction> function = Utils::OpenHandle(this);
STATIC_ASSERT(sizeof(v8::Handle<v8::Value>) == sizeof(i::Object**));
@@ -3776,6 +3812,8 @@ Local<v8::Value> Function::Call(v8::Handle<v8::Object> recv, int argc,
ON_BAILOUT(isolate, "v8::Function::Call()", return Local<v8::Value>());
LOG_API(isolate, "Function::Call");
ENTER_V8(isolate);
+ i::Logger::TimerEventScope timer_scope(
+ isolate, i::Logger::TimerEventScope::v8_execute);
i::Object* raw_result = NULL;
{
i::HandleScope scope(isolate);
@@ -3789,7 +3827,7 @@ Local<v8::Value> Function::Call(v8::Handle<v8::Object> recv, int argc,
EXCEPTION_BAILOUT_CHECK_DO_CALLBACK(isolate, Local<Object>());
raw_result = *returned;
}
- i::Handle<i::Object> result(raw_result);
+ i::Handle<i::Object> result(raw_result, isolate);
return Utils::ToLocal(result);
}
@@ -3805,13 +3843,15 @@ void Function::SetName(v8::Handle<v8::String> name) {
Handle<Value> Function::GetName() const {
i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
- return Utils::ToLocal(i::Handle<i::Object>(func->shared()->name()));
+ return Utils::ToLocal(i::Handle<i::Object>(func->shared()->name(),
+ func->GetIsolate()));
}
Handle<Value> Function::GetInferredName() const {
i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
- return Utils::ToLocal(i::Handle<i::Object>(func->shared()->inferred_name()));
+ return Utils::ToLocal(i::Handle<i::Object>(func->shared()->inferred_name(),
+ func->GetIsolate()));
}
@@ -3857,7 +3897,7 @@ Handle<Value> Function::GetScriptId() const {
if (!func->shared()->script()->IsScript())
return v8::Undefined();
i::Handle<i::Script> script(i::Script::cast(func->shared()->script()));
- return Utils::ToLocal(i::Handle<i::Object>(script->id()));
+ return Utils::ToLocal(i::Handle<i::Object>(script->id(), func->GetIsolate()));
}
int String::Length() const {
@@ -3866,109 +3906,238 @@ int String::Length() const {
return str->length();
}
+bool String::MayContainNonAscii() const {
+ i::Handle<i::String> str = Utils::OpenHandle(this);
+ if (IsDeadCheck(str->GetIsolate(), "v8::String::MayContainNonAscii()")) {
+ return false;
+ }
+ return !str->HasOnlyAsciiChars();
+}
+
+
+bool String::IsOneByte() const {
+ i::Handle<i::String> str = Utils::OpenHandle(this);
+ if (IsDeadCheck(str->GetIsolate(), "v8::String::IsOneByte()")) {
+ return false;
+ }
+ return str->IsOneByteConvertible();
+}
+
+
+class Utf8LengthVisitor {
+ public:
+ Utf8LengthVisitor()
+ : utf8_length_(0),
+ last_character_(unibrow::Utf16::kNoPreviousCharacter) {}
+
+ inline int GetLength() {
+ return utf8_length_;
+ }
+
+ template<typename Char>
+ inline void Visit(const Char* chars, unsigned length) {
+ ASSERT(length > 0);
+ // TODO(dcarney): Add back ascii fast path.
+ int utf8_length = 0;
+ int last_character = last_character_;
+ for (unsigned i = 0; i < length; i++) {
+ uint16_t c = chars[i];
+ utf8_length += unibrow::Utf8::Length(c, last_character);
+ last_character = c;
+ }
+ last_character_ = last_character;
+ utf8_length_ += utf8_length;
+ }
+
+ inline void VisitOneByteString(const uint8_t* chars, unsigned length) {
+ Visit(chars, length);
+ }
+
+ inline void VisitTwoByteString(const uint16_t* chars, unsigned length) {
+ Visit(chars, length);
+ }
+
+ private:
+ int utf8_length_;
+ int last_character_;
+ DISALLOW_COPY_AND_ASSIGN(Utf8LengthVisitor);
+};
+
+
+static int Utf8Length(i::String* str, i::Isolate* isolate) {
+ unsigned length = static_cast<unsigned>(str->length());
+ if (length == 0) return 0;
+ int32_t type = str->map()->instance_type();
+ Utf8LengthVisitor visitor;
+ // Non-ConsString branch.
+ if ((type & i::kStringRepresentationMask) != i::kConsStringTag) {
+ i::ConsStringNullOp null_op;
+ i::String::Visit(str, 0, visitor, null_op, type, length);
+ return visitor.GetLength();
+ }
+ i::ConsStringIteratorOp* op = isolate->write_iterator();
+ unsigned offset = 0;
+ i::String* leaf = op->Operate(str, &offset, &type, &length);
+ ASSERT(leaf != NULL);
+ while (leaf != NULL) {
+ i::ConsStringNullOp null_op;
+ ASSERT(offset == 0);
+ i::String::Visit(leaf, 0, visitor, null_op, type, length);
+ leaf = op->ContinueOperation(&type, &length);
+ }
+ return visitor.GetLength();
+}
+
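+ (The visitor threads last_character through the loop because UTF-8 length
+ is not independent per UTF-16 code unit: a surrogate pair is two units but
+ a single 4-byte sequence. Illustrative accounting, matching the "exactly
+ one character will be advanced" note further down:
+
+     // "a"     1 UTF-16 unit  -> 1 UTF-8 byte
+     // U+20AC  1 UTF-16 unit  -> 3 UTF-8 bytes
+     // U+1F600 2 UTF-16 units (0xD83D, 0xDE00) -> 4 UTF-8 bytes:
+     //   Utf8::Length(0xD83D, none)   == 3  (unmatched lead surrogate)
+     //   Utf8::Length(0xDE00, 0xD83D) == 1  (pair completes: 3 + 1 = 4)
+ )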
int String::Utf8Length() const {
i::Handle<i::String> str = Utils::OpenHandle(this);
- if (IsDeadCheck(str->GetIsolate(), "v8::String::Utf8Length()")) return 0;
- return i::Utf8Length(str);
-}
-
-
-// Will fail with a negative answer if the recursion depth is too high.
-static int RecursivelySerializeToUtf8(i::String* string,
- char* buffer,
- int start,
- int end,
- int recursion_budget,
- int32_t previous_character,
- int32_t* last_character) {
- int utf8_bytes = 0;
- while (true) {
- if (string->IsAsciiRepresentation()) {
- i::String::WriteToFlat(string, buffer, start, end);
- *last_character = unibrow::Utf16::kNoPreviousCharacter;
- return utf8_bytes + end - start;
+ i::Isolate* isolate = str->GetIsolate();
+ if (IsDeadCheck(isolate, "v8::String::Utf8Length()")) return 0;
+ return v8::Utf8Length(*str, isolate);
+}
+
+
+class Utf8WriterVisitor {
+ public:
+ Utf8WriterVisitor(char* buffer, int capacity)
+ : early_termination_(false),
+ last_character_(unibrow::Utf16::kNoPreviousCharacter),
+ buffer_(buffer),
+ start_(buffer),
+ capacity_(capacity),
+ utf16_chars_read_(0) {
+ }
+
+ static int WriteEndCharacter(uint16_t character,
+ int last_character,
+ int remaining,
+ char* const buffer) {
+ using namespace unibrow;
+ ASSERT(remaining > 0);
+ // We can't use a local buffer here because Encode needs to modify
+ // previous characters in the stream. We know, however, that
+ // exactly one character will be advanced.
+ if (Utf16::IsTrailSurrogate(character) &&
+ Utf16::IsLeadSurrogate(last_character)) {
+ int written = Utf8::Encode(buffer, character, last_character);
+ ASSERT(written == 1);
+ return written;
+ }
+ // Use a scratch buffer to check the required characters.
+ char temp_buffer[Utf8::kMaxEncodedSize];
+ // Can't encode using last_character as gcc has array bounds issues.
+ int written = Utf8::Encode(temp_buffer,
+ character,
+ unibrow::Utf16::kNoPreviousCharacter);
+ // Won't fit.
+ if (written > remaining) return 0;
+ // Copy over the character from temp_buffer.
+ for (int j = 0; j < written; j++) {
+ buffer[j] = temp_buffer[j];
}
- switch (i::StringShape(string).representation_tag()) {
- case i::kExternalStringTag: {
- const uint16_t* data = i::ExternalTwoByteString::cast(string)->
- ExternalTwoByteStringGetData(0);
- char* current = buffer;
- for (int i = start; i < end; i++) {
- uint16_t character = data[i];
- current +=
- unibrow::Utf8::Encode(current, character, previous_character);
- previous_character = character;
- }
- *last_character = previous_character;
- return static_cast<int>(utf8_bytes + current - buffer);
+ return written;
+ }
+
+ template<typename Char>
+ void Visit(const Char* chars, const int length) {
+ using namespace unibrow;
+ // TODO(dcarney): Add back ascii fast path.
+ ASSERT(!early_termination_);
+ ASSERT(length > 0);
+ // Copy state to stack.
+ char* buffer = buffer_;
+ int last_character = last_character_;
+ int i = 0;
+ // Do a fast loop where there is no exit capacity check.
+ while (true) {
+ int fast_length;
+ if (capacity_ == -1) {
+ fast_length = length;
+ } else {
+ int remaining_capacity = capacity_ - static_cast<int>(buffer - start_);
+ // Need enough space to write everything but one character.
+ STATIC_ASSERT(Utf16::kMaxExtraUtf8BytesForOneUtf16CodeUnit == 3);
+ int writable_length = (remaining_capacity - 3) / 3;
+ // Need to drop into slow loop.
+ if (writable_length <= 0) break;
+ fast_length = i + writable_length;
+ if (fast_length > length) fast_length = length;
}
- case i::kSeqStringTag: {
- const uint16_t* data =
- i::SeqTwoByteString::cast(string)->SeqTwoByteStringGetData(0);
- char* current = buffer;
- for (int i = start; i < end; i++) {
- uint16_t character = data[i];
- current +=
- unibrow::Utf8::Encode(current, character, previous_character);
- previous_character = character;
- }
- *last_character = previous_character;
- return static_cast<int>(utf8_bytes + current - buffer);
+ // Write the characters to the stream.
+ for (; i < fast_length; i++) {
+ uint16_t character = *chars++;
+ buffer += Utf8::Encode(buffer, character, last_character);
+ last_character = character;
+ ASSERT(capacity_ == -1 || (buffer - start_) <= capacity_);
}
- case i::kSlicedStringTag: {
- i::SlicedString* slice = i::SlicedString::cast(string);
- unsigned offset = slice->offset();
- string = slice->parent();
- start += offset;
- end += offset;
- continue;
+ // Array is fully written. Exit.
+ if (fast_length == length) {
+ // Write state back out to object.
+ last_character_ = last_character;
+ buffer_ = buffer;
+ utf16_chars_read_ += i;
+ return;
}
- case i::kConsStringTag: {
- i::ConsString* cons_string = i::ConsString::cast(string);
- i::String* first = cons_string->first();
- int boundary = first->length();
- if (start >= boundary) {
- // Only need RHS.
- string = cons_string->second();
- start -= boundary;
- end -= boundary;
- continue;
- } else if (end <= boundary) {
- // Only need LHS.
- string = first;
- } else {
- if (recursion_budget == 0) return -1;
- int extra_utf8_bytes =
- RecursivelySerializeToUtf8(first,
- buffer,
- start,
- boundary,
- recursion_budget - 1,
- previous_character,
- &previous_character);
- if (extra_utf8_bytes < 0) return extra_utf8_bytes;
- buffer += extra_utf8_bytes;
- utf8_bytes += extra_utf8_bytes;
- string = cons_string->second();
- start = 0;
- end -= boundary;
- }
+ }
+ ASSERT(capacity_ != -1);
+ // Slow loop. Must check capacity on each iteration.
+ int remaining_capacity = capacity_ - static_cast<int>(buffer - start_);
+ ASSERT(remaining_capacity >= 0);
+ for (; i < length && remaining_capacity > 0; i++) {
+ uint16_t character = *chars++;
+ int written = WriteEndCharacter(character,
+ last_character,
+ remaining_capacity,
+ buffer);
+ if (written == 0) {
+ early_termination_ = true;
+ break;
}
+ buffer += written;
+ remaining_capacity -= written;
+ last_character = character;
}
+ // Write state back out to object.
+ last_character_ = last_character;
+ buffer_ = buffer;
+ utf16_chars_read_ += i;
}
- UNREACHABLE();
- return 0;
-}
+ inline bool IsDone() {
+ return early_termination_;
+ }
-bool String::MayContainNonAscii() const {
- i::Handle<i::String> str = Utils::OpenHandle(this);
- if (IsDeadCheck(str->GetIsolate(), "v8::String::MayContainNonAscii()")) {
- return false;
+ inline void VisitOneByteString(const uint8_t* chars, unsigned length) {
+ Visit(chars, static_cast<int>(length));
+ }
+
+ inline void VisitTwoByteString(const uint16_t* chars, unsigned length) {
+ Visit(chars, static_cast<int>(length));
}
- return !str->HasOnlyAsciiChars();
-}
+
+ inline int CompleteWrite(bool write_null, int* utf16_chars_read_out) {
+ // Write out the number of utf16 characters written to the stream.
+ if (utf16_chars_read_out != NULL) {
+ *utf16_chars_read_out = utf16_chars_read_;
+ }
+ // Only null terminate if all of the string was written and there's space.
+ if (write_null &&
+ !early_termination_ &&
+ (capacity_ == -1 || (buffer_ - start_) < capacity_)) {
+ *buffer_++ = '\0';
+ }
+ return static_cast<int>(buffer_ - start_);
+ }
+
+ private:
+ bool early_termination_;
+ int last_character_;
+ char* buffer_;
+ char* const start_;
+ int capacity_;
+ int utf16_chars_read_;
+ DISALLOW_IMPLICIT_CONSTRUCTORS(Utf8WriterVisitor);
+};
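+ (The fast-loop bound deserves one worked instance: a single UTF-16 code
+ unit encodes to at most 3 UTF-8 bytes (the STATIC_ASSERT above), so
+ reserving 3 bytes of slack and dividing the rest by 3 guarantees that
+ writable_length units always fit. For example:
+
+     // capacity_ = 100, 10 bytes already written:
+     //   remaining_capacity = 90
+     //   writable_length    = (90 - 3) / 3 = 29 code units
+     // Worst case they emit 3 * 29 = 87 bytes; 10 + 87 = 97 <= 100, so
+     // ASSERT((buffer - start_) <= capacity_) in the fast loop cannot fire.
+ )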
uint32_t String::Hash() const {
@@ -3986,20 +4155,20 @@ String::CompleteHashData String::CompleteHash() const {
CompleteHashData result;
result.length = str->length();
result.hash = str->Hash();
- if (str->IsSeqAsciiString() && str->IsSymbol())
+ if (str->IsSeqOneByteString() && str->IsSymbol())
result.symbol_id = i::SeqString::cast(*str)->symbol_id();
return result;
}
uint32_t String::ComputeHash(uint16_t *string, int length) {
- return i::HashSequentialString<i::uc16>(string, length, i::kZeroHashSeed) >>
+ return i::StringHasher::HashSequentialString<i::uc16>(
+ string, length, i::kZeroHashSeed) >>
i::String::kHashShift;
}
uint32_t String::ComputeHash(char *string, int length) {
- return i::HashSequentialString<char>(string, length, i::kZeroHashSeed) >>
+ return i::StringHasher::HashSequentialString<char>(
+ string, length, i::kZeroHashSeed) >>
i::String::kHashShift;
}
@@ -4016,123 +4185,23 @@ int String::WriteUtf8(char* buffer,
if (options & HINT_MANY_WRITES_EXPECTED) {
FlattenString(str); // Flatten the string for efficiency.
}
- int string_length = str->length();
- if (str->IsAsciiRepresentation()) {
- int len;
- if (capacity == -1) {
- capacity = str->length() + 1;
- len = string_length;
- } else {
- len = i::Min(capacity, str->length());
- }
- i::String::WriteToFlat(*str, buffer, 0, len);
- if (nchars_ref != NULL) *nchars_ref = len;
- if (!(options & NO_NULL_TERMINATION) && capacity > len) {
- buffer[len] = '\0';
- return len + 1;
- }
- return len;
- }
-
- if (capacity == -1 || capacity / 3 >= string_length) {
- int32_t previous = unibrow::Utf16::kNoPreviousCharacter;
- const int kMaxRecursion = 100;
- int utf8_bytes =
- RecursivelySerializeToUtf8(*str,
- buffer,
- 0,
- string_length,
- kMaxRecursion,
- previous,
- &previous);
- if (utf8_bytes >= 0) {
- // Success serializing with recursion.
- if ((options & NO_NULL_TERMINATION) == 0 &&
- (capacity > utf8_bytes || capacity == -1)) {
- buffer[utf8_bytes++] = '\0';
- }
- if (nchars_ref != NULL) *nchars_ref = string_length;
- return utf8_bytes;
- }
- FlattenString(str);
- // Recurse once. This time around the string is flat and the serializing
- // with recursion will certainly succeed.
- return WriteUtf8(buffer, capacity, nchars_ref, options);
- } else if (capacity >= string_length) {
- // First check that the buffer is large enough. If it is, then recurse
- // once without a capacity limit, which will get into the other branch of
- // this 'if'.
- int utf8_bytes = i::Utf8Length(str);
- if ((options & NO_NULL_TERMINATION) == 0) utf8_bytes++;
- if (utf8_bytes <= capacity) {
- return WriteUtf8(buffer, -1, nchars_ref, options);
- }
- }
-
- // Slow case.
- i::StringInputBuffer& write_input_buffer = *isolate->write_input_buffer();
- isolate->string_tracker()->RecordWrite(str);
-
- write_input_buffer.Reset(0, *str);
- int len = str->length();
- // Encode the first K - 3 bytes directly into the buffer since we
- // know there's room for them. If no capacity is given we copy all
- // of them here.
- int fast_end = capacity - (unibrow::Utf8::kMaxEncodedSize - 1);
- int i;
- int pos = 0;
- int nchars = 0;
- int previous = unibrow::Utf16::kNoPreviousCharacter;
- for (i = 0; i < len && (capacity == -1 || pos < fast_end); i++) {
- i::uc32 c = write_input_buffer.GetNext();
- int written = unibrow::Utf8::Encode(buffer + pos, c, previous);
- pos += written;
- nchars++;
- previous = c;
- }
- if (i < len) {
- // For the last characters we need to check the length for each one
- // because they may be longer than the remaining space in the
- // buffer.
- char intermediate[unibrow::Utf8::kMaxEncodedSize];
- for (; i < len && pos < capacity; i++) {
- i::uc32 c = write_input_buffer.GetNext();
- if (unibrow::Utf16::IsTrailSurrogate(c) &&
- unibrow::Utf16::IsLeadSurrogate(previous)) {
- // We can't use the intermediate buffer here because the encoding
- // of surrogate pairs is done under assumption that you can step
- // back and fix the UTF8 stream. Luckily we only need space for one
- // more byte, so there is always space.
- ASSERT(pos < capacity);
- int written = unibrow::Utf8::Encode(buffer + pos, c, previous);
- ASSERT(written == 1);
- pos += written;
- nchars++;
- } else {
- int written =
- unibrow::Utf8::Encode(intermediate,
- c,
- unibrow::Utf16::kNoPreviousCharacter);
- if (pos + written <= capacity) {
- for (int j = 0; j < written; j++) {
- buffer[pos + j] = intermediate[j];
- }
- pos += written;
- nchars++;
- } else {
- // We've reached the end of the buffer
- break;
- }
- }
- previous = c;
+ Utf8WriterVisitor writer(buffer, capacity);
+ i::ConsStringIteratorOp* op = isolate->write_iterator();
+ op->Reset();
+ int32_t type = str->map()->instance_type();
+ unsigned str_length = static_cast<unsigned>(str->length());
+ if (str_length != 0) {
+ i::String::Visit(*str, 0, writer, *op, type, str_length);
+ while (!writer.IsDone()) {
+ unsigned length_out;
+ i::String* next = op->ContinueOperation(&type, &length_out);
+ if (next == NULL) break;
+ // TODO(dcarney): need an asserting null op.
+ i::ConsStringNullOp null_op;
+ i::String::Visit(next, 0, writer, null_op, type, length_out);
}
}
- if (nchars_ref != NULL) *nchars_ref = nchars;
- if (!(options & NO_NULL_TERMINATION) &&
- (i == len && (capacity == -1 || pos < capacity))) {
- buffer[pos++] = '\0';
- }
- return pos;
+ return writer.CompleteWrite(!(options & NO_NULL_TERMINATION), nchars_ref);
}
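 (Externally, the rewritten WriteUtf8() keeps its old contract: byte count
 returned, optional NUL termination, nchars_ref reporting UTF-16 units
 consumed; only the traversal machinery changed. A hedged call-site sketch:

     #include <v8.h>

     void Dump(v8::Handle<v8::String> s) {
       char buf[64];
       int nchars = 0;
       // Returns bytes written, including the terminating NUL when it
       // fits; nchars receives the UTF-16 code units consumed.
       int bytes = s->WriteUtf8(buf, sizeof(buf), &nchars);
       (void)bytes;
     }
 )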
@@ -4151,11 +4220,14 @@ int String::WriteAscii(char* buffer,
FlattenString(str); // Flatten the string for efficiency.
}
- if (str->IsAsciiRepresentation()) {
- // WriteToFlat is faster than using the StringInputBuffer.
+ if (str->HasOnlyAsciiChars()) {
+ // WriteToFlat is faster than using the StringCharacterStream.
if (length == -1) length = str->length() + 1;
int len = i::Min(length, str->length() - start);
- i::String::WriteToFlat(*str, buffer, start, start + len);
+ i::String::WriteToFlat(*str,
+ reinterpret_cast<uint8_t*>(buffer),
+ start,
+ start + len);
if (!(options & PRESERVE_ASCII_NULL)) {
for (int i = 0; i < len; i++) {
if (buffer[i] == '\0') buffer[i] = ' ';
@@ -4167,16 +4239,15 @@ int String::WriteAscii(char* buffer,
return len;
}
- i::StringInputBuffer& write_input_buffer = *isolate->write_input_buffer();
int end = length;
if ((length == -1) || (length > str->length() - start)) {
end = str->length() - start;
}
if (end < 0) return 0;
- write_input_buffer.Reset(start, *str);
+ i::StringCharacterStream write_stream(*str, isolate->write_iterator(), start);
int i;
for (i = 0; i < end; i++) {
- char c = static_cast<char>(write_input_buffer.GetNext());
+ char c = static_cast<char>(write_stream.GetNext());
if (c == '\0' && !(options & PRESERVE_ASCII_NULL)) c = ' ';
buffer[i] = c;
}
@@ -4187,20 +4258,22 @@ int String::WriteAscii(char* buffer,
}
-int String::Write(uint16_t* buffer,
- int start,
- int length,
- int options) const {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+template<typename CharType>
+static inline int WriteHelper(const String* string,
+ CharType* buffer,
+ int start,
+ int length,
+ int options) {
+ i::Isolate* isolate = Utils::OpenHandle(string)->GetIsolate();
if (IsDeadCheck(isolate, "v8::String::Write()")) return 0;
LOG_API(isolate, "String::Write");
ENTER_V8(isolate);
ASSERT(start >= 0 && length >= -1);
- i::Handle<i::String> str = Utils::OpenHandle(this);
+ i::Handle<i::String> str = Utils::OpenHandle(string);
isolate->string_tracker()->RecordWrite(str);
- if (options & HINT_MANY_WRITES_EXPECTED) {
+ if (options & String::HINT_MANY_WRITES_EXPECTED) {
// Flatten the string for efficiency. This applies whether we are
- // using StringInputBuffer or Get(i) to access the characters.
+ // using StringCharacterStream or Get(i) to access the characters.
FlattenString(str);
}
int end = start + length;
@@ -4208,7 +4281,7 @@ int String::Write(uint16_t* buffer,
end = str->length();
if (end < 0) return 0;
i::String::WriteToFlat(*str, buffer, start, end);
- if (!(options & NO_NULL_TERMINATION) &&
+ if (!(options & String::NO_NULL_TERMINATION) &&
(length == -1 || end - start < length)) {
buffer[end - start] = '\0';
}
@@ -4216,6 +4289,22 @@ int String::Write(uint16_t* buffer,
}
+int String::WriteOneByte(uint8_t* buffer,
+ int start,
+ int length,
+ int options) const {
+ return WriteHelper(this, buffer, start, length, options);
+}
+
+
+int String::Write(uint16_t* buffer,
+ int start,
+ int length,
+ int options) const {
+ return WriteHelper(this, buffer, start, length, options);
+}
+
+
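+ (WriteOneByte() is new here; it shares WriteHelper() with the two-byte
+ Write(), differing only in the buffer type handed to
+ i::String::WriteToFlat(). A sketch, assuming the matching v8.h
+ declaration:
+
+     #include <v8.h>
+
+     int CopyNarrow(v8::Handle<v8::String> s, uint8_t* narrow, int cap) {
+       // Same start/length/options semantics as Write(uint16_t*, ...).
+       return s->WriteOneByte(narrow, 0, cap);
+     }
+ )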
bool v8::String::IsExternal() const {
i::Handle<i::String> str = Utils::OpenHandle(this);
if (IsDeadCheck(str->GetIsolate(), "v8::String::IsExternal()")) {
@@ -4266,7 +4355,7 @@ void v8::String::VerifyExternalStringResourceBase(
expectedEncoding = TWO_BYTE_ENCODING;
} else {
expected = NULL;
- expectedEncoding = str->IsAsciiRepresentation() ? ASCII_ENCODING
+ expectedEncoding = str->IsOneByteRepresentation() ? ASCII_ENCODING
: TWO_BYTE_ENCODING;
}
CHECK_EQ(expected, value);
@@ -4346,86 +4435,70 @@ int v8::Object::InternalFieldCount() {
}
-Local<Value> v8::Object::CheckedGetInternalField(int index) {
+static bool InternalFieldOK(i::Handle<i::JSObject> obj,
+ int index,
+ const char* location) {
+ return !IsDeadCheck(obj->GetIsolate(), location) &&
+ ApiCheck(index < obj->GetInternalFieldCount(),
+ location,
+ "Internal field out of bounds");
+}
+
+
+Local<Value> v8::Object::SlowGetInternalField(int index) {
i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
- if (IsDeadCheck(obj->GetIsolate(), "v8::Object::GetInternalField()")) {
- return Local<Value>();
- }
- if (!ApiCheck(index < obj->GetInternalFieldCount(),
- "v8::Object::GetInternalField()",
- "Reading internal field out of bounds")) {
- return Local<Value>();
- }
- i::Handle<i::Object> value(obj->GetInternalField(index));
- Local<Value> result = Utils::ToLocal(value);
-#ifdef DEBUG
- Local<Value> unchecked = UncheckedGetInternalField(index);
- ASSERT(unchecked.IsEmpty() || (unchecked == result));
-#endif
- return result;
+ const char* location = "v8::Object::GetInternalField()";
+ if (!InternalFieldOK(obj, index, location)) return Local<Value>();
+ i::Handle<i::Object> value(obj->GetInternalField(index), obj->GetIsolate());
+ return Utils::ToLocal(value);
}
void v8::Object::SetInternalField(int index, v8::Handle<Value> value) {
i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
- i::Isolate* isolate = obj->GetIsolate();
- if (IsDeadCheck(isolate, "v8::Object::SetInternalField()")) {
- return;
- }
- if (!ApiCheck(index < obj->GetInternalFieldCount(),
- "v8::Object::SetInternalField()",
- "Writing internal field out of bounds")) {
- return;
- }
- ENTER_V8(isolate);
+ const char* location = "v8::Object::SetInternalField()";
+ if (!InternalFieldOK(obj, index, location)) return;
i::Handle<i::Object> val = Utils::OpenHandle(*value);
obj->SetInternalField(index, *val);
+ ASSERT_EQ(value, GetInternalField(index));
}
-static bool CanBeEncodedAsSmi(void* ptr) {
- const uintptr_t address = reinterpret_cast<uintptr_t>(ptr);
- return ((address & i::kEncodablePointerMask) == 0);
+void* v8::Object::SlowGetAlignedPointerFromInternalField(int index) {
+ i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
+ const char* location = "v8::Object::GetAlignedPointerFromInternalField()";
+ if (!InternalFieldOK(obj, index, location)) return NULL;
+ return DecodeSmiToAligned(obj->GetInternalField(index), location);
}
-static i::Smi* EncodeAsSmi(void* ptr) {
- ASSERT(CanBeEncodedAsSmi(ptr));
- const uintptr_t address = reinterpret_cast<uintptr_t>(ptr);
- i::Smi* result = reinterpret_cast<i::Smi*>(address << i::kPointerToSmiShift);
- ASSERT(i::Internals::HasSmiTag(result));
- ASSERT_EQ(result, i::Smi::FromInt(result->value()));
- ASSERT_EQ(ptr, i::Internals::GetExternalPointerFromSmi(result));
- return result;
+void v8::Object::SetAlignedPointerInInternalField(int index, void* value) {
+ i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
+ const char* location = "v8::Object::SetAlignedPointerInInternalField()";
+ if (!InternalFieldOK(obj, index, location)) return;
+ obj->SetInternalField(index, EncodeAlignedAsSmi(value, location));
+ ASSERT_EQ(value, GetAlignedPointerFromInternalField(index));
}
-void v8::Object::SetPointerInInternalField(int index, void* value) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ENTER_V8(isolate);
- if (CanBeEncodedAsSmi(value)) {
- Utils::OpenHandle(this)->SetInternalField(index, EncodeAsSmi(value));
- } else {
- HandleScope scope;
- i::Handle<i::Foreign> foreign =
- isolate->factory()->NewForeign(
- reinterpret_cast<i::Address>(value), i::TENURED);
- if (!foreign.is_null()) {
- Utils::OpenHandle(this)->SetInternalField(index, *foreign);
- }
- }
- ASSERT_EQ(value, GetPointerFromInternalField(index));
+static void* ExternalValue(i::Object* obj) {
+ // Obscure semantics for undefined, but somehow checked in our unit tests...
+ if (obj->IsUndefined()) return NULL;
+ i::Object* foreign = i::JSObject::cast(obj)->GetInternalField(0);
+ return i::Foreign::cast(foreign)->foreign_address();
}
-void v8::Object::SetExternalResource(v8::Object::ExternalResource *resource) {
+void v8::Object::SetExternalResource(v8::Object::ExternalResource* resource) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ENTER_V8(isolate);
i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
- if (CanBeEncodedAsSmi(resource)) {
- obj->SetExternalResourceObject(EncodeAsSmi(resource));
+ if (resource != NULL) {
+ obj->SetExternalResourceObject(
+ *isolate->factory()->NewForeign(
+ reinterpret_cast<i::Address>(resource)));
} else {
- obj->SetExternalResourceObject(*isolate->factory()->NewForeign(static_cast<i::Address>((void *)resource)));
+ obj->SetExternalResourceObject(0);
}
if (!obj->IsSymbol()) {
isolate->heap()->external_string_table()->AddObject(*obj);
@@ -4433,13 +4506,12 @@ void v8::Object::SetExternalResource(v8::Object::ExternalResource *resource) {
}
-v8::Object::ExternalResource *v8::Object::GetExternalResource() {
+v8::Object::ExternalResource* v8::Object::GetExternalResource() {
i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
i::Object* value = obj->GetExternalResourceObject();
- if (value->IsSmi()) {
- return reinterpret_cast<v8::Object::ExternalResource*>(i::Internals::GetExternalPointerFromSmi(value));
- } else if (value->IsForeign()) {
- return reinterpret_cast<v8::Object::ExternalResource*>(i::Foreign::cast(value)->foreign_address());
+ if (value->IsForeign()) {
+ return reinterpret_cast<v8::Object::ExternalResource*>(
+ i::Foreign::cast(value)->foreign_address());
} else {
return NULL;
}
@@ -4503,23 +4575,18 @@ HeapStatistics::HeapStatistics(): total_heap_size_(0),
void v8::V8::GetHeapStatistics(HeapStatistics* heap_statistics) {
- if (!i::Isolate::Current()->IsInitialized()) {
+ i::Isolate* isolate = i::Isolate::UncheckedCurrent();
+ if (isolate == NULL || !isolate->IsInitialized()) {
 // Isolate is uninitialized, thus the heap is not configured yet.
- heap_statistics->set_total_heap_size(0);
- heap_statistics->set_total_heap_size_executable(0);
- heap_statistics->set_total_physical_size(0);
- heap_statistics->set_used_heap_size(0);
- heap_statistics->set_heap_size_limit(0);
+ heap_statistics->total_heap_size_ = 0;
+ heap_statistics->total_heap_size_executable_ = 0;
+ heap_statistics->total_physical_size_ = 0;
+ heap_statistics->used_heap_size_ = 0;
+ heap_statistics->heap_size_limit_ = 0;
return;
}
-
- i::Heap* heap = i::Isolate::Current()->heap();
- heap_statistics->set_total_heap_size(heap->CommittedMemory());
- heap_statistics->set_total_heap_size_executable(
- heap->CommittedMemoryExecutable());
- heap_statistics->set_total_physical_size(heap->CommittedPhysicalMemory());
- heap_statistics->set_used_heap_size(heap->SizeOfObjects());
- heap_statistics->set_heap_size_limit(heap->MaxReserved());
+ Isolate* ext_isolate = reinterpret_cast<Isolate*>(isolate);
+ return ext_isolate->GetHeapStatistics(heap_statistics);
}
@@ -4530,30 +4597,47 @@ void v8::V8::VisitExternalResources(ExternalResourceVisitor* visitor) {
}
+class VisitorAdapter : public i::ObjectVisitor {
+ public:
+ explicit VisitorAdapter(PersistentHandleVisitor* visitor)
+ : visitor_(visitor) {}
+ virtual void VisitPointers(i::Object** start, i::Object** end) {
+ UNREACHABLE();
+ }
+ virtual void VisitEmbedderReference(i::Object** p, uint16_t class_id) {
+ visitor_->VisitPersistentHandle(ToApi<Value>(i::Handle<i::Object>(p)),
+ class_id);
+ }
+ private:
+ PersistentHandleVisitor* visitor_;
+};
+
+
void v8::V8::VisitHandlesWithClassIds(PersistentHandleVisitor* visitor) {
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::V8::VisitHandlesWithClassId");
i::AssertNoAllocation no_allocation;
- class VisitorAdapter : public i::ObjectVisitor {
- public:
- explicit VisitorAdapter(PersistentHandleVisitor* visitor)
- : visitor_(visitor) {}
- virtual void VisitPointers(i::Object** start, i::Object** end) {
- UNREACHABLE();
- }
- virtual void VisitEmbedderReference(i::Object** p, uint16_t class_id) {
- visitor_->VisitPersistentHandle(ToApi<Value>(i::Handle<i::Object>(p)),
- class_id);
- }
- private:
- PersistentHandleVisitor* visitor_;
- } visitor_adapter(visitor);
+ VisitorAdapter visitor_adapter(visitor);
isolate->global_handles()->IterateAllRootsWithClassIds(&visitor_adapter);
}
+void v8::V8::VisitHandlesForPartialDependence(
+ Isolate* exported_isolate, PersistentHandleVisitor* visitor) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(exported_isolate);
+ ASSERT(isolate == i::Isolate::Current());
+ IsDeadCheck(isolate, "v8::V8::VisitHandlesForPartialDependence");
+
+ i::AssertNoAllocation no_allocation;
+
+ VisitorAdapter visitor_adapter(visitor);
+ isolate->global_handles()->IterateAllRootsInNewSpaceWithClassIds(
+ &visitor_adapter);
+}
+
+
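+ (VisitHandlesForPartialDependence() reuses the VisitorAdapter hoisted to
+ file scope above, but iterates only new-space roots, so an embedder can
+ scan recently created wrappers without a full heap pass. A minimal
+ visitor, assuming this era's PersistentHandleVisitor interface:
+
+     class MyVisitor : public v8::PersistentHandleVisitor {
+      public:
+       virtual void VisitPersistentHandle(v8::Persistent<v8::Value> value,
+                                          uint16_t class_id) {
+         // Invoked once per global handle tagged with a class id.
+       }
+     };
+
+     // Given the current v8::Isolate* isolate:
+     MyVisitor visitor;
+     v8::V8::VisitHandlesForPartialDependence(isolate, &visitor);
+ )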
bool v8::V8::IdleNotification(int hint) {
// Returning true tells the caller that it need not
// continue to call IdleNotification.
@@ -4643,7 +4727,6 @@ Persistent<Context> v8::Context::New(
// Create the environment.
env = isolate->bootstrapper()->CreateEnvironment(
- isolate,
Utils::OpenHandle(*global_object, true),
proxy_template,
extensions);
@@ -4699,7 +4782,7 @@ Handle<Value> v8::Context::GetSecurityToken() {
}
i::Handle<i::Context> env = Utils::OpenHandle(this);
i::Object* security_token = env->security_token();
- i::Handle<i::Object> token_handle(security_token);
+ i::Handle<i::Object> token_handle(security_token, isolate);
return Utils::ToLocal(token_handle);
}
@@ -4715,6 +4798,12 @@ bool Context::InContext() {
}
+v8::Isolate* Context::GetIsolate() {
+ i::Handle<i::Context> env = Utils::OpenHandle(this);
+ return reinterpret_cast<Isolate*>(env->GetIsolate());
+}
+
+
v8::Local<v8::Context> Context::GetEntered() {
i::Isolate* isolate = i::Isolate::Current();
if (!EnsureInitializedForIsolate(isolate, "v8::Context::GetEntered()")) {
@@ -4760,38 +4849,42 @@ v8::Local<v8::Object> Context::GetCallingQmlGlobal() {
}
i::Context *context = isolate->context();
- i::JavaScriptFrameIterator it;
+ i::JavaScriptFrameIterator it(isolate);
if (it.done()) return Local<Object>();
context = i::Context::cast(it.frame()->context());
if (!context->qml_global_object()->IsUndefined()) {
- i::Handle<i::Object> qmlglobal(context->qml_global_object());
+ i::Handle<i::Object> qmlglobal(context->qml_global_object(), isolate);
return Utils::ToLocal(i::Handle<i::JSObject>::cast(qmlglobal));
} else {
- return Local<Object>();
+ return Local<Object>();
}
}
-v8::Local<v8::Value> Context::GetCallingScriptData()
-{
+
+v8::Local<v8::Value> Context::GetCallingScriptData() {
i::Isolate* isolate = i::Isolate::Current();
if (IsDeadCheck(isolate, "v8::Context::GetCallingScriptData()")) {
return Local<Object>();
}
- i::JavaScriptFrameIterator it;
+ i::JavaScriptFrameIterator it(isolate);
if (it.done()) return Local<Object>();
- i::Handle<i::Script> script(i::Script::cast(i::JSFunction::cast(it.frame()->function())->shared()->script()));
- return Utils::ToLocal(i::Handle<i::Object>(script->data()));
+ i::Handle<i::JSFunction> function =
+ i::Handle<i::JSFunction>(i::JSFunction::cast(it.frame()->function()));
+ i::Handle<i::Script> script(i::Script::cast(function->shared()->script()));
+ return Utils::ToLocal(i::Handle<i::Object>(script->data(), isolate));
}
+
v8::Local<v8::Object> Context::Global() {
- if (IsDeadCheck(i::Isolate::Current(), "v8::Context::Global()")) {
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::Context::Global()")) {
return Local<v8::Object>();
}
i::Object** ctx = reinterpret_cast<i::Object**>(this);
i::Handle<i::Context> context =
i::Handle<i::Context>::cast(i::Handle<i::Object>(ctx));
- i::Handle<i::Object> global(context->global_proxy());
+ i::Handle<i::Object> global(context->global_proxy(), isolate);
return Utils::ToLocal(i::Handle<i::JSObject>::cast(global));
}
@@ -4859,21 +4952,11 @@ void Context::SetErrorMessageForCodeGenerationFromStrings(
i::Object** ctx = reinterpret_cast<i::Object**>(this);
i::Handle<i::Context> context =
i::Handle<i::Context>::cast(i::Handle<i::Object>(ctx));
- i::Handle<i::Object> error_handle = Utils::OpenHandle(*error);
+ i::Handle<i::String> error_handle = Utils::OpenHandle(*error);
context->set_error_message_for_code_gen_from_strings(*error_handle);
}
-void V8::SetWrapperClassId(i::Object** global_handle, uint16_t class_id) {
- i::GlobalHandles::SetWrapperClassId(global_handle, class_id);
-}
-
-
-uint16_t V8::GetWrapperClassId(internal::Object** global_handle) {
- return i::GlobalHandles::GetWrapperClassId(global_handle);
-}
-
-
Local<v8::Object> ObjectTemplate::NewInstance() {
i::Isolate* isolate = i::Isolate::Current();
ON_BAILOUT(isolate, "v8::ObjectTemplate::NewInstance()",
@@ -4912,74 +4995,20 @@ bool FunctionTemplate::HasInstance(v8::Handle<v8::Value> value) {
}
-static Local<External> ExternalNewImpl(void* data) {
- return Utils::ToLocal(FACTORY->NewForeign(static_cast<i::Address>(data)));
-}
-
-static void* ExternalValueImpl(i::Handle<i::Object> obj) {
- return reinterpret_cast<void*>(i::Foreign::cast(*obj)->foreign_address());
-}
-
-
-Local<Value> v8::External::Wrap(void* data) {
- i::Isolate* isolate = i::Isolate::Current();
- STATIC_ASSERT(sizeof(data) == sizeof(i::Address));
- EnsureInitializedForIsolate(isolate, "v8::External::Wrap()");
- LOG_API(isolate, "External::Wrap");
- ENTER_V8(isolate);
-
- v8::Local<v8::Value> result = CanBeEncodedAsSmi(data)
- ? Utils::ToLocal(i::Handle<i::Object>(EncodeAsSmi(data)))
- : v8::Local<v8::Value>(ExternalNewImpl(data));
-
- ASSERT_EQ(data, Unwrap(result));
- return result;
-}
-
-
-void* v8::Object::SlowGetPointerFromInternalField(int index) {
- i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
- i::Object* value = obj->GetInternalField(index);
- if (value->IsSmi()) {
- return i::Internals::GetExternalPointerFromSmi(value);
- } else if (value->IsForeign()) {
- return reinterpret_cast<void*>(i::Foreign::cast(value)->foreign_address());
- } else {
- return NULL;
- }
-}
-
-
-void* v8::External::FullUnwrap(v8::Handle<v8::Value> wrapper) {
- if (IsDeadCheck(i::Isolate::Current(), "v8::External::Unwrap()")) return 0;
- i::Handle<i::Object> obj = Utils::OpenHandle(*wrapper);
- void* result;
- if (obj->IsSmi()) {
- result = i::Internals::GetExternalPointerFromSmi(*obj);
- } else if (obj->IsForeign()) {
- result = ExternalValueImpl(obj);
- } else {
- result = NULL;
- }
- ASSERT_EQ(result, QuickUnwrap(wrapper));
- return result;
-}
-
-
-Local<External> v8::External::New(void* data) {
- STATIC_ASSERT(sizeof(data) == sizeof(i::Address));
+Local<External> v8::External::New(void* value) {
+ STATIC_ASSERT(sizeof(value) == sizeof(i::Address));
i::Isolate* isolate = i::Isolate::Current();
EnsureInitializedForIsolate(isolate, "v8::External::New()");
LOG_API(isolate, "External::New");
ENTER_V8(isolate);
- return ExternalNewImpl(data);
+ i::Handle<i::JSObject> external = isolate->factory()->NewExternal(value);
+ return Utils::ExternalToLocal(external);
}
void* External::Value() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::External::Value()")) return 0;
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
- return ExternalValueImpl(obj);
+ if (IsDeadCheck(i::Isolate::Current(), "v8::External::Value()")) return NULL;
+ return ExternalValue(*Utils::OpenHandle(this));
}
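// A minimal sketch of the reworked External API (assuming an initialized V8
// of this vintage): External::New() now allocates a JSObject-backed
// external, so the old Smi fast path, Wrap()/FullUnwrap() and
// SlowGetPointerFromInternalField() are gone, and Value() simply reads the
// stored pointer back.

#include <v8.h>

void* RoundTrip(void* payload) {
  v8::HandleScope scope;  // this API vintage still uses the no-arg form
  v8::Local<v8::External> ext = v8::External::New(payload);
  return ext->Value();  // yields 'payload' again
}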
@@ -4989,7 +5018,7 @@ Local<String> v8::String::Empty() {
return v8::Local<String>();
}
LOG_API(isolate, "String::Empty()");
- return Utils::ToLocal(isolate->factory()->empty_symbol());
+ return Utils::ToLocal(isolate->factory()->empty_string());
}
@@ -5114,7 +5143,7 @@ bool v8::String::MakeExternal(v8::String::ExternalStringResource* resource) {
}
CHECK(resource && resource->data());
bool result = obj->MakeExternal(resource);
- if (result && !obj->IsSymbol()) {
+ if (result && !obj->IsInternalizedString()) {
isolate->heap()->external_string_table()->AddString(*obj);
}
return result;
@@ -5151,7 +5180,7 @@ bool v8::String::MakeExternal(
}
CHECK(resource && resource->data());
bool result = obj->MakeExternal(resource);
- if (result && !obj->IsSymbol()) {
+ if (result && !obj->IsInternalizedString()) {
isolate->heap()->external_string_table()->AddString(*obj);
}
return result;
@@ -5208,8 +5237,10 @@ Local<v8::Value> v8::BooleanObject::New(bool value) {
EnsureInitializedForIsolate(isolate, "v8::BooleanObject::New()");
LOG_API(isolate, "BooleanObject::New");
ENTER_V8(isolate);
- i::Handle<i::Object> boolean(value ? isolate->heap()->true_value()
- : isolate->heap()->false_value());
+ i::Handle<i::Object> boolean(value
+ ? isolate->heap()->true_value()
+ : isolate->heap()->false_value(),
+ isolate);
i::Handle<i::Object> obj = isolate->factory()->ToObject(boolean);
return Utils::ToLocal(obj);
}
@@ -5288,7 +5319,8 @@ void v8::Date::DateTimeConfigurationChangeNotification() {
i::HandleScope scope(isolate);
// Get the function ResetDateCache (defined in date.js).
i::Handle<i::String> func_name_str =
- isolate->factory()->LookupAsciiSymbol("ResetDateCache");
+ isolate->factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("ResetDateCache"));
i::MaybeObject* result =
isolate->js_builtins_object()->GetProperty(*func_name_str);
i::Object* object_func;
@@ -5312,14 +5344,14 @@ void v8::Date::DateTimeConfigurationChangeNotification() {
static i::Handle<i::String> RegExpFlagsToString(RegExp::Flags flags) {
- char flags_buf[3];
+ uint8_t flags_buf[3];
int num_flags = 0;
if ((flags & RegExp::kGlobal) != 0) flags_buf[num_flags++] = 'g';
if ((flags & RegExp::kMultiline) != 0) flags_buf[num_flags++] = 'm';
if ((flags & RegExp::kIgnoreCase) != 0) flags_buf[num_flags++] = 'i';
ASSERT(num_flags <= static_cast<int>(ARRAY_SIZE(flags_buf)));
- return FACTORY->LookupSymbol(
- i::Vector<const char>(flags_buf, num_flags));
+ return FACTORY->InternalizeOneByteString(
+ i::Vector<const uint8_t>(flags_buf, num_flags));
}
@@ -5423,8 +5455,8 @@ Local<String> v8::String::NewSymbol(const char* data, int length) {
LOG_API(isolate, "String::NewSymbol(char)");
ENTER_V8(isolate);
if (length == -1) length = i::StrLength(data);
- i::Handle<i::String> result =
- isolate->factory()->LookupSymbol(i::Vector<const char>(data, length));
+ i::Handle<i::String> result = isolate->factory()->InternalizeUtf8String(
+ i::Vector<const char>(data, length));
return Utils::ToLocal(result);
}
@@ -5487,14 +5519,18 @@ void V8::IgnoreOutOfMemoryException() {
}
-bool V8::AddMessageListener(MessageCallback that) {
+bool V8::AddMessageListener(MessageCallback that, Handle<Value> data) {
i::Isolate* isolate = i::Isolate::Current();
EnsureInitializedForIsolate(isolate, "v8::V8::AddMessageListener()");
ON_BAILOUT(isolate, "v8::V8::AddMessageListener()", return false);
ENTER_V8(isolate);
i::HandleScope scope(isolate);
NeanderArray listeners(isolate->factory()->message_listeners());
- listeners.add(isolate->factory()->NewForeign(FUNCTION_ADDR(that)));
+ NeanderObject obj(2);
+ obj.set(0, *isolate->factory()->NewForeign(FUNCTION_ADDR(that)));
+ obj.set(1, data.IsEmpty() ? isolate->heap()->undefined_value()
+ : *Utils::OpenHandle(*data));
+ listeners.add(obj.value());
return true;
}
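// Sketch of the extended registration API: each listener is now stored as a
// (callback, data) pair, and 'data' is handed back to the callback. Names
// below are illustrative only.

#include <v8.h>

static void OnMessage(v8::Handle<v8::Message> message,
                      v8::Handle<v8::Value> data) {
  // 'data' is the value supplied at registration time, or undefined when
  // no data argument was given.
}

void InstallListener() {
  v8::HandleScope scope;
  v8::V8::AddMessageListener(OnMessage, v8::String::New("listener-tag"));
}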
@@ -5509,7 +5545,8 @@ void V8::RemoveMessageListeners(MessageCallback that) {
for (int i = 0; i < listeners.length(); i++) {
if (listeners.get(i)->IsUndefined()) continue; // skip deleted ones
- i::Handle<i::Foreign> callback_obj(i::Foreign::cast(listeners.get(i)));
+ NeanderObject listener(i::JSObject::cast(listeners.get(i)));
+ i::Handle<i::Foreign> callback_obj(i::Foreign::cast(listener.get(0)));
if (callback_obj->foreign_address() == FUNCTION_ADDR(that)) {
listeners.set(i, isolate->heap()->undefined_value());
}
@@ -5549,13 +5586,6 @@ void V8::SetAddHistogramSampleFunction(AddHistogramSampleCallback callback) {
SetAddHistogramSampleFunction(callback);
}
-void V8::EnableSlidingStateWindow() {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::V8::EnableSlidingStateWindow()")) return;
- isolate->logger()->EnableSlidingStateWindow();
-}
-
-
void V8::SetFailedAccessCheckCallbackFunction(
FailedAccessCheckCallback callback) {
i::Isolate* isolate = i::Isolate::Current();
@@ -5569,7 +5599,8 @@ void V8::SetFailedAccessCheckCallbackFunction(
void V8::SetUserObjectComparisonCallbackFunction(
UserObjectComparisonCallback callback) {
i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::V8::SetUserObjectComparisonCallbackFunction()")) {
+ if (IsDeadCheck(isolate,
+ "v8::V8::SetUserObjectComparisonCallbackFunction()")) {
return;
}
isolate->SetUserObjectComparisonCallback(callback);
@@ -5587,6 +5618,19 @@ void V8::AddObjectGroup(Persistent<Value>* objects,
}
+void V8::AddObjectGroup(Isolate* exported_isolate,
+ Persistent<Value>* objects,
+ size_t length,
+ RetainedObjectInfo* info) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(exported_isolate);
+ ASSERT(isolate == i::Isolate::Current());
+ if (IsDeadCheck(isolate, "v8::V8::AddObjectGroup()")) return;
+ STATIC_ASSERT(sizeof(Persistent<Value>) == sizeof(i::Object**));
+ isolate->global_handles()->AddObjectGroup(
+ reinterpret_cast<i::Object***>(objects), length, info);
+}
+
+
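// Sketch of the new isolate-aware overload (handle names hypothetical):
// grouping tells the GC to retain or release the wrappers as one unit.

#include <v8.h>

void GroupWrappers(v8::Isolate* isolate,
                   v8::Persistent<v8::Value>* wrappers,
                   size_t count) {
  v8::V8::AddObjectGroup(isolate, wrappers, count, NULL);
}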
void V8::AddImplicitReferences(Persistent<Object> parent,
Persistent<Value>* children,
size_t length) {
@@ -5780,6 +5824,18 @@ void Isolate::Exit() {
}
+void Isolate::GetHeapStatistics(HeapStatistics* heap_statistics) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ i::Heap* heap = isolate->heap();
+ heap_statistics->total_heap_size_ = heap->CommittedMemory();
+ heap_statistics->total_heap_size_executable_ =
+ heap->CommittedMemoryExecutable();
+ heap_statistics->total_physical_size_ = heap->CommittedPhysicalMemory();
+ heap_statistics->used_heap_size_ = heap->SizeOfObjects();
+ heap_statistics->heap_size_limit_ = heap->MaxReserved();
+}
+
+
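// Sketch: heap statistics are now queryable per isolate rather than only
// through the static v8::V8 entry point.

#include <v8.h>
#include <cstdio>

void LogHeap(v8::Isolate* isolate) {
  v8::HeapStatistics stats;
  isolate->GetHeapStatistics(&stats);
  printf("heap: %u used / %u committed (limit %u)\n",
         static_cast<unsigned>(stats.used_heap_size()),
         static_cast<unsigned>(stats.total_heap_size()),
         static_cast<unsigned>(stats.heap_size_limit()));
}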
String::Utf8Value::Utf8Value(v8::Handle<v8::Value> obj)
: str_(NULL), length_(0) {
i::Isolate* isolate = i::Isolate::Current();
@@ -5791,7 +5847,7 @@ String::Utf8Value::Utf8Value(v8::Handle<v8::Value> obj)
Handle<String> str = obj->ToString();
if (str.IsEmpty()) return;
i::Handle<i::String> i_str = Utils::OpenHandle(*str);
- length_ = i::Utf8Length(i_str);
+ length_ = v8::Utf8Length(*i_str, isolate);
str_ = i::NewArray<char>(length_ + 1);
str->WriteUtf8(str_);
}
@@ -5855,7 +5911,7 @@ Local<Value> Exception::RangeError(v8::Handle<v8::String> raw_message) {
i::Handle<i::Object> result = isolate->factory()->NewRangeError(message);
error = *result;
}
- i::Handle<i::Object> result(error);
+ i::Handle<i::Object> result(error, isolate);
return Utils::ToLocal(result);
}
@@ -5872,7 +5928,7 @@ Local<Value> Exception::ReferenceError(v8::Handle<v8::String> raw_message) {
isolate->factory()->NewReferenceError(message);
error = *result;
}
- i::Handle<i::Object> result(error);
+ i::Handle<i::Object> result(error, isolate);
return Utils::ToLocal(result);
}
@@ -5888,7 +5944,7 @@ Local<Value> Exception::SyntaxError(v8::Handle<v8::String> raw_message) {
i::Handle<i::Object> result = isolate->factory()->NewSyntaxError(message);
error = *result;
}
- i::Handle<i::Object> result(error);
+ i::Handle<i::Object> result(error, isolate);
return Utils::ToLocal(result);
}
@@ -5904,7 +5960,7 @@ Local<Value> Exception::TypeError(v8::Handle<v8::String> raw_message) {
i::Handle<i::Object> result = isolate->factory()->NewTypeError(message);
error = *result;
}
- i::Handle<i::Object> result(error);
+ i::Handle<i::Object> result(error, isolate);
return Utils::ToLocal(result);
}
@@ -5920,7 +5976,7 @@ Local<Value> Exception::Error(v8::Handle<v8::String> raw_message) {
i::Handle<i::Object> result = isolate->factory()->NewError(message);
error = *result;
}
- i::Handle<i::Object> result(error);
+ i::Handle<i::Object> result(error, isolate);
return Utils::ToLocal(result);
}
@@ -6122,9 +6178,9 @@ Local<Value> Debug::GetMirror(v8::Handle<v8::Value> obj) {
i::Debug* isolate_debug = isolate->debug();
isolate_debug->Load();
i::Handle<i::JSObject> debug(isolate_debug->debug_context()->global_object());
- i::Handle<i::String> name =
- isolate->factory()->LookupAsciiSymbol("MakeMirror");
- i::Handle<i::Object> fun_obj = i::GetProperty(debug, name);
+ i::Handle<i::String> name = isolate->factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("MakeMirror"));
+ i::Handle<i::Object> fun_obj = i::GetProperty(isolate, debug, name);
i::Handle<i::JSFunction> fun = i::Handle<i::JSFunction>::cast(fun_obj);
v8::Handle<v8::Function> v8_fun = Utils::ToLocal(fun);
const int kArgc = 1;
@@ -6185,11 +6241,11 @@ Handle<String> CpuProfileNode::GetFunctionName() const {
const i::CodeEntry* entry = node->entry();
if (!entry->has_name_prefix()) {
return Handle<String>(ToApi<String>(
- isolate->factory()->LookupAsciiSymbol(entry->name())));
+ isolate->factory()->InternalizeUtf8String(entry->name())));
} else {
return Handle<String>(ToApi<String>(isolate->factory()->NewConsString(
- isolate->factory()->LookupAsciiSymbol(entry->name_prefix()),
- isolate->factory()->LookupAsciiSymbol(entry->name()))));
+ isolate->factory()->InternalizeUtf8String(entry->name_prefix()),
+ isolate->factory()->InternalizeUtf8String(entry->name()))));
}
}
@@ -6198,7 +6254,7 @@ Handle<String> CpuProfileNode::GetScriptResourceName() const {
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::CpuProfileNode::GetScriptResourceName");
const i::ProfileNode* node = reinterpret_cast<const i::ProfileNode*>(this);
- return Handle<String>(ToApi<String>(isolate->factory()->LookupAsciiSymbol(
+ return Handle<String>(ToApi<String>(isolate->factory()->InternalizeUtf8String(
node->entry()->resource_name())));
}
@@ -6284,7 +6340,7 @@ Handle<String> CpuProfile::GetTitle() const {
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::CpuProfile::GetTitle");
const i::CpuProfile* profile = reinterpret_cast<const i::CpuProfile*>(this);
- return Handle<String>(ToApi<String>(isolate->factory()->LookupAsciiSymbol(
+ return Handle<String>(ToApi<String>(isolate->factory()->InternalizeUtf8String(
profile->title())));
}
@@ -6381,12 +6437,12 @@ Handle<Value> HeapGraphEdge::GetName() const {
case i::HeapGraphEdge::kInternal:
case i::HeapGraphEdge::kProperty:
case i::HeapGraphEdge::kShortcut:
- return Handle<String>(ToApi<String>(isolate->factory()->LookupAsciiSymbol(
- edge->name())));
+ return Handle<String>(ToApi<String>(
+ isolate->factory()->InternalizeUtf8String(edge->name())));
case i::HeapGraphEdge::kElement:
case i::HeapGraphEdge::kHidden:
- return Handle<Number>(ToApi<Number>(isolate->factory()->NewNumberFromInt(
- edge->index())));
+ return Handle<Number>(ToApi<Number>(
+ isolate->factory()->NewNumberFromInt(edge->index())));
default: UNREACHABLE();
}
return v8::Undefined();
@@ -6425,7 +6481,7 @@ HeapGraphNode::Type HeapGraphNode::GetType() const {
Handle<String> HeapGraphNode::GetName() const {
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::HeapGraphNode::GetName");
- return Handle<String>(ToApi<String>(isolate->factory()->LookupAsciiSymbol(
+ return Handle<String>(ToApi<String>(isolate->factory()->InternalizeUtf8String(
ToInternal(this)->name())));
}
@@ -6504,7 +6560,7 @@ unsigned HeapSnapshot::GetUid() const {
Handle<String> HeapSnapshot::GetTitle() const {
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::HeapSnapshot::GetTitle");
- return Handle<String>(ToApi<String>(isolate->factory()->LookupAsciiSymbol(
+ return Handle<String>(ToApi<String>(isolate->factory()->InternalizeUtf8String(
ToInternal(this)->title())));
}
@@ -6597,7 +6653,8 @@ SnapshotObjectId HeapProfiler::GetSnapshotObjectId(Handle<Value> value) {
const HeapSnapshot* HeapProfiler::TakeSnapshot(Handle<String> title,
HeapSnapshot::Type type,
- ActivityControl* control) {
+ ActivityControl* control,
+ ObjectNameResolver* resolver) {
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::HeapProfiler::TakeSnapshot");
i::HeapSnapshot::Type internal_type = i::HeapSnapshot::kFull;
@@ -6610,7 +6667,7 @@ const HeapSnapshot* HeapProfiler::TakeSnapshot(Handle<String> title,
}
return reinterpret_cast<const HeapSnapshot*>(
i::HeapProfiler::TakeSnapshot(
- *Utils::OpenHandle(*title), internal_type, control));
+ *Utils::OpenHandle(*title), internal_type, control, resolver));
}
@@ -6720,8 +6777,10 @@ void Testing::PrepareStressRun(int run) {
}
+// TODO(svenpanne) Deprecate this.
void Testing::DeoptimizeAll() {
- i::HandleScope scope;
+ i::Isolate* isolate = i::Isolate::Current();
+ i::HandleScope scope(isolate);
internal::Deoptimizer::DeoptimizeAll();
}
diff --git a/src/3rdparty/v8/src/api.h b/src/3rdparty/v8/src/api.h
index 7197b6c..ca2240b 100644
--- a/src/3rdparty/v8/src/api.h
+++ b/src/3rdparty/v8/src/api.h
@@ -201,8 +201,6 @@ class Utils {
v8::internal::Handle<v8::internal::JSObject> obj);
static inline Local<Array> ToLocal(
v8::internal::Handle<v8::internal::JSArray> obj);
- static inline Local<External> ToLocal(
- v8::internal::Handle<v8::internal::Foreign> obj);
static inline Local<Message> MessageToLocal(
v8::internal::Handle<v8::internal::Object> obj);
static inline Local<StackTrace> StackTraceToLocal(
@@ -225,6 +223,8 @@ class Utils {
v8::internal::Handle<v8::internal::FunctionTemplateInfo> obj);
static inline Local<TypeSwitch> ToLocal(
v8::internal::Handle<v8::internal::TypeSwitchInfo> obj);
+ static inline Local<External> ExternalToLocal(
+ v8::internal::Handle<v8::internal::JSObject> obj);
#define DECLARE_OPEN_HANDLE(From, To) \
static inline v8::internal::Handle<v8::internal::To> \
@@ -268,7 +268,6 @@ MAKE_TO_LOCAL(ToLocal, String, String)
MAKE_TO_LOCAL(ToLocal, JSRegExp, RegExp)
MAKE_TO_LOCAL(ToLocal, JSObject, Object)
MAKE_TO_LOCAL(ToLocal, JSArray, Array)
-MAKE_TO_LOCAL(ToLocal, Foreign, External)
MAKE_TO_LOCAL(ToLocal, FunctionTemplateInfo, FunctionTemplate)
MAKE_TO_LOCAL(ToLocal, ObjectTemplateInfo, ObjectTemplate)
MAKE_TO_LOCAL(ToLocal, SignatureInfo, Signature)
@@ -280,6 +279,7 @@ MAKE_TO_LOCAL(StackFrameToLocal, JSObject, StackFrame)
MAKE_TO_LOCAL(NumberToLocal, Object, Number)
MAKE_TO_LOCAL(IntegerToLocal, Object, Integer)
MAKE_TO_LOCAL(Uint32ToLocal, Object, Uint32)
+MAKE_TO_LOCAL(ExternalToLocal, JSObject, External)
#undef MAKE_TO_LOCAL
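// For orientation, MAKE_TO_LOCAL(ExternalToLocal, JSObject, External) stamps
// out a converter of essentially this shape (a sketch; the real macro body
// lives earlier in this header and may carry extra ASSERTs):
//
//   Local<External> Utils::ExternalToLocal(
//       v8::internal::Handle<v8::internal::JSObject> obj) {
//     return Local<External>(reinterpret_cast<External*>(obj.location()));
//   }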
diff --git a/src/3rdparty/v8/src/arm/assembler-arm-inl.h b/src/3rdparty/v8/src/arm/assembler-arm-inl.h
index acd61fe..af29bb8 100644
--- a/src/3rdparty/v8/src/arm/assembler-arm-inl.h
+++ b/src/3rdparty/v8/src/arm/assembler-arm-inl.h
@@ -47,13 +47,54 @@ namespace v8 {
namespace internal {
+int Register::NumAllocatableRegisters() {
+ if (CpuFeatures::IsSupported(VFP2)) {
+ return kMaxNumAllocatableRegisters;
+ } else {
+ return kMaxNumAllocatableRegisters - kGPRsPerNonVFP2Double;
+ }
+}
+
+
+int DwVfpRegister::NumRegisters() {
+ if (CpuFeatures::IsSupported(VFP2)) {
+ return CpuFeatures::IsSupported(VFP32DREGS) ? 32 : 16;
+ } else {
+ return 1;
+ }
+}
+
+
+int DwVfpRegister::NumAllocatableRegisters() {
+ if (CpuFeatures::IsSupported(VFP2)) {
+ return NumRegisters() - kNumReservedRegisters;
+ } else {
+ return 1;
+ }
+}
+
+
int DwVfpRegister::ToAllocationIndex(DwVfpRegister reg) {
ASSERT(!reg.is(kDoubleRegZero));
ASSERT(!reg.is(kScratchDoubleReg));
+ if (reg.code() > kDoubleRegZero.code()) {
+ return reg.code() - kNumReservedRegisters;
+ }
return reg.code();
}
+DwVfpRegister DwVfpRegister::FromAllocationIndex(int index) {
+ ASSERT(index >= 0 && index < NumAllocatableRegisters());
+ ASSERT(kScratchDoubleReg.code() - kDoubleRegZero.code() ==
+ kNumReservedRegisters - 1);
+ if (index >= kDoubleRegZero.code()) {
+ return from_code(index + kNumReservedRegisters);
+ }
+ return from_code(index);
+}
+
+
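// A standalone model of the mapping above (constants assumed from the
// reserved pair d14/d15 documented in assembler-arm.h): allocation indices
// are contiguous, register codes are not, so the index space simply skips
// the two reserved codes.

#include <cassert>

namespace {

const int kDoubleRegZeroCode = 14;  // d14 holds 0.0
const int kNumReserved = 2;         // d14 and d15 are never allocated

int ToIndex(int code) {
  return code > kDoubleRegZeroCode ? code - kNumReserved : code;
}

int FromIndex(int index) {
  return index >= kDoubleRegZeroCode ? index + kNumReserved : index;
}

}  // namespace

int main() {
  assert(ToIndex(13) == 13);    // d0..d13 map straight through
  assert(FromIndex(14) == 16);  // the first index past the hole is d16
  for (int code = 0; code < 32; ++code) {
    if (code == 14 || code == 15) continue;  // reserved, never allocated
    assert(FromIndex(ToIndex(code)) == code);
  }
  return 0;
}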
void RelocInfo::apply(intptr_t delta) {
if (RelocInfo::IsInternalReference(rmode_)) {
// absolute code pointer inside code object moves with the code object.
@@ -317,7 +358,7 @@ Operand::Operand(const ExternalReference& f) {
Operand::Operand(Smi* value) {
rm_ = no_reg;
imm32_ = reinterpret_cast<intptr_t>(value);
- rmode_ = RelocInfo::NONE;
+ rmode_ = RelocInfo::NONE32;
}
diff --git a/src/3rdparty/v8/src/arm/assembler-arm.cc b/src/3rdparty/v8/src/arm/assembler-arm.cc
index b679efa..a8c32d9 100644
--- a/src/3rdparty/v8/src/arm/assembler-arm.cc
+++ b/src/3rdparty/v8/src/arm/assembler-arm.cc
@@ -51,6 +51,11 @@ unsigned CpuFeatures::supported_ = 0;
unsigned CpuFeatures::found_by_runtime_probing_ = 0;
+ExternalReference ExternalReference::cpu_features() {
+ ASSERT(CpuFeatures::initialized_);
+ return ExternalReference(&CpuFeatures::supported_);
+}
+
// Get the CPU features enabled by the build. For cross compilation the
// preprocessor symbols CAN_USE_ARMV7_INSTRUCTIONS and CAN_USE_VFP3_INSTRUCTIONS
// can be defined to enable ARMv7 and VFPv3 instructions when building the
@@ -66,6 +71,9 @@ static unsigned CpuFeaturesImpliedByCompiler() {
#ifdef CAN_USE_VFP2_INSTRUCTIONS
answer |= 1u << VFP2;
#endif // CAN_USE_VFP2_INSTRUCTIONS
+#ifdef CAN_USE_VFP32DREGS
+ answer |= 1u << VFP32DREGS;
+#endif // CAN_USE_VFP32DREGS
#ifdef __arm__
// If the compiler is allowed to use VFP then we can use VFP too in our code
@@ -85,6 +93,22 @@ static unsigned CpuFeaturesImpliedByCompiler() {
}
+const char* DwVfpRegister::AllocationIndexToString(int index) {
+ if (CpuFeatures::IsSupported(VFP2)) {
+ ASSERT(index >= 0 && index < NumAllocatableRegisters());
+ ASSERT(kScratchDoubleReg.code() - kDoubleRegZero.code() ==
+ kNumReservedRegisters - 1);
+ if (index >= kDoubleRegZero.code())
+ index += kNumReservedRegisters;
+
+ return VFPRegisters::Name(index, true);
+ } else {
+ ASSERT(index == 0);
+ return "sfpd0";
+ }
+}
+
+
void CpuFeatures::Probe() {
unsigned standard_features = static_cast<unsigned>(
OS::CpuFeaturesImpliedByPlatform()) | CpuFeaturesImpliedByCompiler();
@@ -121,6 +145,11 @@ void CpuFeatures::Probe() {
if (FLAG_enable_movw_movt) {
supported_ |= 1u << MOVW_MOVT_IMMEDIATE_LOADS;
}
+
+ if (FLAG_enable_32dregs) {
+ supported_ |= 1u << VFP32DREGS;
+ }
+
#else // __arm__
// Probe for additional features not already known to be available.
if (!IsSupported(VFP3) && OS::ArmCpuHasFeature(VFP3)) {
@@ -149,6 +178,10 @@ void CpuFeatures::Probe() {
found_by_runtime_probing_ |= 1u << MOVW_MOVT_IMMEDIATE_LOADS;
}
+ if (!IsSupported(VFP32DREGS) && OS::ArmCpuHasFeature(VFP32DREGS)) {
+ found_by_runtime_probing_ |= 1u << VFP32DREGS;
+ }
+
supported_ |= found_by_runtime_probing_;
#endif
@@ -207,7 +240,7 @@ Operand::Operand(Handle<Object> handle) {
} else {
// no relocation needed
imm32_ = reinterpret_cast<intptr_t>(obj);
- rmode_ = RelocInfo::NONE;
+ rmode_ = RelocInfo::NONE32;
}
}
@@ -282,8 +315,11 @@ const Instr kPopRegPattern =
// mov lr, pc
const Instr kMovLrPc = al | MOV | kRegister_pc_Code | kRegister_lr_Code * B12;
// ldr rd, [pc, #offset]
-const Instr kLdrPCMask = kCondMask | 15 * B24 | 7 * B20 | 15 * B16;
-const Instr kLdrPCPattern = al | 5 * B24 | L | kRegister_pc_Code * B16;
+const Instr kLdrPCMask = 15 * B24 | 7 * B20 | 15 * B16;
+const Instr kLdrPCPattern = 5 * B24 | L | kRegister_pc_Code * B16;
+// vldr dd, [pc, #offset]
+const Instr kVldrDPCMask = 15 * B24 | 3 * B20 | 15 * B16 | 15 * B8;
+const Instr kVldrDPCPattern = 13 * B24 | L | kRegister_pc_Code * B16 | 11 * B8;
// blxcc rm
const Instr kBlxRegMask =
15 * B24 | 15 * B20 | 15 * B16 | 15 * B12 | 15 * B8 | 15 * B4;
@@ -318,45 +354,13 @@ const Instr kLdrStrInstrArgumentMask = 0x0000ffff;
const Instr kLdrStrOffsetMask = 0x00000fff;
-// Spare buffer.
-static const int kMinimalBufferSize = 4*KB;
-
-
-Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size)
- : AssemblerBase(arg_isolate),
+Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
+ : AssemblerBase(isolate, buffer, buffer_size),
recorded_ast_id_(TypeFeedbackId::None()),
positions_recorder_(this) {
- if (buffer == NULL) {
- // Do our own buffer management.
- if (buffer_size <= kMinimalBufferSize) {
- buffer_size = kMinimalBufferSize;
-
- if (isolate()->assembler_spare_buffer() != NULL) {
- buffer = isolate()->assembler_spare_buffer();
- isolate()->set_assembler_spare_buffer(NULL);
- }
- }
- if (buffer == NULL) {
- buffer_ = NewArray<byte>(buffer_size);
- } else {
- buffer_ = static_cast<byte*>(buffer);
- }
- buffer_size_ = buffer_size;
- own_buffer_ = true;
-
- } else {
- // Use externally provided buffer instead.
- ASSERT(buffer_size > 0);
- buffer_ = static_cast<byte*>(buffer);
- buffer_size_ = buffer_size;
- own_buffer_ = false;
- }
-
- // Set up buffer pointers.
- ASSERT(buffer_ != NULL);
- pc_ = buffer_;
- reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
+ reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
num_pending_reloc_info_ = 0;
+ num_pending_64_bit_reloc_info_ = 0;
next_buffer_check_ = 0;
const_pool_blocked_nesting_ = 0;
no_const_pool_before_ = 0;
@@ -368,14 +372,6 @@ Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size)
Assembler::~Assembler() {
ASSERT(const_pool_blocked_nesting_ == 0);
- if (own_buffer_) {
- if (isolate()->assembler_spare_buffer() == NULL &&
- buffer_size_ == kMinimalBufferSize) {
- isolate()->set_assembler_spare_buffer(buffer_);
- } else {
- DeleteArray(buffer_);
- }
- }
}
@@ -383,6 +379,7 @@ void Assembler::GetCode(CodeDesc* desc) {
// Emit constant pool if necessary.
CheckConstPool(true, false);
ASSERT(num_pending_reloc_info_ == 0);
+ ASSERT(num_pending_64_bit_reloc_info_ == 0);
// Set up code descriptor.
desc->buffer = buffer_;
@@ -429,6 +426,11 @@ bool Assembler::IsLdrRegisterImmediate(Instr instr) {
}
+bool Assembler::IsVldrDRegisterImmediate(Instr instr) {
+ return (instr & (15 * B24 | 3 * B20 | 15 * B8)) == (13 * B24 | B20 | 11 * B8);
+}
+
+
int Assembler::GetLdrRegisterImmediateOffset(Instr instr) {
ASSERT(IsLdrRegisterImmediate(instr));
bool positive = (instr & B23) == B23;
@@ -437,6 +439,15 @@ int Assembler::GetLdrRegisterImmediateOffset(Instr instr) {
}
+int Assembler::GetVldrDRegisterImmediateOffset(Instr instr) {
+ ASSERT(IsVldrDRegisterImmediate(instr));
+ bool positive = (instr & B23) == B23;
+ int offset = instr & kOff8Mask; // Zero extended offset.
+ offset <<= 2;
+ return positive ? offset : -offset;
+}
+
+
Instr Assembler::SetLdrRegisterImmediateOffset(Instr instr, int offset) {
ASSERT(IsLdrRegisterImmediate(instr));
bool positive = offset >= 0;
@@ -449,6 +460,19 @@ Instr Assembler::SetLdrRegisterImmediateOffset(Instr instr, int offset) {
}
+Instr Assembler::SetVldrDRegisterImmediateOffset(Instr instr, int offset) {
+ ASSERT(IsVldrDRegisterImmediate(instr));
+ ASSERT((offset & ~3) == offset); // Must be 64-bit aligned.
+ bool positive = offset >= 0;
+ if (!positive) offset = -offset;
+ ASSERT(is_uint10(offset));
+ // Set bit indicating whether the offset should be added.
+ instr = (instr & ~B23) | (positive ? B23 : 0);
+ // Set the actual offset. Its bottom 2 bits are zero.
+ return (instr & ~kOff8Mask) | (offset >> 2);
+}
+
+
bool Assembler::IsStrRegisterImmediate(Instr instr) {
return (instr & (B27 | B26 | B25 | B22 | B20)) == B26;
}
@@ -534,7 +558,14 @@ bool Assembler::IsLdrRegFpNegOffset(Instr instr) {
bool Assembler::IsLdrPcImmediateOffset(Instr instr) {
// Check the instruction is indeed a
// ldr<cond> <Rd>, [pc +/- offset_12].
- return (instr & (kLdrPCMask & ~kCondMask)) == 0x051f0000;
+ return (instr & kLdrPCMask) == kLdrPCPattern;
+}
+
+
+bool Assembler::IsVldrDPcImmediateOffset(Instr instr) {
+ // Check the instruction is indeed a
+ // vldr<cond> <Dd>, [pc +/- offset_10].
+ return (instr & kVldrDPCMask) == kVldrDPCPattern;
}
@@ -810,7 +841,7 @@ bool Operand::must_output_reloc_info(const Assembler* assembler) const {
#endif // def DEBUG
if (assembler != NULL && assembler->predictable_code_size()) return true;
return Serializer::enabled();
- } else if (rmode_ == RelocInfo::NONE) {
+ } else if (RelocInfo::IsNone(rmode_)) {
return false;
}
return true;
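// RelocInfo::NONE has been split into NONE32 and NONE64 so that 64-bit
// (double) constant pool entries can be told apart from 32-bit ones; the
// predicate used above presumably reduces to:
//
//   static bool IsNone(Mode mode) { return mode == NONE32 || mode == NONE64; }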
@@ -1721,19 +1752,21 @@ void Assembler::vldr(const DwVfpRegister dst,
int offset,
const Condition cond) {
// Ddst = MEM(Rbase + offset).
- // Instruction details available in ARM DDI 0406A, A8-628.
- // cond(31-28) | 1101(27-24)| U001(23-20) | Rbase(19-16) |
- // Vdst(15-12) | 1011(11-8) | offset
+ // Instruction details available in ARM DDI 0406C.b, A8-924.
+ // cond(31-28) | 1101(27-24)| U(23) | D(22) | 01(21-20) | Rbase(19-16) |
+ // Vd(15-12) | 1011(11-8) | offset
ASSERT(CpuFeatures::IsEnabled(VFP2));
int u = 1;
if (offset < 0) {
offset = -offset;
u = 0;
}
+ int vd, d;
+ dst.split_code(&vd, &d);
ASSERT(offset >= 0);
if ((offset % 4) == 0 && (offset / 4) < 256) {
- emit(cond | u*B23 | 0xD1*B20 | base.code()*B16 | dst.code()*B12 |
+ emit(cond | 0xD*B24 | u*B23 | d*B22 | B20 | base.code()*B16 | vd*B12 |
0xB*B8 | ((offset / 4) & 255));
} else {
// Larger offsets must be handled by computing the correct address
@@ -1744,7 +1777,7 @@ void Assembler::vldr(const DwVfpRegister dst,
} else {
sub(ip, base, Operand(offset));
}
- emit(cond | 0xD1*B20 | ip.code()*B16 | dst.code()*B12 | 0xB*B8);
+ emit(cond | 0xD*B24 | d*B22 | B20 | ip.code()*B16 | vd*B12 | 0xB*B8);
}
}
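// The encodings above now route the register number through split_code(),
// which presumably peels the top bit off a 5-bit register number so that
// d16-d31 can be encoded. A standalone sketch of that behavior:

#include <cassert>

void SplitCode(int code, int* vm, int* m) {
  *m = (code & 0x10) >> 4;  // bit 4 becomes the D/N/M field
  *vm = code & 0x0F;        // low four bits become Vd/Vn/Vm
}

int main() {
  int vm, m;
  SplitCode(17, &vm, &m);  // d17
  assert(m == 1 && vm == 1);
  SplitCode(7, &vm, &m);   // d7
  assert(m == 0 && vm == 7);
  return 0;
}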
@@ -1807,9 +1840,9 @@ void Assembler::vstr(const DwVfpRegister src,
int offset,
const Condition cond) {
// MEM(Rbase + offset) = Dsrc.
- // Instruction details available in ARM DDI 0406A, A8-786.
- // cond(31-28) | 1101(27-24)| U000(23-20) | | Rbase(19-16) |
- // Vsrc(15-12) | 1011(11-8) | (offset/4)
+ // Instruction details available in ARM DDI 0406C.b, A8-1082.
+ // cond(31-28) | 1101(27-24)| U(23) | D(22) | 00(21-20) | Rbase(19-16) |
+ // Vd(15-12) | 1011(11-8) | (offset/4)
ASSERT(CpuFeatures::IsEnabled(VFP2));
int u = 1;
if (offset < 0) {
@@ -1817,9 +1850,12 @@ void Assembler::vstr(const DwVfpRegister src,
u = 0;
}
ASSERT(offset >= 0);
+ int vd, d;
+ src.split_code(&vd, &d);
+
if ((offset % 4) == 0 && (offset / 4) < 256) {
- emit(cond | u*B23 | 0xD0*B20 | base.code()*B16 | src.code()*B12 |
- 0xB*B8 | ((offset / 4) & 255));
+ emit(cond | 0xD*B24 | u*B23 | d*B22 | base.code()*B16 | vd*B12 | 0xB*B8 |
+ ((offset / 4) & 255));
} else {
// Larger offsets must be handled by computing the correct address
// in the ip register.
@@ -1829,7 +1865,7 @@ void Assembler::vstr(const DwVfpRegister src,
} else {
sub(ip, base, Operand(offset));
}
- emit(cond | 0xD0*B20 | ip.code()*B16 | src.code()*B12 | 0xB*B8);
+ emit(cond | 0xD*B24 | d*B22 | ip.code()*B16 | vd*B12 | 0xB*B8);
}
}
@@ -1891,9 +1927,9 @@ void Assembler::vldm(BlockAddrMode am,
DwVfpRegister first,
DwVfpRegister last,
Condition cond) {
- // Instruction details available in ARM DDI 0406A, A8-626.
+ // Instruction details available in ARM DDI 0406C.b, A8-922.
// cond(31-28) | 110(27-25)| PUDW1(24-20) | Rbase(19-16) |
- // first(15-12) | 1010(11-8) | (count * 2)
+ // first(15-12) | 1011(11-8) | (count * 2)
ASSERT(CpuFeatures::IsEnabled(VFP2));
ASSERT_LE(first.code(), last.code());
ASSERT(am == ia || am == ia_w || am == db_w);
@@ -1913,7 +1949,7 @@ void Assembler::vstm(BlockAddrMode am,
DwVfpRegister first,
DwVfpRegister last,
Condition cond) {
- // Instruction details available in ARM DDI 0406A, A8-784.
+ // Instruction details available in ARM DDI 0406C.b, A8-1080.
// cond(31-28) | 110(27-25)| PUDW0(24-20) | Rbase(19-16) |
// first(15-12) | 1011(11-8) | (count * 2)
ASSERT(CpuFeatures::IsEnabled(VFP2));
@@ -2031,37 +2067,69 @@ static bool FitsVMOVDoubleImmediate(double d, uint32_t *encoding) {
void Assembler::vmov(const DwVfpRegister dst,
double imm,
- const Register scratch,
- const Condition cond) {
- // Dd = immediate
- // Instruction details available in ARM DDI 0406B, A8-640.
+ const Register scratch) {
ASSERT(CpuFeatures::IsEnabled(VFP2));
uint32_t enc;
if (CpuFeatures::IsSupported(VFP3) && FitsVMOVDoubleImmediate(imm, &enc)) {
// The double can be encoded in the instruction.
- emit(cond | 0xE*B24 | 0xB*B20 | dst.code()*B12 | 0xB*B8 | enc);
+ //
+ // Dd = immediate
+ // Instruction details available in ARM DDI 0406C.b, A8-936.
+ // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | imm4H(19-16) |
+ // Vd(15-12) | 101(11-9) | sz=1(8) | imm4L(3-0)
+ int vd, d;
+ dst.split_code(&vd, &d);
+ emit(al | 0x1D*B23 | d*B22 | 0x3*B20 | vd*B12 | 0x5*B9 | B8 | enc);
+ } else if (FLAG_enable_vldr_imm) {
+ // TODO(jfb) Temporarily turned off until we have constant blinding or
+ // some equivalent mitigation: an attacker can otherwise control
+ // generated data which also happens to be executable, a Very Bad
+ // Thing indeed.
+ // Blinding gets tricky because we don't have xor, we probably
+ // need to add/subtract without losing precision, which requires a
+ // cookie value that Lithium is probably better positioned to
+ // choose.
+ // We could also add a few peepholes here like detecting 0.0 and
+ // -0.0 and doing a vmov from the sequestered d14, forcing denorms
+ // to zero (we set flush-to-zero), and normalizing NaN values.
+ // We could also detect redundant values.
+ // The code could also randomize the order of values, though
+ // that's tricky because vldr has a limited reach. Furthermore
+ // it breaks load locality.
+ RecordRelocInfo(imm);
+ vldr(dst, MemOperand(pc, 0));
} else {
- // Synthesise the double from ARM immediates. This could be implemented
- // using vldr from a constant pool.
+ // Synthesise the double from ARM immediates.
uint32_t lo, hi;
DoubleAsTwoUInt32(imm, &lo, &hi);
- mov(ip, Operand(lo));
if (scratch.is(no_reg)) {
- // Move the low part of the double into the lower of the corresponsing S
- // registers of D register dst.
- vmov(dst.low(), ip, cond);
-
- // Move the high part of the double into the higher of the corresponsing S
- // registers of D register dst.
- mov(ip, Operand(hi));
- vmov(dst.high(), ip, cond);
+ if (dst.code() < 16) {
+ // Move the low part of the double into the lower of the corresponding S
+ // registers of D register dst.
+ mov(ip, Operand(lo));
+ vmov(dst.low(), ip);
+
+ // Move the high part of the double into the higher of the
+ // corresponding S registers of D register dst.
+ mov(ip, Operand(hi));
+ vmov(dst.high(), ip);
+ } else {
+ // D16-D31 does not have S registers, so move the low and high parts
+ // directly to the D register using vmov.32.
+ // Note: This may be slower, so we only do this when we have to.
+ mov(ip, Operand(lo));
+ vmov(dst, VmovIndexLo, ip);
+ mov(ip, Operand(hi));
+ vmov(dst, VmovIndexHi, ip);
+ }
} else {
// Move the low and high parts of the double to a D register in one
// instruction.
+ mov(ip, Operand(lo));
mov(scratch, Operand(hi));
- vmov(dst, ip, scratch, cond);
+ vmov(dst, ip, scratch);
}
}
}
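// For context, a sketch of which doubles the VFP3 immediate path can accept
// (mirroring the ARM ARM's VFPExpandImm, which is what
// FitsVMOVDoubleImmediate checks: +/- (16..31)/16 * 2^e with e in [-3, 4],
// so only the top four mantissa bits and a narrow exponent band survive):

#include <cstdint>
#include <cstring>

bool FitsVmovImmediate(double d) {
  uint64_t bits;
  memcpy(&bits, &d, sizeof(bits));
  if (bits & 0x0000FFFFFFFFFFFFull) return false;  // only 4 fraction bits fit
  int biased_exp = static_cast<int>((bits >> 52) & 0x7FF);
  return biased_exp >= 0x3FC && biased_exp <= 0x403;  // 2^-3 .. 2^4
}

// FitsVmovImmediate(1.0) and FitsVmovImmediate(-31.0) hold, while
// FitsVmovImmediate(0.0) and FitsVmovImmediate(1.1) do not -- which is what
// pushes such values onto the vldr or vmov.32 synthesis paths above.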
@@ -2084,10 +2152,33 @@ void Assembler::vmov(const DwVfpRegister dst,
const DwVfpRegister src,
const Condition cond) {
// Dd = Dm
- // Instruction details available in ARM DDI 0406B, A8-642.
+ // Instruction details available in ARM DDI 0406C.b, A8-938.
+ // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 0000(19-16) | Vd(15-12) |
+ // 101(11-9) | sz=1(8) | 0(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
+ ASSERT(CpuFeatures::IsEnabled(VFP2));
+ int vd, d;
+ dst.split_code(&vd, &d);
+ int vm, m;
+ src.split_code(&vm, &m);
+ emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | vd*B12 | 0x5*B9 | B8 | B6 | m*B5 |
+ vm);
+}
+
+
+void Assembler::vmov(const DwVfpRegister dst,
+ const VmovIndex index,
+ const Register src,
+ const Condition cond) {
+ // Dd[index] = Rt
+ // Instruction details available in ARM DDI 0406C.b, A8-940.
+ // cond(31-28) | 1110(27-24) | 0(23) | opc1=0index(22-21) | 0(20) |
+ // Vd(19-16) | Rt(15-12) | 1011(11-8) | D(7) | opc2=00(6-5) | 1(4) | 0000(3-0)
ASSERT(CpuFeatures::IsEnabled(VFP2));
- emit(cond | 0xE*B24 | 0xB*B20 |
- dst.code()*B12 | 0x5*B9 | B8 | B6 | src.code());
+ ASSERT(index.index == 0 || index.index == 1);
+ int vd, d;
+ dst.split_code(&vd, &d);
+ emit(cond | 0xE*B24 | index.index*B21 | vd*B16 | src.code()*B12 | 0xB*B8 |
+ d*B7 | B4);
}
@@ -2096,13 +2187,15 @@ void Assembler::vmov(const DwVfpRegister dst,
const Register src2,
const Condition cond) {
// Dm = <Rt,Rt2>.
- // Instruction details available in ARM DDI 0406A, A8-646.
+ // Instruction details available in ARM DDI 0406C.b, A8-948.
// cond(31-28) | 1100(27-24)| 010(23-21) | op=0(20) | Rt2(19-16) |
// Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
ASSERT(CpuFeatures::IsEnabled(VFP2));
ASSERT(!src1.is(pc) && !src2.is(pc));
+ int vm, m;
+ dst.split_code(&vm, &m);
emit(cond | 0xC*B24 | B22 | src2.code()*B16 |
- src1.code()*B12 | 0xB*B8 | B4 | dst.code());
+ src1.code()*B12 | 0xB*B8 | m*B5 | B4 | vm);
}
@@ -2111,13 +2204,15 @@ void Assembler::vmov(const Register dst1,
const DwVfpRegister src,
const Condition cond) {
// <Rt,Rt2> = Dm.
- // Instruction details available in ARM DDI 0406A, A8-646.
+ // Instruction details available in ARM DDI 0406C.b, A8-948.
// cond(31-28) | 1100(27-24)| 010(23-21) | op=1(20) | Rt2(19-16) |
// Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
ASSERT(CpuFeatures::IsEnabled(VFP2));
ASSERT(!dst1.is(pc) && !dst2.is(pc));
+ int vm, m;
+ src.split_code(&vm, &m);
emit(cond | 0xC*B24 | B22 | B20 | dst2.code()*B16 |
- dst1.code()*B12 | 0xB*B8 | B4 | src.code());
+ dst1.code()*B12 | 0xB*B8 | m*B5 | B4 | vm);
}
@@ -2330,18 +2425,33 @@ void Assembler::vcvt_f32_f64(const SwVfpRegister dst,
void Assembler::vneg(const DwVfpRegister dst,
const DwVfpRegister src,
const Condition cond) {
+ // Instruction details available in ARM DDI 0406C.b, A8-968.
+ // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 0001(19-16) | Vd(15-12) |
+ // 101(11-9) | sz=1(8) | 0(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
ASSERT(CpuFeatures::IsEnabled(VFP2));
- emit(cond | 0xE*B24 | 0xB*B20 | B16 | dst.code()*B12 |
- 0x5*B9 | B8 | B6 | src.code());
+ int vd, d;
+ dst.split_code(&vd, &d);
+ int vm, m;
+ src.split_code(&vm, &m);
+
+ emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | B16 | vd*B12 | 0x5*B9 | B8 | B6 |
+ m*B5 | vm);
}
void Assembler::vabs(const DwVfpRegister dst,
const DwVfpRegister src,
const Condition cond) {
+ // Instruction details available in ARM DDI 0406C.b, A8-524.
+ // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 0000(19-16) | Vd(15-12) |
+ // 101(11-9) | sz=1(8) | 1(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
ASSERT(CpuFeatures::IsEnabled(VFP2));
- emit(cond | 0xE*B24 | 0xB*B20 | dst.code()*B12 |
- 0x5*B9 | B8 | 0x3*B6 | src.code());
+ int vd, d;
+ dst.split_code(&vd, &d);
+ int vm, m;
+ src.split_code(&vm, &m);
+ emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | vd*B12 | 0x5*B9 | B8 | B7 | B6 |
+ m*B5 | vm);
}
@@ -2351,12 +2461,18 @@ void Assembler::vadd(const DwVfpRegister dst,
const Condition cond) {
// Dd = vadd(Dn, Dm) double precision floating point addition.
// Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
- // Instruction details available in ARM DDI 0406A, A8-536.
- // cond(31-28) | 11100(27-23)| D=?(22) | 11(21-20) | Vn(19-16) |
- // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 0(6) | M=?(5) | 0(4) | Vm(3-0)
+ // Instruction details available in ARM DDI 0406C.b, A8-830.
+ // cond(31-28) | 11100(27-23)| D(22) | 11(21-20) | Vn(19-16) |
+ // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 0(6) | M(5) | 0(4) | Vm(3-0)
ASSERT(CpuFeatures::IsEnabled(VFP2));
- emit(cond | 0xE*B24 | 0x3*B20 | src1.code()*B16 |
- dst.code()*B12 | 0x5*B9 | B8 | src2.code());
+ int vd, d;
+ dst.split_code(&vd, &d);
+ int vn, n;
+ src1.split_code(&vn, &n);
+ int vm, m;
+ src2.split_code(&vm, &m);
+ emit(cond | 0x1C*B23 | d*B22 | 0x3*B20 | vn*B16 | vd*B12 | 0x5*B9 | B8 |
+ n*B7 | m*B5 | vm);
}
@@ -2366,12 +2482,18 @@ void Assembler::vsub(const DwVfpRegister dst,
const Condition cond) {
// Dd = vsub(Dn, Dm) double precision floating point subtraction.
// Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
- // Instruction details available in ARM DDI 0406A, A8-784.
- // cond(31-28) | 11100(27-23)| D=?(22) | 11(21-20) | Vn(19-16) |
- // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 1(6) | M=?(5) | 0(4) | Vm(3-0)
+ // Instruction details available in ARM DDI 0406C.b, A8-1086.
+ // cond(31-28) | 11100(27-23)| D(22) | 11(21-20) | Vn(19-16) |
+ // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
ASSERT(CpuFeatures::IsEnabled(VFP2));
- emit(cond | 0xE*B24 | 0x3*B20 | src1.code()*B16 |
- dst.code()*B12 | 0x5*B9 | B8 | B6 | src2.code());
+ int vd, d;
+ dst.split_code(&vd, &d);
+ int vn, n;
+ src1.split_code(&vn, &n);
+ int vm, m;
+ src2.split_code(&vm, &m);
+ emit(cond | 0x1C*B23 | d*B22 | 0x3*B20 | vn*B16 | vd*B12 | 0x5*B9 | B8 |
+ n*B7 | B6 | m*B5 | vm);
}
@@ -2381,12 +2503,54 @@ void Assembler::vmul(const DwVfpRegister dst,
const Condition cond) {
// Dd = vmul(Dn, Dm) double precision floating point multiplication.
// Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
- // Instruction details available in ARM DDI 0406A, A8-784.
- // cond(31-28) | 11100(27-23)| D=?(22) | 10(21-20) | Vn(19-16) |
- // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 0(6) | M=?(5) | 0(4) | Vm(3-0)
+ // Instruction details available in ARM DDI 0406C.b, A8-960.
+ // cond(31-28) | 11100(27-23)| D(22) | 10(21-20) | Vn(19-16) |
+ // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 0(6) | M(5) | 0(4) | Vm(3-0)
ASSERT(CpuFeatures::IsEnabled(VFP2));
- emit(cond | 0xE*B24 | 0x2*B20 | src1.code()*B16 |
- dst.code()*B12 | 0x5*B9 | B8 | src2.code());
+ int vd, d;
+ dst.split_code(&vd, &d);
+ int vn, n;
+ src1.split_code(&vn, &n);
+ int vm, m;
+ src2.split_code(&vm, &m);
+ emit(cond | 0x1C*B23 | d*B22 | 0x2*B20 | vn*B16 | vd*B12 | 0x5*B9 | B8 |
+ n*B7 | m*B5 | vm);
+}
+
+
+void Assembler::vmla(const DwVfpRegister dst,
+ const DwVfpRegister src1,
+ const DwVfpRegister src2,
+ const Condition cond) {
+ // Instruction details available in ARM DDI 0406C.b, A8-932.
+ // cond(31-28) | 11100(27-23) | D(22) | 00(21-20) | Vn(19-16) |
+ // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | op=0(6) | M(5) | 0(4) | Vm(3-0)
+ int vd, d;
+ dst.split_code(&vd, &d);
+ int vn, n;
+ src1.split_code(&vn, &n);
+ int vm, m;
+ src2.split_code(&vm, &m);
+ emit(cond | 0x1C*B23 | d*B22 | vn*B16 | vd*B12 | 0x5*B9 | B8 | n*B7 | m*B5 |
+ vm);
+}
+
+
+void Assembler::vmls(const DwVfpRegister dst,
+ const DwVfpRegister src1,
+ const DwVfpRegister src2,
+ const Condition cond) {
+ // Instruction details available in ARM DDI 0406C.b, A8-932.
+ // cond(31-28) | 11100(27-23) | D(22) | 00(21-20) | Vn(19-16) |
+ // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | op=1(6) | M(5) | 0(4) | Vm(3-0)
+ int vd, d;
+ dst.split_code(&vd, &d);
+ int vn, n;
+ src1.split_code(&vn, &n);
+ int vm, m;
+ src2.split_code(&vm, &m);
+ emit(cond | 0x1C*B23 | d*B22 | vn*B16 | vd*B12 | 0x5*B9 | B8 | n*B7 | B6 |
+ m*B5 | vm);
}
@@ -2396,12 +2560,18 @@ void Assembler::vdiv(const DwVfpRegister dst,
const Condition cond) {
// Dd = vdiv(Dn, Dm) double precision floating point division.
// Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
- // Instruction details available in ARM DDI 0406A, A8-584.
- // cond(31-28) | 11101(27-23)| D=?(22) | 00(21-20) | Vn(19-16) |
- // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=? | 0(6) | M=?(5) | 0(4) | Vm(3-0)
+ // Instruction details available in ARM DDI 0406C.b, A8-882.
+ // cond(31-28) | 11101(27-23)| D(22) | 00(21-20) | Vn(19-16) |
+ // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 0(6) | M(5) | 0(4) | Vm(3-0)
ASSERT(CpuFeatures::IsEnabled(VFP2));
- emit(cond | 0xE*B24 | B23 | src1.code()*B16 |
- dst.code()*B12 | 0x5*B9 | B8 | src2.code());
+ int vd, d;
+ dst.split_code(&vd, &d);
+ int vn, n;
+ src1.split_code(&vn, &n);
+ int vm, m;
+ src2.split_code(&vm, &m);
+ emit(cond | 0x1D*B23 | d*B22 | vn*B16 | vd*B12 | 0x5*B9 | B8 | n*B7 | m*B5 |
+ vm);
}
@@ -2409,26 +2579,31 @@ void Assembler::vcmp(const DwVfpRegister src1,
const DwVfpRegister src2,
const Condition cond) {
// vcmp(Dd, Dm) double precision floating point comparison.
- // Instruction details available in ARM DDI 0406A, A8-570.
- // cond(31-28) | 11101 (27-23)| D=?(22) | 11 (21-20) | 0100 (19-16) |
- // Vd(15-12) | 101(11-9) | sz(8)=1 | E(7)=0 | 1(6) | M(5)=? | 0(4) | Vm(3-0)
+ // Instruction details available in ARM DDI 0406C.b, A8-864.
+ // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0100(19-16) |
+ // Vd(15-12) | 101(11-9) | sz=1(8) | E=0(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
ASSERT(CpuFeatures::IsEnabled(VFP2));
- emit(cond | 0xE*B24 |B23 | 0x3*B20 | B18 |
- src1.code()*B12 | 0x5*B9 | B8 | B6 | src2.code());
+ int vd, d;
+ src1.split_code(&vd, &d);
+ int vm, m;
+ src2.split_code(&vm, &m);
+ emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | 0x4*B16 | vd*B12 | 0x5*B9 | B8 | B6 |
+ m*B5 | vm);
}
void Assembler::vcmp(const DwVfpRegister src1,
const double src2,
const Condition cond) {
- // vcmp(Dd, Dm) double precision floating point comparison.
- // Instruction details available in ARM DDI 0406A, A8-570.
- // cond(31-28) | 11101 (27-23)| D=?(22) | 11 (21-20) | 0101 (19-16) |
- // Vd(15-12) | 101(11-9) | sz(8)=1 | E(7)=0 | 1(6) | M(5)=? | 0(4) | 0000(3-0)
+ // vcmp(Dd, #0.0) double precision floating point comparison.
+ // Instruction details available in ARM DDI 0406C.b, A8-864.
+ // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0101(19-16) |
+ // Vd(15-12) | 101(11-9) | sz=1(8) | E=0(7) | 1(6) | 0(5) | 0(4) | 0000(3-0)
ASSERT(CpuFeatures::IsEnabled(VFP2));
ASSERT(src2 == 0.0);
- emit(cond | 0xE*B24 |B23 | 0x3*B20 | B18 | B16 |
- src1.code()*B12 | 0x5*B9 | B8 | B6);
+ int vd, d;
+ src1.split_code(&vd, &d);
+ emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | 0x5*B16 | vd*B12 | 0x5*B9 | B8 | B6);
}
@@ -2455,11 +2630,16 @@ void Assembler::vmrs(Register dst, Condition cond) {
void Assembler::vsqrt(const DwVfpRegister dst,
const DwVfpRegister src,
const Condition cond) {
- // cond(31-28) | 11101 (27-23)| D=?(22) | 11 (21-20) | 0001 (19-16) |
- // Vd(15-12) | 101(11-9) | sz(8)=1 | 11 (7-6) | M(5)=? | 0(4) | Vm(3-0)
+ // Instruction details available in ARM DDI 0406C.b, A8-1058.
+ // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0001(19-16) |
+ // Vd(15-12) | 101(11-9) | sz=1(8) | 11(7-6) | M(5) | 0(4) | Vm(3-0)
ASSERT(CpuFeatures::IsEnabled(VFP2));
- emit(cond | 0xE*B24 | B23 | 0x3*B20 | B16 |
- dst.code()*B12 | 0x5*B9 | B8 | 3*B6 | src.code());
+ int vd, d;
+ dst.split_code(&vd, &d);
+ int vm, m;
+ src.split_code(&vm, &m);
+ emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | B16 | vd*B12 | 0x5*B9 | B8 | 0x3*B6 |
+ m*B5 | vm);
}
@@ -2592,6 +2772,7 @@ void Assembler::db(uint8_t data) {
// to write pure data with no pointers and the constant pool should
// be emitted before using db.
ASSERT(num_pending_reloc_info_ == 0);
+ ASSERT(num_pending_64_bit_reloc_info_ == 0);
CheckBuffer();
*reinterpret_cast<uint8_t*>(pc_) = data;
pc_ += sizeof(uint8_t);
@@ -2603,6 +2784,7 @@ void Assembler::dd(uint32_t data) {
// to write pure data with no pointers and the constant pool should
// be emitted before using dd.
ASSERT(num_pending_reloc_info_ == 0);
+ ASSERT(num_pending_64_bit_reloc_info_ == 0);
CheckBuffer();
*reinterpret_cast<uint32_t*>(pc_) = data;
pc_ += sizeof(uint32_t);
@@ -2626,16 +2808,9 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data,
|| mode == DONT_USE_CONSTANT_POOL);
// These modes do not need an entry in the constant pool.
} else {
- ASSERT(num_pending_reloc_info_ < kMaxNumPendingRelocInfo);
- if (num_pending_reloc_info_ == 0) {
- first_const_pool_use_ = pc_offset();
- }
- pending_reloc_info_[num_pending_reloc_info_++] = rinfo;
- // Make sure the constant pool is not emitted in place of the next
- // instruction for which we just recorded relocation info.
- BlockConstPoolFor(1);
+ RecordRelocInfoConstantPoolEntryHelper(rinfo);
}
- if (rinfo.rmode() != RelocInfo::NONE) {
+ if (!RelocInfo::IsNone(rinfo.rmode())) {
// Don't record external references unless the heap will be serialized.
if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
#ifdef DEBUG
@@ -2661,14 +2836,38 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data,
}
}
+void Assembler::RecordRelocInfo(double data) {
+ // We do not try to reuse pool constants.
+ RelocInfo rinfo(pc_, data);
+ RecordRelocInfoConstantPoolEntryHelper(rinfo);
+}
+
+
+void Assembler::RecordRelocInfoConstantPoolEntryHelper(const RelocInfo& rinfo) {
+ ASSERT(num_pending_reloc_info_ < kMaxNumPendingRelocInfo);
+ if (num_pending_reloc_info_ == 0) {
+ first_const_pool_use_ = pc_offset();
+ }
+ pending_reloc_info_[num_pending_reloc_info_++] = rinfo;
+ if (rinfo.rmode() == RelocInfo::NONE64) {
+ ++num_pending_64_bit_reloc_info_;
+ }
+ ASSERT(num_pending_64_bit_reloc_info_ <= num_pending_reloc_info_);
+ // Make sure the constant pool is not emitted in place of the next
+ // instruction for which we just recorded relocation info.
+ BlockConstPoolFor(1);
+}
+
void Assembler::BlockConstPoolFor(int instructions) {
int pc_limit = pc_offset() + instructions * kInstrSize;
if (no_const_pool_before_ < pc_limit) {
// If there are some pending entries, the constant pool cannot be blocked
- // further than first_const_pool_use_ + kMaxDistToPool
+ // further than the constant pool load instruction's reach.
ASSERT((num_pending_reloc_info_ == 0) ||
- (pc_limit < (first_const_pool_use_ + kMaxDistToPool)));
+ (pc_limit - first_const_pool_use_ < kMaxDistToIntPool));
+ // TODO(jfb) Also check 64-bit entries are in range (requires splitting
+ // them up from 32-bit entries).
no_const_pool_before_ = pc_limit;
}
@@ -2690,29 +2889,60 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
// There is nothing to do if there are no pending constant pool entries.
if (num_pending_reloc_info_ == 0) {
+ ASSERT(num_pending_64_bit_reloc_info_ == 0);
// Calculate the offset of the next check.
next_buffer_check_ = pc_offset() + kCheckPoolInterval;
return;
}
- // We emit a constant pool when:
- // * requested to do so by parameter force_emit (e.g. after each function).
- // * the distance to the first instruction accessing the constant pool is
- // kAvgDistToPool or more.
- // * no jump is required and the distance to the first instruction accessing
- // the constant pool is at least kMaxDistToPool / 2.
- ASSERT(first_const_pool_use_ >= 0);
- int dist = pc_offset() - first_const_pool_use_;
- if (!force_emit && dist < kAvgDistToPool &&
- (require_jump || (dist < (kMaxDistToPool / 2)))) {
- return;
- }
-
// Check that the code buffer is large enough before emitting the constant
// pool (include the jump over the pool and the constant pool marker and
// the gap to the relocation information).
+ // Note: 64-bit values are wider, and the first one needs to be 64-bit aligned.
int jump_instr = require_jump ? kInstrSize : 0;
- int size = jump_instr + kInstrSize + num_pending_reloc_info_ * kPointerSize;
+ int size_up_to_marker = jump_instr + kInstrSize;
+ int size_after_marker = num_pending_reloc_info_ * kPointerSize;
+ bool has_fp_values = (num_pending_64_bit_reloc_info_ > 0);
+ // 64-bit values must be 64-bit aligned.
+ // We'll start emitting at PC: branch+marker, then 32-bit values, then
+ // 64-bit values which might need to be aligned.
+ bool require_64_bit_align = has_fp_values &&
+ (((uintptr_t)pc_ + size_up_to_marker + size_after_marker) & 0x3);
+ if (require_64_bit_align) {
+ size_after_marker += kInstrSize;
+ }
+ // num_pending_reloc_info_ also contains 64-bit entries, the above code
+ // therefore already counted half of the size for 64-bit entries. Add the
+ // remaining size.
+ STATIC_ASSERT(kPointerSize == kDoubleSize / 2);
+ size_after_marker += num_pending_64_bit_reloc_info_ * (kDoubleSize / 2);
+
+ int size = size_up_to_marker + size_after_marker;
+
+ // We emit a constant pool when:
+ // * requested to do so by parameter force_emit (e.g. after each function).
+ // * the distance from the first instruction accessing the constant pool to
+ // any of the constant pool entries would exceed that instruction's reach
+ // the next time the pool is checked. This is overly restrictive, but we
+ // don't emit constant pool entries in order, so it is conservatively
+ // correct.
+ // * no jump over the pool is required and the distance to the first
+ // pool-accessing instruction is already past half of the maximum reach.
+ if (!force_emit) {
+ ASSERT((first_const_pool_use_ >= 0) && (num_pending_reloc_info_ > 0));
+ int dist = pc_offset() + size - first_const_pool_use_;
+ if (has_fp_values) {
+ if ((dist < kMaxDistToFPPool - kCheckPoolInterval) &&
+ (require_jump || (dist < kMaxDistToFPPool / 2))) {
+ return;
+ }
+ } else {
+ if ((dist < kMaxDistToIntPool - kCheckPoolInterval) &&
+ (require_jump || (dist < kMaxDistToIntPool / 2))) {
+ return;
+ }
+ }
+ }
+
int needed_space = size + kGap;
while (buffer_space() <= needed_space) GrowBuffer();
@@ -2729,10 +2959,43 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
}
// Put down constant pool marker "Undefined instruction".
- emit(kConstantPoolMarker |
- EncodeConstantPoolLength(num_pending_reloc_info_));
+ // The data size helps disassembly know what to print.
+ emit(kConstantPoolMarker | EncodeConstantPoolLength(size_after_marker));
+
+ if (require_64_bit_align) {
+ emit(kConstantPoolMarker);
+ }
- // Emit constant pool entries.
+ // Emit 64-bit constant pool entries first: their range is smaller than
+ // 32-bit entries.
+ for (int i = 0; i < num_pending_reloc_info_; i++) {
+ RelocInfo& rinfo = pending_reloc_info_[i];
+
+ if (rinfo.rmode() != RelocInfo::NONE64) {
+ // 32-bit values emitted later.
+ continue;
+ }
+
+ ASSERT(!((uintptr_t)pc_ & 0x3)); // Check 64-bit alignment.
+
+ Instr instr = instr_at(rinfo.pc());
+ // Instruction to patch must be 'vldr rd, [pc, #offset]' with offset == 0.
+ ASSERT((IsVldrDPcImmediateOffset(instr) &&
+ GetVldrDRegisterImmediateOffset(instr) == 0));
+
+ int delta = pc_ - rinfo.pc() - kPcLoadDelta;
+ ASSERT(is_uint10(delta));
+
+ instr_at_put(rinfo.pc(), SetVldrDRegisterImmediateOffset(instr, delta));
+
+ const double double_data = rinfo.data64();
+ uint64_t uint_data = 0;
+ memcpy(&uint_data, &double_data, sizeof(double_data));
+ emit(uint_data & 0xFFFFFFFF);
+ emit(uint_data >> 32);
+ }
+
+ // Emit 32-bit constant pool entries.
for (int i = 0; i < num_pending_reloc_info_; i++) {
RelocInfo& rinfo = pending_reloc_info_[i];
ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
@@ -2740,25 +3003,35 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
rinfo.rmode() != RelocInfo::STATEMENT_POSITION &&
rinfo.rmode() != RelocInfo::CONST_POOL);
+ if (rinfo.rmode() == RelocInfo::NONE64) {
+ // 64-bit values emitted earlier.
+ continue;
+ }
+
Instr instr = instr_at(rinfo.pc());
- // Instruction to patch must be 'ldr rd, [pc, #offset]' with offset == 0.
+
+ // 64-bit loads shouldn't get here.
+ ASSERT(!IsVldrDPcImmediateOffset(instr));
+
+ int delta = pc_ - rinfo.pc() - kPcLoadDelta;
+ // 0 is the smallest delta:
+ // ldr rd, [pc, #0]
+ // constant pool marker
+ // data
+
if (IsLdrPcImmediateOffset(instr) &&
GetLdrRegisterImmediateOffset(instr) == 0) {
- int delta = pc_ - rinfo.pc() - kPcLoadDelta;
- // 0 is the smallest delta:
- // ldr rd, [pc, #0]
- // constant pool marker
- // data
ASSERT(is_uint12(delta));
-
instr_at_put(rinfo.pc(), SetLdrRegisterImmediateOffset(instr, delta));
+ emit(rinfo.data());
} else {
ASSERT(IsMovW(instr));
+ emit(rinfo.data());
}
- emit(rinfo.data());
}
num_pending_reloc_info_ = 0;
+ num_pending_64_bit_reloc_info_ = 0;
first_const_pool_use_ = -1;
RecordComment("]");
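// A toy model of the vldr patch step above (assumptions: kPcLoadDelta is 8
// on ARM, since the PC reads as the current instruction plus 8, and vldr
// offsets are 8-bit word multiples, hence the is_uint10 check):

#include <cassert>

int main() {
  const int kPcLoadDelta = 8;
  int load_pc = 0x100;   // address of the recorded 'vldr dd, [pc, #0]'
  int entry_pc = 0x140;  // where the 64-bit entry lands in the pool
  int delta = entry_pc - load_pc - kPcLoadDelta;
  assert(delta == 0x38);
  assert((delta & 3) == 0);                 // word-aligned (imm8 << 2)
  assert(delta >= 0 && delta < (1 << 10));  // mirrors is_uint10(delta)
  return 0;
}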
diff --git a/src/3rdparty/v8/src/arm/assembler-arm.h b/src/3rdparty/v8/src/arm/assembler-arm.h
index 8418aee..12cee54 100644
--- a/src/3rdparty/v8/src/arm/assembler-arm.h
+++ b/src/3rdparty/v8/src/arm/assembler-arm.h
@@ -47,6 +47,116 @@
namespace v8 {
namespace internal {
+// CpuFeatures keeps track of which features are supported by the target CPU.
+// Supported features must be enabled by a Scope before use.
+class CpuFeatures : public AllStatic {
+ public:
+ // Detect features of the target CPU. Set safe defaults if the serializer
+ // is enabled (snapshots must be portable).
+ static void Probe();
+
+ // Check whether a feature is supported by the target CPU.
+ static bool IsSupported(CpuFeature f) {
+ ASSERT(initialized_);
+ if (f == VFP3 && !FLAG_enable_vfp3) return false;
+ if (f == VFP2 && !FLAG_enable_vfp2) return false;
+ if (f == SUDIV && !FLAG_enable_sudiv) return false;
+ if (f == UNALIGNED_ACCESSES && !FLAG_enable_unaligned_accesses) {
+ return false;
+ }
+ if (f == VFP32DREGS && !FLAG_enable_32dregs) return false;
+ return (supported_ & (1u << f)) != 0;
+ }
+
+#ifdef DEBUG
+ // Check whether a feature is currently enabled.
+ static bool IsEnabled(CpuFeature f) {
+ ASSERT(initialized_);
+ Isolate* isolate = Isolate::UncheckedCurrent();
+ if (isolate == NULL) {
+ // When no isolate is available, work as if we're running in
+ // release mode.
+ return IsSupported(f);
+ }
+ unsigned enabled = static_cast<unsigned>(isolate->enabled_cpu_features());
+ return (enabled & (1u << f)) != 0;
+ }
+#endif
+
+ // Enable a specified feature within a scope.
+ class Scope BASE_EMBEDDED {
+#ifdef DEBUG
+
+ public:
+ explicit Scope(CpuFeature f) {
+ unsigned mask = 1u << f;
+ // VFP2 and ARMv7 are implied by VFP3.
+ if (f == VFP3) mask |= 1u << VFP2 | 1u << ARMv7;
+ ASSERT(CpuFeatures::IsSupported(f));
+ ASSERT(!Serializer::enabled() ||
+ (CpuFeatures::found_by_runtime_probing_ & mask) == 0);
+ isolate_ = Isolate::UncheckedCurrent();
+ old_enabled_ = 0;
+ if (isolate_ != NULL) {
+ old_enabled_ = static_cast<unsigned>(isolate_->enabled_cpu_features());
+ isolate_->set_enabled_cpu_features(old_enabled_ | mask);
+ }
+ }
+ ~Scope() {
+ ASSERT_EQ(Isolate::UncheckedCurrent(), isolate_);
+ if (isolate_ != NULL) {
+ isolate_->set_enabled_cpu_features(old_enabled_);
+ }
+ }
+
+ private:
+ Isolate* isolate_;
+ unsigned old_enabled_;
+#else
+
+ public:
+ explicit Scope(CpuFeature f) {}
+#endif
+ };
+
+ class TryForceFeatureScope BASE_EMBEDDED {
+ public:
+ explicit TryForceFeatureScope(CpuFeature f)
+ : old_supported_(CpuFeatures::supported_) {
+ if (CanForce()) {
+ CpuFeatures::supported_ |= (1u << f);
+ }
+ }
+
+ ~TryForceFeatureScope() {
+ if (CanForce()) {
+ CpuFeatures::supported_ = old_supported_;
+ }
+ }
+
+ private:
+ static bool CanForce() {
+ // It's only safe to temporarily force support of CPU features
+ // when there's only a single isolate, which is guaranteed when
+ // the serializer is enabled.
+ return Serializer::enabled();
+ }
+
+ const unsigned old_supported_;
+ };
+
+ private:
+#ifdef DEBUG
+ static bool initialized_;
+#endif
+ static unsigned supported_;
+ static unsigned found_by_runtime_probing_;
+
+ friend class ExternalReference;
+ DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
+};
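A hedged usage sketch for the class above, following the idiom used throughout this port: test support first, then open a Scope so debug builds can assert the feature is enabled while the guarded instructions are emitted.

if (CpuFeatures::IsSupported(VFP2)) {
  CpuFeatures::Scope scope(VFP2);
  // Emit VFP2 instructions; debug code paths can now pass
  // ASSERT(CpuFeatures::IsEnabled(VFP2)).
} else {
  // Fall back to a soft-float sequence.
}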
+
+
// CPU Registers.
//
// 1) We would prefer to use an enum, but enum values are assignment-
@@ -71,21 +181,24 @@ namespace internal {
// Core register
struct Register {
static const int kNumRegisters = 16;
- static const int kNumAllocatableRegisters = 8;
+ static const int kMaxNumAllocatableRegisters = 8;
static const int kSizeInBytes = 4;
+ static const int kGPRsPerNonVFP2Double = 2;
+
+ inline static int NumAllocatableRegisters();
static int ToAllocationIndex(Register reg) {
- ASSERT(reg.code() < kNumAllocatableRegisters);
+ ASSERT(reg.code() < kMaxNumAllocatableRegisters);
return reg.code();
}
static Register FromAllocationIndex(int index) {
- ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+ ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
return from_code(index);
}
static const char* AllocationIndexToString(int index) {
- ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+ ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
const char* const names[] = {
"r0",
"r1",
@@ -165,7 +278,6 @@ const Register sp = { kRegister_sp_Code };
const Register lr = { kRegister_lr_Code };
const Register pc = { kRegister_pc_Code };
-
// Single word VFP register.
struct SwVfpRegister {
bool is_valid() const { return 0 <= code_ && code_ < 32; }
@@ -190,52 +302,36 @@ struct SwVfpRegister {
// Double word VFP register.
struct DwVfpRegister {
- static const int kNumRegisters = 16;
+ static const int kMaxNumRegisters = 32;
// A few double registers are reserved: one as a scratch register and one to
// hold 0.0, which does not fit in the immediate field of vmov instructions.
// d14: 0.0
// d15: scratch register.
static const int kNumReservedRegisters = 2;
- static const int kNumAllocatableRegisters = kNumRegisters -
+ static const int kMaxNumAllocatableRegisters = kMaxNumRegisters -
kNumReservedRegisters;
- inline static int ToAllocationIndex(DwVfpRegister reg);
-
- static DwVfpRegister FromAllocationIndex(int index) {
- ASSERT(index >= 0 && index < kNumAllocatableRegisters);
- return from_code(index);
- }
+ // Note: the number of registers can be different at snapshot and run-time.
+ // Any code included in the snapshot must be able to run with both 16 and 32
+ // registers.
+ inline static int NumRegisters();
+ inline static int NumAllocatableRegisters();
- static const char* AllocationIndexToString(int index) {
- ASSERT(index >= 0 && index < kNumAllocatableRegisters);
- const char* const names[] = {
- "d0",
- "d1",
- "d2",
- "d3",
- "d4",
- "d5",
- "d6",
- "d7",
- "d8",
- "d9",
- "d10",
- "d11",
- "d12",
- "d13"
- };
- return names[index];
- }
+ inline static int ToAllocationIndex(DwVfpRegister reg);
+ static const char* AllocationIndexToString(int index);
+ inline static DwVfpRegister FromAllocationIndex(int index);
static DwVfpRegister from_code(int code) {
DwVfpRegister r = { code };
return r;
}
- // Supporting d0 to d15, can be later extended to d31.
- bool is_valid() const { return 0 <= code_ && code_ < 16; }
+ bool is_valid() const {
+ return 0 <= code_ && code_ < kMaxNumRegisters;
+ }
bool is(DwVfpRegister reg) const { return code_ == reg.code_; }
SwVfpRegister low() const {
+ ASSERT(code_ < 16);
SwVfpRegister reg;
reg.code_ = code_ * 2;
@@ -243,6 +339,7 @@ struct DwVfpRegister {
return reg;
}
SwVfpRegister high() const {
+ ASSERT(code_ < 16);
SwVfpRegister reg;
reg.code_ = (code_ * 2) + 1;
@@ -322,6 +419,25 @@ const DwVfpRegister d12 = { 12 };
const DwVfpRegister d13 = { 13 };
const DwVfpRegister d14 = { 14 };
const DwVfpRegister d15 = { 15 };
+const DwVfpRegister d16 = { 16 };
+const DwVfpRegister d17 = { 17 };
+const DwVfpRegister d18 = { 18 };
+const DwVfpRegister d19 = { 19 };
+const DwVfpRegister d20 = { 20 };
+const DwVfpRegister d21 = { 21 };
+const DwVfpRegister d22 = { 22 };
+const DwVfpRegister d23 = { 23 };
+const DwVfpRegister d24 = { 24 };
+const DwVfpRegister d25 = { 25 };
+const DwVfpRegister d26 = { 26 };
+const DwVfpRegister d27 = { 27 };
+const DwVfpRegister d28 = { 28 };
+const DwVfpRegister d29 = { 29 };
+const DwVfpRegister d30 = { 30 };
+const DwVfpRegister d31 = { 31 };
+
+const Register sfpd_lo = { kRegister_r6_Code };
+const Register sfpd_hi = { kRegister_r7_Code };
// Aliases for double registers. Defined using #define instead of
// "static const DwVfpRegister&" because Clang complains otherwise when a
@@ -399,7 +515,7 @@ class Operand BASE_EMBEDDED {
public:
// immediate
INLINE(explicit Operand(int32_t immediate,
- RelocInfo::Mode rmode = RelocInfo::NONE));
+ RelocInfo::Mode rmode = RelocInfo::NONE32));
INLINE(static Operand Zero()) {
return Operand(static_cast<int32_t>(0));
}
@@ -498,114 +614,6 @@ class MemOperand BASE_EMBEDDED {
friend class Assembler;
};
-// CpuFeatures keeps track of which features are supported by the target CPU.
-// Supported features must be enabled by a Scope before use.
-class CpuFeatures : public AllStatic {
- public:
- // Detect features of the target CPU. Set safe defaults if the serializer
- // is enabled (snapshots must be portable).
- static void Probe();
-
- // Check whether a feature is supported by the target CPU.
- static bool IsSupported(CpuFeature f) {
- ASSERT(initialized_);
- if (f == VFP3 && !FLAG_enable_vfp3) return false;
- if (f == VFP2 && !FLAG_enable_vfp2) return false;
- if (f == SUDIV && !FLAG_enable_sudiv) return false;
- if (f == UNALIGNED_ACCESSES && !FLAG_enable_unaligned_accesses) {
- return false;
- }
- return (supported_ & (1u << f)) != 0;
- }
-
-#ifdef DEBUG
- // Check whether a feature is currently enabled.
- static bool IsEnabled(CpuFeature f) {
- ASSERT(initialized_);
- Isolate* isolate = Isolate::UncheckedCurrent();
- if (isolate == NULL) {
- // When no isolate is available, work as if we're running in
- // release mode.
- return IsSupported(f);
- }
- unsigned enabled = static_cast<unsigned>(isolate->enabled_cpu_features());
- return (enabled & (1u << f)) != 0;
- }
-#endif
-
- // Enable a specified feature within a scope.
- class Scope BASE_EMBEDDED {
-#ifdef DEBUG
-
- public:
- explicit Scope(CpuFeature f) {
- unsigned mask = 1u << f;
- // VFP2 and ARMv7 are implied by VFP3.
- if (f == VFP3) mask |= 1u << VFP2 | 1u << ARMv7;
- ASSERT(CpuFeatures::IsSupported(f));
- ASSERT(!Serializer::enabled() ||
- (CpuFeatures::found_by_runtime_probing_ & mask) == 0);
- isolate_ = Isolate::UncheckedCurrent();
- old_enabled_ = 0;
- if (isolate_ != NULL) {
- old_enabled_ = static_cast<unsigned>(isolate_->enabled_cpu_features());
- isolate_->set_enabled_cpu_features(old_enabled_ | mask);
- }
- }
- ~Scope() {
- ASSERT_EQ(Isolate::UncheckedCurrent(), isolate_);
- if (isolate_ != NULL) {
- isolate_->set_enabled_cpu_features(old_enabled_);
- }
- }
-
- private:
- Isolate* isolate_;
- unsigned old_enabled_;
-#else
-
- public:
- explicit Scope(CpuFeature f) {}
-#endif
- };
-
- class TryForceFeatureScope BASE_EMBEDDED {
- public:
- explicit TryForceFeatureScope(CpuFeature f)
- : old_supported_(CpuFeatures::supported_) {
- if (CanForce()) {
- CpuFeatures::supported_ |= (1u << f);
- }
- }
-
- ~TryForceFeatureScope() {
- if (CanForce()) {
- CpuFeatures::supported_ = old_supported_;
- }
- }
-
- private:
- static bool CanForce() {
- // It's only safe to temporarily force support of CPU features
- // when there's only a single isolate, which is guaranteed when
- // the serializer is enabled.
- return Serializer::enabled();
- }
-
- const unsigned old_supported_;
- };
-
- private:
-#ifdef DEBUG
- static bool initialized_;
-#endif
- static unsigned supported_;
- static unsigned found_by_runtime_probing_;
-
- DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
-};
-
-
extern const Instr kMovLrPc;
extern const Instr kLdrPCMask;
extern const Instr kLdrPCPattern;
@@ -629,7 +637,11 @@ extern const Instr kCmpCmnFlip;
extern const Instr kAddSubFlip;
extern const Instr kAndBicFlip;
-
+struct VmovIndex {
+ unsigned char index;
+};
+const VmovIndex VmovIndexLo = { 0 };
+const VmovIndex VmovIndexHi = { 1 };
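VmovIndex selects which 32-bit half of a D register a scalar vmov writes. A sketch of the overload declared further down in this header (register choices are illustrative):

__ vmov(d0, VmovIndexLo, r0);  // d0[31:0]  = r0
__ vmov(d0, VmovIndexHi, r1);  // d0[63:32] = r1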
class Assembler : public AssemblerBase {
public:
@@ -647,7 +659,7 @@ class Assembler : public AssemblerBase {
// is too small, a fatal error occurs. No deallocation of the buffer is done
// upon destruction of the assembler.
Assembler(Isolate* isolate, void* buffer, int buffer_size);
- ~Assembler();
+ virtual ~Assembler();
// GetCode emits any pending (non-emitted) code and fills the descriptor
// desc. GetCode() is idempotent; it returns the same result if no other
@@ -994,10 +1006,7 @@ class Assembler : public AssemblerBase {
LFlag l = Short); // v5 and above
// Support for VFP.
- // All these APIs support S0 to S31 and D0 to D15.
- // Currently these APIs do not support extended D registers, i.e., D16 to D31.
- // However, some simple modifications can allow
- // these APIs to support D16 to D31.
+ // All these APIs support S0 to S31 and D0 to D31.
void vldr(const DwVfpRegister dst,
const Register base,
@@ -1057,8 +1066,7 @@ class Assembler : public AssemblerBase {
void vmov(const DwVfpRegister dst,
double imm,
- const Register scratch = no_reg,
- const Condition cond = al);
+ const Register scratch = no_reg);
void vmov(const SwVfpRegister dst,
const SwVfpRegister src,
const Condition cond = al);
@@ -1066,6 +1074,10 @@ class Assembler : public AssemblerBase {
const DwVfpRegister src,
const Condition cond = al);
void vmov(const DwVfpRegister dst,
+ const VmovIndex index,
+ const Register src,
+ const Condition cond = al);
+ void vmov(const DwVfpRegister dst,
const Register src1,
const Register src2,
const Condition cond = al);
@@ -1126,6 +1138,14 @@ class Assembler : public AssemblerBase {
const DwVfpRegister src1,
const DwVfpRegister src2,
const Condition cond = al);
+ void vmla(const DwVfpRegister dst,
+ const DwVfpRegister src1,
+ const DwVfpRegister src2,
+ const Condition cond = al);
+ void vmls(const DwVfpRegister dst,
+ const DwVfpRegister src1,
+ const DwVfpRegister src2,
+ const Condition cond = al);
void vdiv(const DwVfpRegister dst,
const DwVfpRegister src1,
const DwVfpRegister src2,
@@ -1272,8 +1292,6 @@ class Assembler : public AssemblerBase {
void db(uint8_t data);
void dd(uint32_t data);
- int pc_offset() const { return pc_ - buffer_; }
-
PositionsRecorder* positions_recorder() { return &positions_recorder_; }
// Read/patch instructions
@@ -1289,8 +1307,11 @@ class Assembler : public AssemblerBase {
static bool IsBranch(Instr instr);
static int GetBranchOffset(Instr instr);
static bool IsLdrRegisterImmediate(Instr instr);
+ static bool IsVldrDRegisterImmediate(Instr instr);
static int GetLdrRegisterImmediateOffset(Instr instr);
+ static int GetVldrDRegisterImmediateOffset(Instr instr);
static Instr SetLdrRegisterImmediateOffset(Instr instr, int offset);
+ static Instr SetVldrDRegisterImmediateOffset(Instr instr, int offset);
static bool IsStrRegisterImmediate(Instr instr);
static Instr SetStrRegisterImmediateOffset(Instr instr, int offset);
static bool IsAddRegisterImmediate(Instr instr);
@@ -1305,6 +1326,7 @@ class Assembler : public AssemblerBase {
static bool IsStrRegFpNegOffset(Instr instr);
static bool IsLdrRegFpNegOffset(Instr instr);
static bool IsLdrPcImmediateOffset(Instr instr);
+ static bool IsVldrDPcImmediateOffset(Instr instr);
static bool IsTstImmediate(Instr instr);
static bool IsCmpRegister(Instr instr);
static bool IsCmpImmediate(Instr instr);
@@ -1315,12 +1337,13 @@ class Assembler : public AssemblerBase {
static bool IsMovW(Instr instr);
// Constants in pools are accessed via pc relative addressing, which can
- // reach +/-4KB thereby defining a maximum distance between the instruction
- // and the accessed constant.
- static const int kMaxDistToPool = 4*KB;
- static const int kMaxNumPendingRelocInfo = kMaxDistToPool/kInstrSize;
- STATIC_ASSERT((kConstantPoolLengthMaxMask & kMaxNumPendingRelocInfo) ==
- kMaxNumPendingRelocInfo);
+ // reach +/-4KB for integer PC-relative loads and +/-1KB for floating-point
+ // PC-relative loads, thereby defining a maximum distance between the
+ // instruction and the accessed constant.
+ static const int kMaxDistToIntPool = 4*KB;
+ static const int kMaxDistToFPPool = 1*KB;
+ // All relocations could be integer, so the integer reach acts as the limit.
+ static const int kMaxNumPendingRelocInfo = kMaxDistToIntPool/kInstrSize;
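The 1KB figure follows from the vldr encoding: its immediate is an 8-bit word count (imm8 << 2), so a 64-bit entry must sit within roughly +/-1020 bytes of the load, whereas ldr has a 12-bit byte offset. A quick check of the derived limit, assuming kInstrSize == 4:

// kMaxNumPendingRelocInfo = kMaxDistToIntPool / kInstrSize
//                         = 4096 / 4 = 1024 pending entries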
// Postpone the generation of the constant pool for the specified number of
// instructions.
@@ -1360,7 +1383,9 @@ class Assembler : public AssemblerBase {
if (--const_pool_blocked_nesting_ == 0) {
// Check the constant pool hasn't been blocked for too long.
ASSERT((num_pending_reloc_info_ == 0) ||
- (pc_offset() < (first_const_pool_use_ + kMaxDistToPool)));
+ (pc_offset() < (first_const_pool_use_ + kMaxDistToIntPool)));
+ ASSERT((num_pending_64_bit_reloc_info_ == 0) ||
+ (pc_offset() < (first_const_pool_use_ + kMaxDistToFPPool)));
// Two cases:
// * no_const_pool_before_ >= next_buffer_check_ and the emission is
// still blocked
@@ -1376,13 +1401,6 @@ class Assembler : public AssemblerBase {
}
private:
- // Code buffer:
- // The buffer into which code and relocation info are generated.
- byte* buffer_;
- int buffer_size_;
- // True if the assembler owns the buffer, false if buffer is external.
- bool own_buffer_;
-
int next_buffer_check_; // pc offset of next buffer check
// Code generation
@@ -1391,7 +1409,6 @@ class Assembler : public AssemblerBase {
// not have to check for overflow. The same is true for writes of large
// relocation info entries.
static const int kGap = 32;
- byte* pc_; // the program counter; moves forward
// Constant pool generation
// Pools are emitted in the instruction stream, preferably after unconditional
@@ -1411,13 +1428,6 @@ class Assembler : public AssemblerBase {
static const int kCheckPoolInterval = kCheckPoolIntervalInst * kInstrSize;
- // Average distance between a constant pool and the first instruction
- // accessing the constant pool. Longer distance should result in less I-cache
- // pollution.
- // In practice the distance will be smaller since constant pool emission is
- // forced after function return and sometimes after unconditional branches.
- static const int kAvgDistToPool = kMaxDistToPool - kCheckPoolInterval;
-
// Emission of the constant pool may be blocked in some code sequences.
int const_pool_blocked_nesting_; // Block emission if this is not zero.
int no_const_pool_before_; // Block emission before this pc offset.
@@ -1442,6 +1452,9 @@ class Assembler : public AssemblerBase {
RelocInfo pending_reloc_info_[kMaxNumPendingRelocInfo];
// number of pending reloc info entries in the buffer
int num_pending_reloc_info_;
+ // Number of pending reloc info entries included above which also happen to
+ // be 64-bit.
+ int num_pending_64_bit_reloc_info_;
// The bound position, before this we cannot do instruction elimination.
int last_bound_pos_;
@@ -1478,6 +1491,8 @@ class Assembler : public AssemblerBase {
// Record reloc info for current pc_
void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0,
UseConstantPoolMode mode = USE_CONSTANT_POOL);
+ void RecordRelocInfo(double data);
+ void RecordRelocInfoConstantPoolEntryHelper(const RelocInfo& rinfo);
friend class RegExpMacroAssemblerARM;
friend class RelocInfo;
diff --git a/src/3rdparty/v8/src/arm/builtins-arm.cc b/src/3rdparty/v8/src/arm/builtins-arm.cc
index 24d14e8..466c890 100644
--- a/src/3rdparty/v8/src/arm/builtins-arm.cc
+++ b/src/3rdparty/v8/src/arm/builtins-arm.cc
@@ -140,7 +140,7 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
__ LoadRoot(scratch1, Heap::kEmptyFixedArrayRootIndex);
__ str(scratch1, FieldMemOperand(result, JSArray::kPropertiesOffset));
// Field JSArray::kElementsOffset is initialized later.
- __ mov(scratch3, Operand(0, RelocInfo::NONE));
+ __ mov(scratch3, Operand::Zero());
__ str(scratch3, FieldMemOperand(result, JSArray::kLengthOffset));
if (initial_capacity == 0) {
@@ -319,7 +319,7 @@ static void ArrayNativeCode(MacroAssembler* masm,
has_non_smi_element, finish, cant_transition_map, not_double;
// Check for array construction with zero arguments or one.
- __ cmp(r0, Operand(0, RelocInfo::NONE));
+ __ cmp(r0, Operand::Zero());
__ b(ne, &argc_one_or_more);
// Handle construction of an empty array.
@@ -347,7 +347,7 @@ static void ArrayNativeCode(MacroAssembler* masm,
__ tst(r2, r2);
__ b(ne, &not_empty_array);
__ Drop(1); // Adjust stack.
- __ mov(r0, Operand(0)); // Treat this as a call with argc of zero.
+ __ mov(r0, Operand::Zero()); // Treat this as a call with argc of zero.
__ b(&empty_array);
__ bind(&not_empty_array);
@@ -542,31 +542,65 @@ void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : number of arguments
// -- r1 : constructor function
+ // -- r2 : type info cell
// -- lr : return address
// -- sp[...]: constructor arguments
// -----------------------------------
- Label generic_constructor;
if (FLAG_debug_code) {
// The array construct code is only set for the builtin and internal
// Array functions which always have a map.
// Initial map for the builtin Array function should be a map.
- __ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
- __ tst(r2, Operand(kSmiTagMask));
+ __ ldr(r3, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
+ __ tst(r3, Operand(kSmiTagMask));
__ Assert(ne, "Unexpected initial map for Array function");
- __ CompareObjectType(r2, r3, r4, MAP_TYPE);
+ __ CompareObjectType(r1, r3, r4, MAP_TYPE);
__ Assert(eq, "Unexpected initial map for Array function");
- }
- // Run the native code for the Array function called as a constructor.
- ArrayNativeCode(masm, &generic_constructor);
+ if (FLAG_optimize_constructed_arrays) {
+ // We should either have undefined in r2 or a valid JSGlobalPropertyCell.
+ Label okay_here;
+ Handle<Object> undefined_sentinel(
+ masm->isolate()->heap()->undefined_value(), masm->isolate());
+ Handle<Map> global_property_cell_map(
+ masm->isolate()->heap()->global_property_cell_map());
+ __ cmp(r2, Operand(undefined_sentinel));
+ __ b(eq, &okay_here);
+ __ ldr(r3, FieldMemOperand(r2, 0));
+ __ cmp(r3, Operand(global_property_cell_map));
+ __ Assert(eq, "Expected property cell in register r2");
+ __ bind(&okay_here);
+ }
+ }
- // Jump to the generic construct code in case the specialized code cannot
- // handle the construction.
- __ bind(&generic_constructor);
- Handle<Code> generic_construct_stub =
- masm->isolate()->builtins()->JSConstructStubGeneric();
- __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
+ if (FLAG_optimize_constructed_arrays) {
+ Label not_zero_case, not_one_case;
+ __ tst(r0, r0);
+ __ b(ne, &not_zero_case);
+ ArrayNoArgumentConstructorStub no_argument_stub;
+ __ TailCallStub(&no_argument_stub);
+
+ __ bind(&not_zero_case);
+ __ cmp(r0, Operand(1));
+ __ b(gt, &not_one_case);
+ ArraySingleArgumentConstructorStub single_argument_stub;
+ __ TailCallStub(&single_argument_stub);
+
+ __ bind(&not_one_case);
+ ArrayNArgumentsConstructorStub n_argument_stub;
+ __ TailCallStub(&n_argument_stub);
+ } else {
+ Label generic_constructor;
+ // Run the native code for the Array function called as a constructor.
+ ArrayNativeCode(masm, &generic_constructor);
+
+ // Jump to the generic construct code in case the specialized code cannot
+ // handle the construction.
+ __ bind(&generic_constructor);
+ Handle<Code> generic_construct_stub =
+ masm->isolate()->builtins()->JSConstructStubGeneric();
+ __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
+ }
}
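With FLAG_optimize_constructed_arrays enabled, the builtin reduces to a three-way dispatch on the argument count in r0. A hedged C++-level sketch of that control flow (argc stands in for r0):

if (argc == 0)      TailCallStub(ArrayNoArgumentConstructorStub());
else if (argc == 1) TailCallStub(ArraySingleArgumentConstructorStub());
else                TailCallStub(ArrayNArgumentsConstructorStub());
// Without the flag, the old path runs ArrayNativeCode and falls back
// to JSConstructStubGeneric when the fast path cannot handle it.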
@@ -590,7 +624,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
// Load the first arguments in r0 and get rid of the rest.
Label no_arguments;
- __ cmp(r0, Operand(0, RelocInfo::NONE));
+ __ cmp(r0, Operand::Zero());
__ b(eq, &no_arguments);
// First args = sp[(argc - 1) * 4].
__ sub(r0, r0, Operand(1));
@@ -634,7 +668,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
__ cmp(r4, Operand(JSValue::kSize >> kPointerSizeLog2));
__ Assert(eq, "Unexpected string wrapper instance size");
__ ldrb(r4, FieldMemOperand(map, Map::kUnusedPropertyFieldsOffset));
- __ cmp(r4, Operand(0, RelocInfo::NONE));
+ __ cmp(r4, Operand::Zero());
__ Assert(eq, "Unexpected unused properties of string wrapper");
}
__ str(map, FieldMemOperand(r0, HeapObject::kMapOffset));
@@ -682,7 +716,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
// Load the empty string into r2, remove the receiver from the
// stack, and jump back to the case where the argument is a string.
__ bind(&no_arguments);
- __ LoadRoot(argument, Heap::kEmptyStringRootIndex);
+ __ LoadRoot(argument, Heap::kempty_stringRootIndex);
__ Drop(1);
__ b(&argument_is_string);
@@ -1044,9 +1078,13 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// If the type of the result (stored in its map) is less than
// FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
- __ CompareObjectType(r0, r3, r3, FIRST_SPEC_OBJECT_TYPE);
+ __ CompareObjectType(r0, r1, r3, FIRST_SPEC_OBJECT_TYPE);
__ b(ge, &exit);
+ // Symbols are "objects".
+ __ CompareInstanceType(r1, r3, SYMBOL_TYPE);
+ __ b(eq, &exit);
+
// Throw away the result of the constructor invocation and use the
// on-stack receiver as the result.
__ bind(&use_receiver);
@@ -1097,7 +1135,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// r5-r7, cp may be clobbered
// Clear the context before we push it when entering the internal frame.
- __ mov(cp, Operand(0, RelocInfo::NONE));
+ __ mov(cp, Operand::Zero());
// Enter an internal frame.
{
@@ -1141,6 +1179,10 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Invoke the code and pass argc as r0.
__ mov(r0, Operand(r3));
if (is_construct) {
+ // No type feedback cell is available.
+ Handle<Object> undefined_sentinel(
+ masm->isolate()->heap()->undefined_value(), masm->isolate());
+ __ mov(r2, Operand(undefined_sentinel));
CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
__ CallStub(&stub);
} else {
@@ -1259,6 +1301,24 @@ CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
+void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Preserve registers across notification, this is important for compiled
+ // stubs that tail call the runtime on deopts passing their parameters in
+ // registers.
+ __ stm(db_w, sp, kJSCallerSaved | kCalleeSaved);
+ // Pass the function and deoptimization type to the runtime system.
+ __ CallRuntime(Runtime::kNotifyStubFailure, 0);
+ __ ldm(ia_w, sp, kJSCallerSaved | kCalleeSaved);
+ }
+
+ __ add(sp, sp, Operand(kPointerSize)); // Ignore state
+ __ mov(pc, lr); // Jump to miss handler
+}
+
+
static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
Deoptimizer::BailoutType type) {
{
@@ -1355,7 +1415,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// 1. Make sure we have at least one argument.
// r0: actual number of arguments
{ Label done;
- __ cmp(r0, Operand(0));
+ __ cmp(r0, Operand::Zero());
__ b(ne, &done);
__ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
__ push(r2);
@@ -1376,7 +1436,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// r0: actual number of arguments
// r1: function
Label shift_arguments;
- __ mov(r4, Operand(0, RelocInfo::NONE)); // indicate regular JS_FUNCTION
+ __ mov(r4, Operand::Zero()); // indicate regular JS_FUNCTION
{ Label convert_to_object, use_global_receiver, patch_receiver;
// Change context eagerly in case we need the global receiver.
__ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
@@ -1431,7 +1491,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// Restore the function to r1, and the flag to r4.
__ ldr(r1, MemOperand(sp, r0, LSL, kPointerSizeLog2));
- __ mov(r4, Operand(0, RelocInfo::NONE));
+ __ mov(r4, Operand::Zero());
__ jmp(&patch_receiver);
// Use the global receiver object from the called function as the
@@ -1453,11 +1513,11 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// 3b. Check for function proxy.
__ bind(&slow);
- __ mov(r4, Operand(1, RelocInfo::NONE)); // indicate function proxy
+ __ mov(r4, Operand(1, RelocInfo::NONE32)); // indicate function proxy
__ cmp(r2, Operand(JS_FUNCTION_PROXY_TYPE));
__ b(eq, &shift_arguments);
__ bind(&non_function);
- __ mov(r4, Operand(2, RelocInfo::NONE)); // indicate non-function
+ __ mov(r4, Operand(2, RelocInfo::NONE32)); // indicate non-function
// 3c. Patch the first argument when calling a non-function. The
// CALL_NON_FUNCTION builtin expects the non-function callee as
@@ -1501,7 +1561,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ tst(r4, r4);
__ b(eq, &function);
// Expected number of arguments is 0 for CALL_NON_FUNCTION.
- __ mov(r2, Operand(0, RelocInfo::NONE));
+ __ mov(r2, Operand::Zero());
__ SetCallKind(r5, CALL_AS_METHOD);
__ cmp(r4, Operand(1));
__ b(ne, &non_proxy);
@@ -1579,7 +1639,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// Push current limit and index.
__ bind(&okay);
__ push(r0); // limit
- __ mov(r1, Operand(0, RelocInfo::NONE)); // initial index
+ __ mov(r1, Operand::Zero()); // initial index
__ push(r1);
// Get the receiver.
@@ -1691,7 +1751,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
__ bind(&call_proxy);
__ push(r1); // add function proxy as last argument
__ add(r0, r0, Operand(1));
- __ mov(r2, Operand(0, RelocInfo::NONE));
+ __ mov(r2, Operand::Zero());
__ SetCallKind(r5, CALL_AS_METHOD);
__ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY);
__ Call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
diff --git a/src/3rdparty/v8/src/arm/code-stubs-arm.cc b/src/3rdparty/v8/src/arm/code-stubs-arm.cc
index a769f54..e7a8489 100644
--- a/src/3rdparty/v8/src/arm/code-stubs-arm.cc
+++ b/src/3rdparty/v8/src/arm/code-stubs-arm.cc
@@ -32,17 +32,90 @@
#include "bootstrapper.h"
#include "code-stubs.h"
#include "regexp-macro-assembler.h"
+#include "stub-cache.h"
namespace v8 {
namespace internal {
+void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { r3, r2, r1, r0 };
+ descriptor->register_param_count_ = 4;
+ descriptor->register_params_ = registers;
+ descriptor->stack_parameter_count_ = NULL;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kCreateObjectLiteralShallow)->entry;
+}
+
+
+void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { r1, r0 };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure);
+}
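Each InitializeInterfaceDescriptor hands the Hydrogen stub compiler the stub's register calling convention plus a bail-out target. For the keyed load above, the roles of the two registers are an assumption consistent with the ARM KeyedLoadIC convention in ic-arm.cc:

static Register registers[] = { r1, r0 };  // { receiver, key }
// On a miss the generated stub deopts into
// KeyedLoadIC_MissFromStubFailure rather than crashing.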
+
+
+void TransitionElementsKindStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { r0, r1 };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ Address entry =
+ Runtime::FunctionForId(Runtime::kTransitionElementsKind)->entry;
+ descriptor->deoptimization_handler_ = FUNCTION_ADDR(entry);
+}
+
+
+static void InitializeArrayConstructorDescriptor(Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ // register state
+ // r1 -- constructor function
+ // r2 -- type info cell with elements kind
+ // r0 -- number of arguments to the constructor function
+ static Register registers[] = { r1, r2 };
+ descriptor->register_param_count_ = 2;
+ // The stack parameter count in r0 covers the constructor pointer and argument.
+ descriptor->stack_parameter_count_ = &r0;
+ descriptor->register_params_ = registers;
+ descriptor->extra_expression_stack_count_ = 1;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(ArrayConstructor_StubFailure);
+}
+
+
+void ArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ InitializeArrayConstructorDescriptor(isolate, descriptor);
+}
+
+
+void ArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ InitializeArrayConstructorDescriptor(isolate, descriptor);
+}
+
+
+void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ InitializeArrayConstructorDescriptor(isolate, descriptor);
+}
+
+
#define __ ACCESS_MASM(masm)
static void EmitIdenticalObjectComparison(MacroAssembler* masm,
Label* slow,
- Condition cond,
- bool never_nan_nan);
+ Condition cond);
static void EmitSmiNonsmiComparison(MacroAssembler* masm,
Register lhs,
Register rhs,
@@ -250,8 +323,10 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
__ str(r2, MemOperand(r0, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
// Copy the qml global object from the surrounding context.
- __ ldr(r1, MemOperand(cp, Context::SlotOffset(Context::QML_GLOBAL_OBJECT_INDEX)));
- __ str(r1, MemOperand(r0, Context::SlotOffset(Context::QML_GLOBAL_OBJECT_INDEX)));
+ __ ldr(r1,
+ MemOperand(cp, Context::SlotOffset(Context::QML_GLOBAL_OBJECT_INDEX)));
+ __ str(r1,
+ MemOperand(r0, Context::SlotOffset(Context::QML_GLOBAL_OBJECT_INDEX)));
// Initialize the rest of the slots to undefined.
__ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
@@ -342,6 +417,7 @@ static void GenerateFastCloneShallowArrayCommon(
MacroAssembler* masm,
int length,
FastCloneShallowArrayStub::Mode mode,
+ AllocationSiteMode allocation_site_mode,
Label* fail) {
// Registers on entry:
//
@@ -355,16 +431,28 @@ static void GenerateFastCloneShallowArrayCommon(
? FixedDoubleArray::SizeFor(length)
: FixedArray::SizeFor(length);
}
- int size = JSArray::kSize + elements_size;
+
+ int size = JSArray::kSize;
+ int allocation_info_start = size;
+ if (allocation_site_mode == TRACK_ALLOCATION_SITE) {
+ size += AllocationSiteInfo::kSize;
+ }
+ size += elements_size;
// Allocate both the JS array and the elements array in one big
// allocation. This avoids multiple limit checks.
- __ AllocateInNewSpace(size,
- r0,
- r1,
- r2,
- fail,
- TAG_OBJECT);
+ AllocationFlags flags = TAG_OBJECT;
+ if (mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS) {
+ flags = static_cast<AllocationFlags>(DOUBLE_ALIGNMENT | flags);
+ }
+ __ AllocateInNewSpace(size, r0, r1, r2, fail, flags);
+
+ if (allocation_site_mode == TRACK_ALLOCATION_SITE) {
+ __ mov(r2, Operand(Handle<Map>(masm->isolate()->heap()->
+ allocation_site_info_map())));
+ __ str(r2, FieldMemOperand(r0, allocation_info_start));
+ __ str(r3, FieldMemOperand(r0, allocation_info_start + kPointerSize));
+ }
// Copy the JS array part.
for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
@@ -378,7 +466,11 @@ static void GenerateFastCloneShallowArrayCommon(
// Get hold of the elements array of the boilerplate and setup the
// elements pointer in the resulting object.
__ ldr(r3, FieldMemOperand(r3, JSArray::kElementsOffset));
- __ add(r2, r0, Operand(JSArray::kSize));
+ if (allocation_site_mode == TRACK_ALLOCATION_SITE) {
+ __ add(r2, r0, Operand(JSArray::kSize + AllocationSiteInfo::kSize));
+ } else {
+ __ add(r2, r0, Operand(JSArray::kSize));
+ }
__ str(r2, FieldMemOperand(r0, JSArray::kElementsOffset));
// Copy the elements array.
@@ -411,8 +503,9 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
__ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
__ CompareRoot(r0, Heap::kFixedCOWArrayMapRootIndex);
__ b(ne, &check_fast_elements);
- GenerateFastCloneShallowArrayCommon(masm, 0,
- COPY_ON_WRITE_ELEMENTS, &slow_case);
+ GenerateFastCloneShallowArrayCommon(masm, 0, COPY_ON_WRITE_ELEMENTS,
+ allocation_site_mode_,
+ &slow_case);
// Return and remove the on-stack parameters.
__ add(sp, sp, Operand(3 * kPointerSize));
__ Ret();
@@ -420,8 +513,9 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
__ bind(&check_fast_elements);
__ CompareRoot(r0, Heap::kFixedArrayMapRootIndex);
__ b(ne, &double_elements);
- GenerateFastCloneShallowArrayCommon(masm, length_,
- CLONE_ELEMENTS, &slow_case);
+ GenerateFastCloneShallowArrayCommon(masm, length_, CLONE_ELEMENTS,
+ allocation_site_mode_,
+ &slow_case);
// Return and remove the on-stack parameters.
__ add(sp, sp, Operand(3 * kPointerSize));
__ Ret();
@@ -453,7 +547,9 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
__ pop(r3);
}
- GenerateFastCloneShallowArrayCommon(masm, length_, mode, &slow_case);
+ GenerateFastCloneShallowArrayCommon(masm, length_, mode,
+ allocation_site_mode_,
+ &slow_case);
// Return and remove the on-stack parameters.
__ add(sp, sp, Operand(3 * kPointerSize));
@@ -464,55 +560,12 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
}
-void FastCloneShallowObjectStub::Generate(MacroAssembler* masm) {
- // Stack layout on entry:
- //
- // [sp]: object literal flags.
- // [sp + kPointerSize]: constant properties.
- // [sp + (2 * kPointerSize)]: literal index.
- // [sp + (3 * kPointerSize)]: literals array.
-
- // Load boilerplate object into r3 and check if we need to create a
- // boilerplate.
- Label slow_case;
- __ ldr(r3, MemOperand(sp, 3 * kPointerSize));
- __ ldr(r0, MemOperand(sp, 2 * kPointerSize));
- __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ ldr(r3, MemOperand(r3, r0, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ CompareRoot(r3, Heap::kUndefinedValueRootIndex);
- __ b(eq, &slow_case);
-
- // Check that the boilerplate contains only fast properties and we can
- // statically determine the instance size.
- int size = JSObject::kHeaderSize + length_ * kPointerSize;
- __ ldr(r0, FieldMemOperand(r3, HeapObject::kMapOffset));
- __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceSizeOffset));
- __ cmp(r0, Operand(size >> kPointerSizeLog2));
- __ b(ne, &slow_case);
-
- // Allocate the JS object and copy header together with all in-object
- // properties from the boilerplate.
- __ AllocateInNewSpace(size, r0, r1, r2, &slow_case, TAG_OBJECT);
- for (int i = 0; i < size; i += kPointerSize) {
- __ ldr(r1, FieldMemOperand(r3, i));
- __ str(r1, FieldMemOperand(r0, i));
- }
-
- // Return and remove the on-stack parameters.
- __ add(sp, sp, Operand(4 * kPointerSize));
- __ Ret();
-
- __ bind(&slow_case);
- __ TailCallRuntime(Runtime::kCreateObjectLiteralShallow, 4, 1);
-}
-
-
// Takes a Smi and converts to an IEEE 64 bit floating point value in two
// registers. The format is 1 sign bit, 11 exponent bits (biased 1023) and
// 52 fraction bits (20 in the first word, 32 in the second). Zeros is a
// scratch register. Destroys the source register. No GC occurs during this
// stub so you don't have to set up the frame.
-class ConvertToDoubleStub : public CodeStub {
+class ConvertToDoubleStub : public PlatformCodeStub {
public:
ConvertToDoubleStub(Register result_reg_1,
Register result_reg_2,
@@ -559,7 +612,7 @@ void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
__ and_(exponent, source_, Operand(HeapNumber::kSignMask), SetCC);
// Subtract from 0 if source was negative.
- __ rsb(source_, source_, Operand(0, RelocInfo::NONE), LeaveCC, ne);
+ __ rsb(source_, source_, Operand::Zero(), LeaveCC, ne);
// We have -1, 0 or 1, which we treat specially. Register source_ contains
// absolute value: it is either equal to 1 (special case of -1 and 1),
@@ -572,7 +625,7 @@ void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
HeapNumber::kExponentBias << HeapNumber::kExponentShift;
__ orr(exponent, exponent, Operand(exponent_word_for_1), LeaveCC, eq);
// 1, 0 and -1 all have 0 for the second word.
- __ mov(mantissa, Operand(0, RelocInfo::NONE));
+ __ mov(mantissa, Operand::Zero());
__ Ret();
__ bind(&not_special);
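A worked instance of the special-case path above: for the Smi 1, no sign bit is set, the exponent word for 1.0 is OR'ed in, and the mantissa word is zeroed.

// exponent_word_for_1 = kExponentBias << kExponentShift
//                     = 1023 << 20 = 0x3FF00000
// Result pair (exponent, mantissa) = (0x3FF00000, 0x00000000),
// exactly the IEEE-754 bit pattern of 1.0; for -1 the sign bit is
// OR'ed in first, giving (0xBFF00000, 0x00000000).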
@@ -625,34 +678,16 @@ void FloatingPointHelper::LoadSmis(MacroAssembler* masm,
__ mov(scratch1, Operand(r0));
ConvertToDoubleStub stub1(r3, r2, scratch1, scratch2);
__ push(lr);
- __ Call(stub1.GetCode());
+ __ Call(stub1.GetCode(masm->isolate()));
// Write Smi from r1 to r1 and r0 in double format.
__ mov(scratch1, Operand(r1));
ConvertToDoubleStub stub2(r1, r0, scratch1, scratch2);
- __ Call(stub2.GetCode());
+ __ Call(stub2.GetCode(masm->isolate()));
__ pop(lr);
}
}
-void FloatingPointHelper::LoadOperands(
- MacroAssembler* masm,
- FloatingPointHelper::Destination destination,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Label* slow) {
-
- // Load right operand (r0) to d6 or r2/r3.
- LoadNumber(masm, destination,
- r0, d7, r2, r3, heap_number_map, scratch1, scratch2, slow);
-
- // Load left operand (r1) to d7 or r0/r1.
- LoadNumber(masm, destination,
- r1, d6, r0, r1, heap_number_map, scratch1, scratch2, slow);
-}
-
-
void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
Destination destination,
Register object,
@@ -705,7 +740,7 @@ void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
__ mov(scratch1, Operand(object));
ConvertToDoubleStub stub(dst2, dst1, scratch1, scratch2);
__ push(lr);
- __ Call(stub.GetCode());
+ __ Call(stub.GetCode(masm->isolate()));
__ pop(lr);
}
@@ -756,13 +791,13 @@ void FloatingPointHelper::ConvertIntToDouble(MacroAssembler* masm,
Register int_scratch,
Destination destination,
DwVfpRegister double_dst,
- Register dst1,
- Register dst2,
+ Register dst_mantissa,
+ Register dst_exponent,
Register scratch2,
SwVfpRegister single_scratch) {
ASSERT(!int_scratch.is(scratch2));
- ASSERT(!int_scratch.is(dst1));
- ASSERT(!int_scratch.is(dst2));
+ ASSERT(!int_scratch.is(dst_mantissa));
+ ASSERT(!int_scratch.is(dst_exponent));
Label done;
@@ -771,56 +806,57 @@ void FloatingPointHelper::ConvertIntToDouble(MacroAssembler* masm,
__ vmov(single_scratch, int_scratch);
__ vcvt_f64_s32(double_dst, single_scratch);
if (destination == kCoreRegisters) {
- __ vmov(dst1, dst2, double_dst);
+ __ vmov(dst_mantissa, dst_exponent, double_dst);
}
} else {
Label fewer_than_20_useful_bits;
// Expected output:
- // | dst2 | dst1 |
+ // | dst_exponent | dst_mantissa |
// | s | exp | mantissa |
// Check for zero.
__ cmp(int_scratch, Operand::Zero());
- __ mov(dst2, int_scratch);
- __ mov(dst1, int_scratch);
+ __ mov(dst_exponent, int_scratch);
+ __ mov(dst_mantissa, int_scratch);
__ b(eq, &done);
// Preload the sign of the value.
- __ and_(dst2, int_scratch, Operand(HeapNumber::kSignMask), SetCC);
+ __ and_(dst_exponent, int_scratch, Operand(HeapNumber::kSignMask), SetCC);
// Get the absolute value of the object (as an unsigned integer).
__ rsb(int_scratch, int_scratch, Operand::Zero(), SetCC, mi);
// Get mantissa[51:20].
// Get the position of the first set bit.
- __ CountLeadingZeros(dst1, int_scratch, scratch2);
- __ rsb(dst1, dst1, Operand(31));
+ __ CountLeadingZeros(dst_mantissa, int_scratch, scratch2);
+ __ rsb(dst_mantissa, dst_mantissa, Operand(31));
// Set the exponent.
- __ add(scratch2, dst1, Operand(HeapNumber::kExponentBias));
- __ Bfi(dst2, scratch2, scratch2,
+ __ add(scratch2, dst_mantissa, Operand(HeapNumber::kExponentBias));
+ __ Bfi(dst_exponent, scratch2, scratch2,
HeapNumber::kExponentShift, HeapNumber::kExponentBits);
// Clear the first non null bit.
__ mov(scratch2, Operand(1));
- __ bic(int_scratch, int_scratch, Operand(scratch2, LSL, dst1));
+ __ bic(int_scratch, int_scratch, Operand(scratch2, LSL, dst_mantissa));
- __ cmp(dst1, Operand(HeapNumber::kMantissaBitsInTopWord));
+ __ cmp(dst_mantissa, Operand(HeapNumber::kMantissaBitsInTopWord));
// Get the number of bits to set in the lower part of the mantissa.
- __ sub(scratch2, dst1, Operand(HeapNumber::kMantissaBitsInTopWord), SetCC);
+ __ sub(scratch2, dst_mantissa, Operand(HeapNumber::kMantissaBitsInTopWord),
+ SetCC);
__ b(mi, &fewer_than_20_useful_bits);
// Set the higher 20 bits of the mantissa.
- __ orr(dst2, dst2, Operand(int_scratch, LSR, scratch2));
+ __ orr(dst_exponent, dst_exponent, Operand(int_scratch, LSR, scratch2));
__ rsb(scratch2, scratch2, Operand(32));
- __ mov(dst1, Operand(int_scratch, LSL, scratch2));
+ __ mov(dst_mantissa, Operand(int_scratch, LSL, scratch2));
__ b(&done);
__ bind(&fewer_than_20_useful_bits);
- __ rsb(scratch2, dst1, Operand(HeapNumber::kMantissaBitsInTopWord));
+ __ rsb(scratch2, dst_mantissa, Operand(HeapNumber::kMantissaBitsInTopWord));
__ mov(scratch2, Operand(int_scratch, LSL, scratch2));
- __ orr(dst2, dst2, scratch2);
+ __ orr(dst_exponent, dst_exponent, scratch2);
// Set dst_mantissa to 0.
- __ mov(dst1, Operand::Zero());
+ __ mov(dst_mantissa, Operand::Zero());
}
__ bind(&done);
}
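A worked example of the non-VFP path above, using the renamed registers (converting the integer 5):

// 5 = 0b101: first set bit at position 2, so
//   exponent = kExponentBias + 2 = 1025 = 0x401.
// Clearing that leading bit leaves 0b01 in the top of the mantissa:
//   dst_exponent = (0x401 << 20) | 0x40000 = 0x40140000,
//   dst_mantissa = 0
// which is the IEEE-754 pattern of 5.0 (0x4014000000000000).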
@@ -831,8 +867,8 @@ void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
Destination destination,
DwVfpRegister double_dst,
DwVfpRegister double_scratch,
- Register dst1,
- Register dst2,
+ Register dst_mantissa,
+ Register dst_exponent,
Register heap_number_map,
Register scratch1,
Register scratch2,
@@ -848,8 +884,8 @@ void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
__ JumpIfNotSmi(object, &obj_is_not_smi);
__ SmiUntag(scratch1, object);
- ConvertIntToDouble(masm, scratch1, destination, double_dst, dst1, dst2,
- scratch2, single_scratch);
+ ConvertIntToDouble(masm, scratch1, destination, double_dst, dst_mantissa,
+ dst_exponent, scratch2, single_scratch);
__ b(&done);
__ bind(&obj_is_not_smi);
@@ -876,26 +912,52 @@ void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
__ b(ne, not_int32);
if (destination == kCoreRegisters) {
- __ vmov(dst1, dst2, double_dst);
+ __ vmov(dst_mantissa, dst_exponent, double_dst);
}
} else {
ASSERT(!scratch1.is(object) && !scratch2.is(object));
- // Load the double value in the destination registers..
- __ Ldrd(dst1, dst2, FieldMemOperand(object, HeapNumber::kValueOffset));
+ // Load the double value in the destination registers.
+ bool save_registers = object.is(dst_mantissa) || object.is(dst_exponent);
+ if (save_registers) {
+ // Save both output registers, because the other one probably holds
+ // an important value too.
+ __ Push(dst_exponent, dst_mantissa);
+ }
+ __ Ldrd(dst_mantissa, dst_exponent,
+ FieldMemOperand(object, HeapNumber::kValueOffset));
// Check for 0 and -0.
- __ bic(scratch1, dst1, Operand(HeapNumber::kSignMask));
- __ orr(scratch1, scratch1, Operand(dst2));
+ Label zero;
+ __ bic(scratch1, dst_exponent, Operand(HeapNumber::kSignMask));
+ __ orr(scratch1, scratch1, Operand(dst_mantissa));
__ cmp(scratch1, Operand::Zero());
- __ b(eq, &done);
+ __ b(eq, &zero);
// Check that the value can be exactly represented by a 32-bit integer.
// Jump to not_int32 if that's not the case.
- DoubleIs32BitInteger(masm, dst1, dst2, scratch1, scratch2, not_int32);
+ Label restore_input_and_miss;
+ DoubleIs32BitInteger(masm, dst_exponent, dst_mantissa, scratch1, scratch2,
+ &restore_input_and_miss);
- // dst1 and dst2 were trashed. Reload the double value.
- __ Ldrd(dst1, dst2, FieldMemOperand(object, HeapNumber::kValueOffset));
+ // dst_* were trashed. Reload the double value.
+ if (save_registers) {
+ __ Pop(dst_exponent, dst_mantissa);
+ }
+ __ Ldrd(dst_mantissa, dst_exponent,
+ FieldMemOperand(object, HeapNumber::kValueOffset));
+ __ b(&done);
+
+ __ bind(&restore_input_and_miss);
+ if (save_registers) {
+ __ Pop(dst_exponent, dst_mantissa);
+ }
+ __ b(not_int32);
+
+ __ bind(&zero);
+ if (save_registers) {
+ __ Drop(2);
+ }
}
__ bind(&done);
@@ -918,14 +980,15 @@ void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm,
!scratch1.is(scratch3) &&
!scratch2.is(scratch3));
- Label done;
+ Label done, maybe_undefined;
__ UntagAndJumpIfSmi(dst, object, &done);
__ AssertRootValue(heap_number_map,
Heap::kHeapNumberMapRootIndex,
"HeapNumberMap register clobbered.");
- __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);
+
+ __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, &maybe_undefined);
// Object is a heap number.
// Convert the floating point value to a 32-bit integer.
@@ -972,20 +1035,28 @@ void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm,
__ tst(scratch1, Operand(HeapNumber::kSignMask));
__ rsb(dst, dst, Operand::Zero(), LeaveCC, mi);
}
+ __ b(&done);
+
+ __ bind(&maybe_undefined);
+ __ CompareRoot(object, Heap::kUndefinedValueRootIndex);
+ __ b(ne, not_int32);
+ // |undefined| is truncated to 0.
+ __ mov(dst, Operand(Smi::FromInt(0)));
+ // Fall through.
__ bind(&done);
}
void FloatingPointHelper::DoubleIs32BitInteger(MacroAssembler* masm,
- Register src1,
- Register src2,
+ Register src_exponent,
+ Register src_mantissa,
Register dst,
Register scratch,
Label* not_int32) {
// Get exponent alone in scratch.
__ Ubfx(scratch,
- src1,
+ src_exponent,
HeapNumber::kExponentShift,
HeapNumber::kExponentBits);
@@ -1005,11 +1076,11 @@ void FloatingPointHelper::DoubleIs32BitInteger(MacroAssembler* masm,
// Another way to put it is that if (exponent - signbit) > 30 then the
// number cannot be represented as an int32.
Register tmp = dst;
- __ sub(tmp, scratch, Operand(src1, LSR, 31));
+ __ sub(tmp, scratch, Operand(src_exponent, LSR, 31));
__ cmp(tmp, Operand(30));
__ b(gt, not_int32);
// - Bits [21:0] in the mantissa are not null.
- __ tst(src2, Operand(0x3fffff));
+ __ tst(src_mantissa, Operand(0x3fffff));
__ b(ne, not_int32);
// Otherwise the exponent needs to be big enough to shift left all the
@@ -1020,19 +1091,19 @@ void FloatingPointHelper::DoubleIs32BitInteger(MacroAssembler* masm,
// Get the 32 higher bits of the mantissa in dst.
__ Ubfx(dst,
- src2,
+ src_mantissa,
HeapNumber::kMantissaBitsInTopWord,
32 - HeapNumber::kMantissaBitsInTopWord);
__ orr(dst,
dst,
- Operand(src1, LSL, HeapNumber::kNonMantissaBitsInTopWord));
+ Operand(src_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord));
// Create the mask and test the lower bits (of the higher bits).
__ rsb(scratch, scratch, Operand(32));
- __ mov(src2, Operand(1));
- __ mov(src1, Operand(src2, LSL, scratch));
- __ sub(src1, src1, Operand(1));
- __ tst(dst, src1);
+ __ mov(src_mantissa, Operand(1));
+ __ mov(src_exponent, Operand(src_mantissa, LSL, scratch));
+ __ sub(src_exponent, src_exponent, Operand(1));
+ __ tst(dst, src_exponent);
__ b(ne, not_int32);
}
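The "(exponent - signbit) > 30" test quoted above is deliberately asymmetric, because int32 reaches -2^31 but only 2^31 - 1. Two hedged spot checks:

//  2^31 (high word 0x41E00000): unbiased exponent 31, sign 0
//      -> 31 - 0 = 31 > 30 -> rejected (correct: INT32_MAX is 2^31-1).
// -2^31 (high word 0xC1E00000): unbiased exponent 31, sign 1
//      -> 31 - 1 = 30      -> accepted (correct: INT32_MIN fits).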
@@ -1097,11 +1168,12 @@ bool WriteInt32ToHeapNumberStub::IsPregenerated() {
}
-void WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime() {
+void WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(
+ Isolate* isolate) {
WriteInt32ToHeapNumberStub stub1(r1, r0, r2);
WriteInt32ToHeapNumberStub stub2(r2, r0, r3);
- stub1.GetCode()->set_is_pregenerated(true);
- stub2.GetCode()->set_is_pregenerated(true);
+ stub1.GetCode(isolate)->set_is_pregenerated(true);
+ stub2.GetCode(isolate)->set_is_pregenerated(true);
}
@@ -1122,7 +1194,7 @@ void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
// Set the sign bit in scratch_ if the value was negative.
__ orr(scratch_, scratch_, Operand(HeapNumber::kSignMask), LeaveCC, cs);
// Subtract from 0 if the value was negative.
- __ rsb(the_int_, the_int_, Operand(0, RelocInfo::NONE), LeaveCC, cs);
+ __ rsb(the_int_, the_int_, Operand::Zero(), LeaveCC, cs);
// We should be masking the implict first digit of the mantissa away here,
// but it just ends up combining harmlessly with the last digit of the
// exponent that happens to be 1. The sign bit is 0 so we shift 10 to get
@@ -1145,7 +1217,7 @@ void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
non_smi_exponent += 1 << HeapNumber::kExponentShift;
__ mov(ip, Operand(HeapNumber::kSignMask | non_smi_exponent));
__ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kExponentOffset));
- __ mov(ip, Operand(0, RelocInfo::NONE));
+ __ mov(ip, Operand::Zero());
__ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kMantissaOffset));
__ Ret();
}
@@ -1156,48 +1228,43 @@ void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
// for "identity and not NaN".
static void EmitIdenticalObjectComparison(MacroAssembler* masm,
Label* slow,
- Condition cond,
- bool never_nan_nan) {
+ Condition cond) {
Label not_identical;
Label heap_number, return_equal;
__ cmp(r0, r1);
__ b(ne, &not_identical);
- // The two objects are identical. If we know that one of them isn't NaN then
- // we now know they test equal.
- if (cond != eq || !never_nan_nan) {
- // Test for NaN. Sadly, we can't just compare to FACTORY->nan_value(),
- // so we do the second best thing - test it ourselves.
- // They are both equal and they are not both Smis so both of them are not
- // Smis. If it's not a heap number, then return equal.
- if (cond == lt || cond == gt) {
- __ CompareObjectType(r0, r4, r4, FIRST_SPEC_OBJECT_TYPE);
+ // Test for NaN. Sadly, we can't just compare to FACTORY->nan_value(),
+ // so we do the second best thing - test it ourselves.
+ // They are both equal and they are not both Smis so both of them are not
+ // Smis. If it's not a heap number, then return equal.
+ if (cond == lt || cond == gt) {
+ __ CompareObjectType(r0, r4, r4, FIRST_SPEC_OBJECT_TYPE);
+ __ b(ge, slow);
+ } else {
+ __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
+ __ b(eq, &heap_number);
+ // Comparing JS objects with <=, >= is complicated.
+ if (cond != eq) {
+ __ cmp(r4, Operand(FIRST_SPEC_OBJECT_TYPE));
__ b(ge, slow);
- } else {
- __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
- __ b(eq, &heap_number);
- // Comparing JS objects with <=, >= is complicated.
- if (cond != eq) {
- __ cmp(r4, Operand(FIRST_SPEC_OBJECT_TYPE));
- __ b(ge, slow);
- // Normally here we fall through to return_equal, but undefined is
- // special: (undefined == undefined) == true, but
- // (undefined <= undefined) == false! See ECMAScript 11.8.5.
- if (cond == le || cond == ge) {
- __ cmp(r4, Operand(ODDBALL_TYPE));
- __ b(ne, &return_equal);
- __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
- __ cmp(r0, r2);
- __ b(ne, &return_equal);
- if (cond == le) {
- // undefined <= undefined should fail.
- __ mov(r0, Operand(GREATER));
- } else {
- // undefined >= undefined should fail.
- __ mov(r0, Operand(LESS));
- }
- __ Ret();
+ // Normally here we fall through to return_equal, but undefined is
+ // special: (undefined == undefined) == true, but
+ // (undefined <= undefined) == false! See ECMAScript 11.8.5.
+ if (cond == le || cond == ge) {
+ __ cmp(r4, Operand(ODDBALL_TYPE));
+ __ b(ne, &return_equal);
+ __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
+ __ cmp(r0, r2);
+ __ b(ne, &return_equal);
+ if (cond == le) {
+ // undefined <= undefined should fail.
+ __ mov(r0, Operand(GREATER));
+ } else {
+ // undefined >= undefined should fail.
+ __ mov(r0, Operand(LESS));
}
+ __ Ret();
}
}
}
@@ -1212,47 +1279,45 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm,
}
__ Ret();
- if (cond != eq || !never_nan_nan) {
- // For less and greater we don't have to check for NaN since the result of
- // x < x is false regardless. For the others here is some code to check
- // for NaN.
- if (cond != lt && cond != gt) {
- __ bind(&heap_number);
- // It is a heap number, so return non-equal if it's NaN and equal if it's
- // not NaN.
-
- // The representation of NaN values has all exponent bits (52..62) set,
- // and not all mantissa bits (0..51) clear.
- // Read top bits of double representation (second word of value).
- __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
- // Test that exponent bits are all set.
- __ Sbfx(r3, r2, HeapNumber::kExponentShift, HeapNumber::kExponentBits);
- // NaNs have all-one exponents so they sign extend to -1.
- __ cmp(r3, Operand(-1));
- __ b(ne, &return_equal);
-
- // Shift out flag and all exponent bits, retaining only mantissa.
- __ mov(r2, Operand(r2, LSL, HeapNumber::kNonMantissaBitsInTopWord));
- // Or with all low-bits of mantissa.
- __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
- __ orr(r0, r3, Operand(r2), SetCC);
- // For equal we already have the right value in r0: Return zero (equal)
- // if all bits in mantissa are zero (it's an Infinity) and non-zero if
- // not (it's a NaN). For <= and >= we need to load r0 with the failing
- // value if it's a NaN.
- if (cond != eq) {
- // All-zero means Infinity means equal.
- __ Ret(eq);
- if (cond == le) {
- __ mov(r0, Operand(GREATER)); // NaN <= NaN should fail.
- } else {
- __ mov(r0, Operand(LESS)); // NaN >= NaN should fail.
- }
+ // For less and greater we don't have to check for NaN since the result of
+ // x < x is false regardless. For the others here is some code to check
+ // for NaN.
+ if (cond != lt && cond != gt) {
+ __ bind(&heap_number);
+ // It is a heap number, so return non-equal if it's NaN and equal if it's
+ // not NaN.
+
+ // The representation of NaN values has all exponent bits (52..62) set,
+ // and not all mantissa bits (0..51) clear.
+ // Read top bits of double representation (second word of value).
+ __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
+ // Test that exponent bits are all set.
+ __ Sbfx(r3, r2, HeapNumber::kExponentShift, HeapNumber::kExponentBits);
+ // NaNs have all-one exponents so they sign extend to -1.
+ __ cmp(r3, Operand(-1));
+ __ b(ne, &return_equal);
+
+ // Shift out flag and all exponent bits, retaining only mantissa.
+ __ mov(r2, Operand(r2, LSL, HeapNumber::kNonMantissaBitsInTopWord));
+ // Or with all low-bits of mantissa.
+ __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
+ __ orr(r0, r3, Operand(r2), SetCC);
+ // For equal we already have the right value in r0: Return zero (equal)
+ // if all bits in mantissa are zero (it's an Infinity) and non-zero if
+ // not (it's a NaN). For <= and >= we need to load r0 with the failing
+ // value if it's a NaN.
+ if (cond != eq) {
+ // All-zero means Infinity means equal.
+ __ Ret(eq);
+ if (cond == le) {
+ __ mov(r0, Operand(GREATER)); // NaN <= NaN should fail.
+ } else {
+ __ mov(r0, Operand(LESS)); // NaN >= NaN should fail.
}
- __ Ret();
}
- // No fall through here.
+ __ Ret();
}
+ // No fall through here.
__ bind(&not_identical);
}
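The constants loaded into r0 encode the comparison outcome for the caller (in V8's convention EQUAL = 0, LESS = -1, GREATER = 1), chosen so that every ordered comparison involving NaN fails:

// NaN == NaN : r0 != 0          -> equality fails.
// NaN <= NaN : r0 = GREATER (1) -> 'le' sees a positive result, fails.
// NaN >= NaN : r0 = LESS  (-1)  -> 'ge' sees a negative result, fails.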
@@ -1300,7 +1365,7 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
// Convert lhs to a double in r2, r3.
__ mov(r7, Operand(lhs));
ConvertToDoubleStub stub1(r3, r2, r7, r6);
- __ Call(stub1.GetCode());
+ __ Call(stub1.GetCode(masm->isolate()));
// Load rhs to a double in r0, r1.
__ Ldrd(r0, r1, FieldMemOperand(rhs, HeapNumber::kValueOffset));
__ pop(lr);
@@ -1342,7 +1407,7 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
// Convert rhs to a double in r0, r1.
__ mov(r7, Operand(rhs));
ConvertToDoubleStub stub2(r1, r0, r7, r6);
- __ Call(stub2.GetCode());
+ __ Call(stub2.GetCode(masm->isolate()));
__ pop(lr);
}
// Fall through to both_loaded_as_doubles.
@@ -1368,7 +1433,7 @@ void EmitNanCheck(MacroAssembler* masm, Label* lhs_not_nan, Condition cond) {
Operand(lhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord),
SetCC);
__ b(ne, &one_is_nan);
- __ cmp(lhs_mantissa, Operand(0, RelocInfo::NONE));
+ __ cmp(lhs_mantissa, Operand::Zero());
__ b(ne, &one_is_nan);
__ bind(lhs_not_nan);
@@ -1383,7 +1448,7 @@ void EmitNanCheck(MacroAssembler* masm, Label* lhs_not_nan, Condition cond) {
Operand(rhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord),
SetCC);
__ b(ne, &one_is_nan);
- __ cmp(rhs_mantissa, Operand(0, RelocInfo::NONE));
+ __ cmp(rhs_mantissa, Operand::Zero());
__ b(eq, &neither_is_nan);
__ bind(&one_is_nan);
@@ -1488,12 +1553,13 @@ static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
__ cmp(r3, Operand(ODDBALL_TYPE));
__ b(eq, &return_not_equal);
- // Now that we have the types we might as well check for symbol-symbol.
- // Ensure that no non-strings have the symbol bit set.
- STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsSymbolMask);
- STATIC_ASSERT(kSymbolTag != 0);
+ // Now that we have the types we might as well check for
+ // internalized-internalized.
+ // Ensure that no non-strings have the internalized bit set.
+ STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsInternalizedMask);
+ STATIC_ASSERT(kInternalizedTag != 0);
__ and_(r2, r2, Operand(r3));
- __ tst(r2, Operand(kIsSymbolMask));
+ __ tst(r2, Operand(kIsInternalizedMask));
__ b(ne, &return_not_equal);
}
@@ -1530,29 +1596,29 @@ static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
}
-// Fast negative check for symbol-to-symbol equality.
-static void EmitCheckForSymbolsOrObjects(MacroAssembler* masm,
- Register lhs,
- Register rhs,
- Label* possible_strings,
- Label* not_both_strings) {
+// Fast negative check for internalized-to-internalized equality.
+static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
+ Register lhs,
+ Register rhs,
+ Label* possible_strings,
+ Label* not_both_strings) {
ASSERT((lhs.is(r0) && rhs.is(r1)) ||
(lhs.is(r1) && rhs.is(r0)));
// r2 is object type of rhs.
- // Ensure that no non-strings have the symbol bit set.
+ // Ensure that no non-strings have the internalized bit set.
Label object_test;
- STATIC_ASSERT(kSymbolTag != 0);
+ STATIC_ASSERT(kInternalizedTag != 0);
__ tst(r2, Operand(kIsNotStringMask));
__ b(ne, &object_test);
- __ tst(r2, Operand(kIsSymbolMask));
+ __ tst(r2, Operand(kIsInternalizedMask));
__ b(eq, possible_strings);
__ CompareObjectType(lhs, r3, r3, FIRST_NONSTRING_TYPE);
__ b(ge, not_both_strings);
- __ tst(r3, Operand(kIsSymbolMask));
+ __ tst(r3, Operand(kIsInternalizedMask));
__ b(eq, possible_strings);
- // Both are symbols. We already checked they weren't the same pointer
+ // Both are internalized. We already checked they weren't the same pointer
// so they are not equal.
__ mov(r0, Operand(NOT_EQUAL));
__ Ret();
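The soundness of this negative check rests on interning: internalized strings are unique per content, so distinct pointers imply distinct contents. A tiny illustration with a hypothetical interning table (std::unordered_set stands in for V8's string table):

#include <cassert>
#include <string>
#include <unordered_set>

const std::string* Internalize(std::unordered_set<std::string>* table,
                               const std::string& s) {
  return &*table->insert(s).first;  // one canonical object per content
}

int main() {
  std::unordered_set<std::string> table;
  const std::string* a = Internalize(&table, "foo");
  const std::string* b = Internalize(&table, "foo");
  const std::string* c = Internalize(&table, "bar");
  assert(a == b);  // equal content collapses to one pointer...
  assert(a != c);  // ...so unequal pointers imply unequal strings
}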
@@ -1686,29 +1752,47 @@ void NumberToStringStub::Generate(MacroAssembler* masm) {
}
-// On entry lhs_ and rhs_ are the values to be compared.
+static void ICCompareStub_CheckInputType(MacroAssembler* masm,
+ Register input,
+ Register scratch,
+ CompareIC::State expected,
+ Label* fail) {
+ Label ok;
+ if (expected == CompareIC::SMI) {
+ __ JumpIfNotSmi(input, fail);
+ } else if (expected == CompareIC::NUMBER) {
+ __ JumpIfSmi(input, &ok);
+ __ CheckMap(input, scratch, Heap::kHeapNumberMapRootIndex, fail,
+ DONT_DO_SMI_CHECK);
+ }
+ // We could be strict about internalized/non-internalized here, but as long as
+ // hydrogen doesn't care, the stub doesn't have to care either.
+ __ bind(&ok);
+}
+
+
+// On entry r1 and r2 are the values to be compared.
// On exit r0 is 0, positive or negative to indicate the result of
// the comparison.
-void CompareStub::Generate(MacroAssembler* masm) {
- ASSERT((lhs_.is(r0) && rhs_.is(r1)) ||
- (lhs_.is(r1) && rhs_.is(r0)));
+void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
+ Register lhs = r1;
+ Register rhs = r0;
+ Condition cc = GetCondition();
+
+ Label miss;
+ ICCompareStub_CheckInputType(masm, lhs, r2, left_, &miss);
+ ICCompareStub_CheckInputType(masm, rhs, r3, right_, &miss);
Label slow; // Call builtin.
Label not_smis, both_loaded_as_doubles, lhs_not_nan;
- if (include_smi_compare_) {
- Label not_two_smis, smi_done;
- __ orr(r2, r1, r0);
- __ JumpIfNotSmi(r2, &not_two_smis);
- __ mov(r1, Operand(r1, ASR, 1));
- __ sub(r0, r1, Operand(r0, ASR, 1));
- __ Ret();
- __ bind(&not_two_smis);
- } else if (FLAG_debug_code) {
- __ orr(r2, r1, r0);
- __ tst(r2, Operand(kSmiTagMask));
- __ Assert(ne, "CompareStub: unexpected smi operands.");
- }
+ Label not_two_smis, smi_done;
+ __ orr(r2, r1, r0);
+ __ JumpIfNotSmi(r2, &not_two_smis);
+ __ mov(r1, Operand(r1, ASR, 1));
+ __ sub(r0, r1, Operand(r0, ASR, 1));
+ __ Ret();
+ __ bind(&not_two_smis);
// NOTICE! This code is only reached after a smi-fast-case check, so
// it is certain that at least one operand isn't a smi.
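The now-unconditional smi fast path is compact enough to restate in plain C++ (a sketch assuming 32-bit smis with the tag in bit 0, as on ARM): the difference of two 31-bit payloads always fits in 32 bits, so a single subtraction yields the sign the caller wants with no overflow check.

#include <cstdint>

// Returns false where the stub branches to not_two_smis.
bool CompareSmis(int32_t lhs, int32_t rhs, int32_t* result) {
  if (((lhs | rhs) & 1) != 0) return false;  // orr + JumpIfNotSmi
  *result = (lhs >> 1) - (rhs >> 1);         // ASR untags, sub -> r0
  return true;                               // negative, zero or positive
}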
@@ -1746,13 +1830,13 @@ void CompareStub::Generate(MacroAssembler* masm) {
// Handle the case where the objects are identical. Either returns the answer
// or goes to slow. Only falls through if the objects were not identical.
- EmitIdenticalObjectComparison(masm, &slow, cc_, never_nan_nan_);
+ EmitIdenticalObjectComparison(masm, &slow, cc);
// If either is a Smi (we know that not both are), then they can only
// be strictly equal if the other is a HeapNumber.
STATIC_ASSERT(kSmiTag == 0);
ASSERT_EQ(0, Smi::FromInt(0));
- __ and_(r2, lhs_, Operand(rhs_));
+ __ and_(r2, lhs, Operand(rhs));
__ JumpIfNotSmi(r2, &not_smis);
// One operand is a smi. EmitSmiNonsmiComparison generates code that can:
// 1) Return the answer.
@@ -1763,7 +1847,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
// comparison. If VFP3 is supported the double values of the numbers have
// been loaded into d7 and d6. Otherwise, the double values have been loaded
// into r0, r1, r2, and r3.
- EmitSmiNonsmiComparison(masm, lhs_, rhs_, &lhs_not_nan, &slow, strict_);
+ EmitSmiNonsmiComparison(masm, lhs, rhs, &lhs_not_nan, &slow, strict());
__ bind(&both_loaded_as_doubles);
// The arguments have been converted to doubles and stored in d6 and d7, if
@@ -1786,7 +1870,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
// If one of the sides was a NaN then the v flag is set. Load r0 with
// whatever it takes to make the comparison fail, since comparisons with NaN
// always fail.
- if (cc_ == lt || cc_ == le) {
+ if (cc == lt || cc == le) {
__ mov(r0, Operand(GREATER));
} else {
__ mov(r0, Operand(LESS));
@@ -1795,62 +1879,64 @@ void CompareStub::Generate(MacroAssembler* masm) {
} else {
// Checks for NaN in the doubles we have loaded. Can return the answer or
// fall through if neither is a NaN. Also binds lhs_not_nan.
- EmitNanCheck(masm, &lhs_not_nan, cc_);
+ EmitNanCheck(masm, &lhs_not_nan, cc);
// Compares two doubles in r0, r1, r2, r3 that are not NaNs. Returns the
// answer. Never falls through.
- EmitTwoNonNanDoubleComparison(masm, cc_);
+ EmitTwoNonNanDoubleComparison(masm, cc);
}
__ bind(&not_smis);
// At this point we know we are dealing with two different objects,
// and neither of them is a Smi. The objects are in rhs_ and lhs_.
- if (strict_) {
+ if (strict()) {
// This returns non-equal for some object types, or falls through if it
// was not lucky.
- EmitStrictTwoHeapObjectCompare(masm, lhs_, rhs_);
+ EmitStrictTwoHeapObjectCompare(masm, lhs, rhs);
}
- Label check_for_symbols;
+ Label check_for_internalized_strings;
Label flat_string_check;
// Check for heap-number-heap-number comparison. Can jump to slow case,
// or load both doubles into r0, r1, r2, r3 and jump to the code that handles
- // that case. If the inputs are not doubles then jumps to check_for_symbols.
+ // that case. If the inputs are not doubles then jumps to
+ // check_for_internalized_strings.
// In this case r2 will contain the type of rhs_. Never falls through.
EmitCheckForTwoHeapNumbers(masm,
- lhs_,
- rhs_,
+ lhs,
+ rhs,
&both_loaded_as_doubles,
- &check_for_symbols,
+ &check_for_internalized_strings,
&flat_string_check);
- __ bind(&check_for_symbols);
+ __ bind(&check_for_internalized_strings);
// In the strict case the EmitStrictTwoHeapObjectCompare already took care of
- // symbols.
- if (cc_ == eq && !strict_) {
- // Returns an answer for two symbols or two detectable objects.
+ // internalized strings.
+ if (cc == eq && !strict()) {
+ // Returns an answer for two internalized strings or two detectable objects.
// Otherwise jumps to string case or not both strings case.
// Assumes that r2 is the type of rhs_ on entry.
- EmitCheckForSymbolsOrObjects(masm, lhs_, rhs_, &flat_string_check, &slow);
+ EmitCheckForInternalizedStringsOrObjects(
+ masm, lhs, rhs, &flat_string_check, &slow);
}
// Check for both being sequential ASCII strings, and inline if that is the
// case.
__ bind(&flat_string_check);
- __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs_, rhs_, r2, r3, &slow);
+ __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs, rhs, r2, r3, &slow);
__ IncrementCounter(isolate->counters()->string_compare_native(), 1, r2, r3);
- if (cc_ == eq) {
+ if (cc == eq) {
StringCompareStub::GenerateFlatAsciiStringEquals(masm,
- lhs_,
- rhs_,
+ lhs,
+ rhs,
r2,
r3,
r4);
} else {
StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
- lhs_,
- rhs_,
+ lhs,
+ rhs,
r2,
r3,
r4,
@@ -1860,18 +1946,18 @@ void CompareStub::Generate(MacroAssembler* masm) {
__ bind(&slow);
- __ Push(lhs_, rhs_);
+ __ Push(lhs, rhs);
// Figure out which native to call and setup the arguments.
Builtins::JavaScript native;
- if (cc_ == eq) {
- native = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
+ if (cc == eq) {
+ native = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
} else {
native = Builtins::COMPARE;
int ncr; // NaN compare result
- if (cc_ == lt || cc_ == le) {
+ if (cc == lt || cc == le) {
ncr = GREATER;
} else {
- ASSERT(cc_ == gt || cc_ == ge); // remaining cases
+ ASSERT(cc == gt || cc == ge); // remaining cases
ncr = LESS;
}
__ mov(r0, Operand(Smi::FromInt(ncr)));
@@ -1881,6 +1967,9 @@ void CompareStub::Generate(MacroAssembler* masm) {
// Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
__ InvokeBuiltin(native, JUMP_FUNCTION);
+
+ __ bind(&miss);
+ GenerateMiss(masm);
}
@@ -1920,7 +2009,7 @@ void ToBooleanStub::Generate(MacroAssembler* masm) {
__ ldrb(ip, FieldMemOperand(map, Map::kBitFieldOffset));
__ tst(ip, Operand(1 << Map::kIsUndetectable));
// Undetectable -> false.
- __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, ne);
+ __ mov(tos_, Operand::Zero(), LeaveCC, ne);
__ Ret(ne);
}
}
@@ -1953,14 +2042,14 @@ void ToBooleanStub::Generate(MacroAssembler* masm) {
// "tos_" is a register, and contains a non zero value by default.
// Hence we only need to overwrite "tos_" with zero to return false for
// FP_ZERO or FP_NAN cases. Otherwise, by default it returns true.
- __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, eq); // for FP_ZERO
- __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, vs); // for FP_NAN
+ __ mov(tos_, Operand::Zero(), LeaveCC, eq); // for FP_ZERO
+ __ mov(tos_, Operand::Zero(), LeaveCC, vs); // for FP_NAN
} else {
Label done, not_nan, not_zero;
__ ldr(temp, FieldMemOperand(tos_, HeapNumber::kExponentOffset));
// -0 maps to false:
__ bic(
- temp, temp, Operand(HeapNumber::kSignMask, RelocInfo::NONE), SetCC);
+ temp, temp, Operand(HeapNumber::kSignMask, RelocInfo::NONE32), SetCC);
__ b(ne, &not_zero);
// If exponent word is zero then the answer depends on the mantissa word.
__ ldr(tos_, FieldMemOperand(tos_, HeapNumber::kMantissaOffset));
@@ -1973,25 +2062,25 @@ void ToBooleanStub::Generate(MacroAssembler* masm) {
__ mov(temp, Operand(temp, LSR, HeapNumber::kMantissaBitsInTopWord));
unsigned int shifted_exponent_mask =
HeapNumber::kExponentMask >> HeapNumber::kMantissaBitsInTopWord;
- __ cmp(temp, Operand(shifted_exponent_mask, RelocInfo::NONE));
+ __ cmp(temp, Operand(shifted_exponent_mask, RelocInfo::NONE32));
__ b(ne, &not_nan); // If exponent is not 0x7ff then it can't be a NaN.
// Reload exponent word.
__ ldr(temp, FieldMemOperand(tos_, HeapNumber::kExponentOffset));
- __ tst(temp, Operand(HeapNumber::kMantissaMask, RelocInfo::NONE));
+ __ tst(temp, Operand(HeapNumber::kMantissaMask, RelocInfo::NONE32));
// If mantissa is not zero then we have a NaN, so return 0.
- __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, ne);
+ __ mov(tos_, Operand::Zero(), LeaveCC, ne);
__ b(ne, &done);
// Load mantissa word.
__ ldr(temp, FieldMemOperand(tos_, HeapNumber::kMantissaOffset));
- __ cmp(temp, Operand(0, RelocInfo::NONE));
+ __ cmp(temp, Operand::Zero());
// If mantissa is not zero then we have a NaN, so return 0.
- __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, ne);
+ __ mov(tos_, Operand::Zero(), LeaveCC, ne);
__ b(ne, &done);
__ bind(&not_nan);
- __ mov(tos_, Operand(1, RelocInfo::NONE));
+ __ mov(tos_, Operand(1, RelocInfo::NONE32));
__ bind(&done);
}
__ Ret();
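Condensed into plain C++, the non-VFP path above implements the ECMAScript rule that a number is falsy exactly for +0, -0 and NaN, read straight off the two 32-bit halves of the double (a sketch, not a line-by-line translation):

#include <cstdint>
#include <cstring>

bool HeapNumberToBoolean(double v) {
  uint64_t bits;
  std::memcpy(&bits, &v, sizeof bits);
  uint32_t hi = static_cast<uint32_t>(bits >> 32);  // exponent word
  uint32_t lo = static_cast<uint32_t>(bits);        // mantissa word
  const uint32_t kSignMask = 0x80000000u;
  const uint32_t kExponentMask = 0x7ff00000u;
  const uint32_t kMantissaHighMask = 0x000fffffu;
  if ((hi & ~kSignMask) == 0 && lo == 0) return false;    // +0 and -0
  if ((hi & kExponentMask) == kExponentMask &&            // exponent all ones
      ((hi & kMantissaHighMask) | lo) != 0) return false; // NaN
  return true;  // everything else, including denormals, is true
}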
@@ -2014,7 +2103,7 @@ void ToBooleanStub::CheckOddball(MacroAssembler* masm,
// The value of a root is never NULL, so we can avoid loading a non-null
// value into tos_ when we want to return 'true'.
if (!result) {
- __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, eq);
+ __ mov(tos_, Operand::Zero(), LeaveCC, eq);
}
__ Ret(eq);
}
@@ -2042,17 +2131,22 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
// store the registers in any particular way, but we do have to store and
// restore them.
__ stm(db_w, sp, kCallerSaved | lr.bit());
+
+ const Register scratch = r1;
+
if (save_doubles_ == kSaveFPRegs) {
CpuFeatures::Scope scope(VFP2);
- __ sub(sp, sp, Operand(kDoubleSize * DwVfpRegister::kNumRegisters));
- for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) {
+ // Check CPU flags for number of registers, setting the Z condition flag.
+ __ CheckFor32DRegs(scratch);
+
+ __ sub(sp, sp, Operand(kDoubleSize * DwVfpRegister::kMaxNumRegisters));
+ for (int i = 0; i < DwVfpRegister::kMaxNumRegisters; i++) {
DwVfpRegister reg = DwVfpRegister::from_code(i);
- __ vstr(reg, MemOperand(sp, i * kDoubleSize));
+ __ vstr(reg, MemOperand(sp, i * kDoubleSize), i < 16 ? al : ne);
}
}
const int argument_count = 1;
const int fp_argument_count = 0;
- const Register scratch = r1;
AllowExternalCallThatCantCauseGC scope(masm);
__ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
@@ -2062,11 +2156,15 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
argument_count);
if (save_doubles_ == kSaveFPRegs) {
CpuFeatures::Scope scope(VFP2);
- for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) {
+
+ // Check CPU flags for number of registers, setting the Z condition flag.
+ __ CheckFor32DRegs(scratch);
+
+ for (int i = 0; i < DwVfpRegister::kMaxNumRegisters; i++) {
DwVfpRegister reg = DwVfpRegister::from_code(i);
- __ vldr(reg, MemOperand(sp, i * kDoubleSize));
+ __ vldr(reg, MemOperand(sp, i * kDoubleSize), i < 16 ? al : ne);
}
- __ add(sp, sp, Operand(kDoubleSize * DwVfpRegister::kNumRegisters));
+ __ add(sp, sp, Operand(kDoubleSize * DwVfpRegister::kMaxNumRegisters));
}
__ ldm(ia_w, sp, kCallerSaved | pc.bit()); // Also pop pc to get Ret(0).
}
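Note the frame is still sized for all 32 D-registers either way, which keeps the per-register offsets stable; only the stores and loads for d16-d31 are predicated. The shape of that pattern, as a sketch over hypothetical register-file arrays (the point is the 'i < 16 ? al : ne' split after CheckFor32DRegs sets the flags):

const int kMaxNumRegisters = 32;  // DwVfpRegister::kMaxNumRegisters

void SaveDoubles(double* frame, const double* d_regs, bool has_32_d_regs) {
  for (int i = 0; i < kMaxNumRegisters; i++) {
    // d0-d15 unconditionally ('al'); d16-d31 only when present ('ne').
    if (i < 16 || has_32_d_regs) frame[i] = d_regs[i];
  }
}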
@@ -2095,8 +2193,8 @@ void UnaryOpStub::Generate(MacroAssembler* masm) {
case UnaryOpIC::SMI:
GenerateSmiStub(masm);
break;
- case UnaryOpIC::HEAP_NUMBER:
- GenerateHeapNumberStub(masm);
+ case UnaryOpIC::NUMBER:
+ GenerateNumberStub(masm);
break;
case UnaryOpIC::GENERIC:
GenerateGenericStub(masm);
@@ -2159,7 +2257,7 @@ void UnaryOpStub::GenerateSmiCodeSub(MacroAssembler* masm,
__ b(eq, slow);
// Return '0 - value'.
- __ rsb(r0, r0, Operand(0, RelocInfo::NONE));
+ __ rsb(r0, r0, Operand::Zero());
__ Ret();
}
@@ -2176,13 +2274,13 @@ void UnaryOpStub::GenerateSmiCodeBitNot(MacroAssembler* masm,
// TODO(svenpanne): Use virtual functions instead of switch.
-void UnaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
+void UnaryOpStub::GenerateNumberStub(MacroAssembler* masm) {
switch (op_) {
case Token::SUB:
- GenerateHeapNumberStubSub(masm);
+ GenerateNumberStubSub(masm);
break;
case Token::BIT_NOT:
- GenerateHeapNumberStubBitNot(masm);
+ GenerateNumberStubBitNot(masm);
break;
default:
UNREACHABLE();
@@ -2190,7 +2288,7 @@ void UnaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
}
-void UnaryOpStub::GenerateHeapNumberStubSub(MacroAssembler* masm) {
+void UnaryOpStub::GenerateNumberStubSub(MacroAssembler* masm) {
Label non_smi, slow, call_builtin;
GenerateSmiCodeSub(masm, &non_smi, &call_builtin);
__ bind(&non_smi);
@@ -2202,7 +2300,7 @@ void UnaryOpStub::GenerateHeapNumberStubSub(MacroAssembler* masm) {
}
-void UnaryOpStub::GenerateHeapNumberStubBitNot(MacroAssembler* masm) {
+void UnaryOpStub::GenerateNumberStubBitNot(MacroAssembler* masm) {
Label non_smi, slow;
GenerateSmiCodeBitNot(masm, &non_smi);
__ bind(&non_smi);
@@ -2303,7 +2401,7 @@ void UnaryOpStub::GenerateHeapNumberCodeBitNot(
// WriteInt32ToHeapNumberStub does not trigger GC, so we do not
// have to set up a frame.
WriteInt32ToHeapNumberStub stub(r1, r0, r2);
- __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+ __ Jump(stub.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
}
__ bind(&impossible);
@@ -2364,20 +2462,23 @@ void UnaryOpStub::GenerateGenericCodeFallback(MacroAssembler* masm) {
}
+void BinaryOpStub::Initialize() {
+ platform_specific_bit_ = CpuFeatures::IsSupported(VFP2);
+}
+
+
void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
Label get_result;
__ Push(r1, r0);
__ mov(r2, Operand(Smi::FromInt(MinorKey())));
- __ mov(r1, Operand(Smi::FromInt(op_)));
- __ mov(r0, Operand(Smi::FromInt(operands_type_)));
- __ Push(r2, r1, r0);
+ __ push(r2);
__ TailCallExternalReference(
ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
masm->isolate()),
- 5,
+ 3,
1);
}
@@ -2388,59 +2489,8 @@ void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(
}
-void BinaryOpStub::Generate(MacroAssembler* masm) {
- // Explicitly allow generation of nested stubs. It is safe here because
- // generation code does not use any raw pointers.
- AllowStubCallsScope allow_stub_calls(masm, true);
-
- switch (operands_type_) {
- case BinaryOpIC::UNINITIALIZED:
- GenerateTypeTransition(masm);
- break;
- case BinaryOpIC::SMI:
- GenerateSmiStub(masm);
- break;
- case BinaryOpIC::INT32:
- GenerateInt32Stub(masm);
- break;
- case BinaryOpIC::HEAP_NUMBER:
- GenerateHeapNumberStub(masm);
- break;
- case BinaryOpIC::ODDBALL:
- GenerateOddballStub(masm);
- break;
- case BinaryOpIC::BOTH_STRING:
- GenerateBothStringStub(masm);
- break;
- case BinaryOpIC::STRING:
- GenerateStringStub(masm);
- break;
- case BinaryOpIC::GENERIC:
- GenerateGeneric(masm);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void BinaryOpStub::PrintName(StringStream* stream) {
- const char* op_name = Token::Name(op_);
- const char* overwrite_name;
- switch (mode_) {
- case NO_OVERWRITE: overwrite_name = "Alloc"; break;
- case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
- case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
- default: overwrite_name = "UnknownOverwrite"; break;
- }
- stream->Add("BinaryOpStub_%s_%s_%s",
- op_name,
- overwrite_name,
- BinaryOpIC::GetName(operands_type_));
-}
-
-
-void BinaryOpStub::GenerateSmiSmiOperation(MacroAssembler* masm) {
+void BinaryOpStub_GenerateSmiSmiOperation(MacroAssembler* masm,
+ Token::Value op) {
Register left = r1;
Register right = r0;
Register scratch1 = r7;
@@ -2450,7 +2500,7 @@ void BinaryOpStub::GenerateSmiSmiOperation(MacroAssembler* masm) {
STATIC_ASSERT(kSmiTag == 0);
Label not_smi_result;
- switch (op_) {
+ switch (op) {
case Token::ADD:
__ add(right, left, Operand(right), SetCC); // Add optimistically.
__ Ret(vc);
@@ -2475,7 +2525,7 @@ void BinaryOpStub::GenerateSmiSmiOperation(MacroAssembler* masm) {
__ cmp(ip, Operand(scratch2));
__ b(ne, &not_smi_result);
// Go slow on zero result to handle -0.
- __ cmp(scratch1, Operand(0));
+ __ cmp(scratch1, Operand::Zero());
__ mov(right, Operand(scratch1), LeaveCC, ne);
__ Ret(ne);
// We need -0 if we were multiplying a negative number with 0 to get 0.
@@ -2486,33 +2536,112 @@ void BinaryOpStub::GenerateSmiSmiOperation(MacroAssembler* masm) {
// We fall through here if we multiplied a negative number with 0, because
// that would mean we should produce -0.
break;
- case Token::DIV:
+ case Token::DIV: {
+ Label div_with_sdiv;
+
+ // Check for 0 divisor.
+ __ cmp(right, Operand::Zero());
+ __ b(eq, &not_smi_result);
+
// Check for power of two on the right hand side.
- __ JumpIfNotPowerOfTwoOrZero(right, scratch1, &not_smi_result);
- // Check for positive and no remainder (scratch1 contains right - 1).
- __ orr(scratch2, scratch1, Operand(0x80000000u));
- __ tst(left, scratch2);
- __ b(ne, &not_smi_result);
+ __ sub(scratch1, right, Operand(1));
+ __ tst(scratch1, right);
+ if (CpuFeatures::IsSupported(SUDIV)) {
+ __ b(ne, &div_with_sdiv);
+ // Check for no remainder.
+ __ tst(left, scratch1);
+ __ b(ne, &not_smi_result);
+ // Check for positive left hand side.
+ __ cmp(left, Operand::Zero());
+ __ b(mi, &div_with_sdiv);
+ } else {
+ __ b(ne, &not_smi_result);
+ // Check for positive and no remainder.
+ __ orr(scratch2, scratch1, Operand(0x80000000u));
+ __ tst(left, scratch2);
+ __ b(ne, &not_smi_result);
+ }
// Perform division by shifting.
__ CountLeadingZeros(scratch1, scratch1, scratch2);
__ rsb(scratch1, scratch1, Operand(31));
__ mov(right, Operand(left, LSR, scratch1));
__ Ret();
+
+ if (CpuFeatures::IsSupported(SUDIV)) {
+ Label result_not_zero;
+
+ __ bind(&div_with_sdiv);
+ // Do division.
+ __ sdiv(scratch1, left, right);
+ // Check that the remainder is zero.
+ __ mls(scratch2, scratch1, right, left);
+ __ cmp(scratch2, Operand::Zero());
+ __ b(ne, &not_smi_result);
+ // Check for negative zero result.
+ __ cmp(scratch1, Operand::Zero());
+ __ b(ne, &result_not_zero);
+ __ cmp(right, Operand::Zero());
+ __ b(lt, &not_smi_result);
+ __ bind(&result_not_zero);
+ // Check for the corner case of dividing the most negative smi by -1.
+ __ cmp(scratch1, Operand(0x40000000));
+ __ b(eq, &not_smi_result);
+ // Tag and return the result.
+ __ SmiTag(right, scratch1);
+ __ Ret();
+ }
break;
- case Token::MOD:
- // Check for two positive smis.
- __ orr(scratch1, left, Operand(right));
- __ tst(scratch1, Operand(0x80000000u | kSmiTagMask));
- __ b(ne, &not_smi_result);
+ }
+ case Token::MOD: {
+ Label modulo_with_sdiv;
+
+ if (CpuFeatures::IsSupported(SUDIV)) {
+ // Check for x % 0.
+ __ cmp(right, Operand::Zero());
+ __ b(eq, &not_smi_result);
+
+ // Check for two positive smis.
+ __ orr(scratch1, left, Operand(right));
+ __ tst(scratch1, Operand(0x80000000u));
+ __ b(ne, &modulo_with_sdiv);
+
+ // Check for power of two on the right hand side.
+ __ sub(scratch1, right, Operand(1));
+ __ tst(scratch1, right);
+ __ b(ne, &modulo_with_sdiv);
+ } else {
+ // Check for two positive smis.
+ __ orr(scratch1, left, Operand(right));
+ __ tst(scratch1, Operand(0x80000000u));
+ __ b(ne, &not_smi_result);
- // Check for power of two on the right hand side.
- __ JumpIfNotPowerOfTwoOrZero(right, scratch1, &not_smi_result);
+ // Check for power of two on the right hand side.
+ __ JumpIfNotPowerOfTwoOrZero(right, scratch1, &not_smi_result);
+ }
- // Perform modulus by masking.
+ // Perform modulus by masking (scratch1 contains right - 1).
__ and_(right, left, Operand(scratch1));
__ Ret();
+
+ if (CpuFeatures::IsSupported(SUDIV)) {
+ __ bind(&modulo_with_sdiv);
+ __ mov(scratch2, right);
+ // Perform modulus with sdiv and mls.
+ __ sdiv(scratch1, left, right);
+ __ mls(right, scratch1, right, left);
+ // Return if the result is not 0.
+ __ cmp(right, Operand::Zero());
+ __ Ret(ne);
+ // The result is 0, check for -0 case.
+ __ cmp(left, Operand::Zero());
+ __ Ret(pl);
+ // This is a -0 case, restore the value of right.
+ __ mov(right, scratch2);
+ // We fall through here to not_smi_result to produce -0.
+ }
break;
+ }
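A condensed C++ sketch of the new sdiv-based fast paths (the power-of-two shift shortcut is omitted, and untagged 31-bit payloads are used for clarity; the stub actually divides the tagged values, which works because numerator and denominator carry the same x2 factor). Returning false marks every exit to not_smi_result:

#include <cstdint>

bool SmiDiv(int32_t left, int32_t right, int32_t* result) {
  if (right == 0) return false;                    // division by zero
  int32_t quotient = left / right;                 // sdiv
  if (left - quotient * right != 0) return false;  // mls: inexact division
  if (quotient == 0 && right < 0) return false;    // -0 is not a smi
  if (quotient == 0x40000000) return false;        // min-smi / -1 overflows
  *result = quotient;                              // SmiTag in the stub
  return true;
}

bool SmiMod(int32_t left, int32_t right, int32_t* result) {
  if (right == 0) return false;                    // x % 0
  int32_t remainder = left % right;                // sdiv + mls
  if (remainder == 0 && left < 0) return false;    // result would be -0
  *result = remainder;
  return true;
}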
case Token::BIT_OR:
__ orr(right, left, Operand(right));
__ Ret();
@@ -2565,10 +2694,24 @@ void BinaryOpStub::GenerateSmiSmiOperation(MacroAssembler* masm) {
}
-void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
- bool smi_operands,
- Label* not_numbers,
- Label* gc_required) {
+void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
+ Register result,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required,
+ OverwriteMode mode);
+
+
+void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm,
+ BinaryOpIC::TypeInfo left_type,
+ BinaryOpIC::TypeInfo right_type,
+ bool smi_operands,
+ Label* not_numbers,
+ Label* gc_required,
+ Label* miss,
+ Token::Value op,
+ OverwriteMode mode) {
Register left = r1;
Register right = r0;
Register scratch1 = r7;
@@ -2580,11 +2723,17 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
__ AssertSmi(left);
__ AssertSmi(right);
}
+ if (left_type == BinaryOpIC::SMI) {
+ __ JumpIfNotSmi(left, miss);
+ }
+ if (right_type == BinaryOpIC::SMI) {
+ __ JumpIfNotSmi(right, miss);
+ }
Register heap_number_map = r6;
__ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- switch (op_) {
+ switch (op) {
case Token::ADD:
case Token::SUB:
case Token::MUL:
@@ -2594,25 +2743,42 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
// depending on whether VFP3 is available or not.
FloatingPointHelper::Destination destination =
CpuFeatures::IsSupported(VFP2) &&
- op_ != Token::MOD ?
+ op != Token::MOD ?
FloatingPointHelper::kVFPRegisters :
FloatingPointHelper::kCoreRegisters;
// Allocate new heap number for result.
Register result = r5;
- GenerateHeapResultAllocation(
- masm, result, heap_number_map, scratch1, scratch2, gc_required);
+ BinaryOpStub_GenerateHeapResultAllocation(
+ masm, result, heap_number_map, scratch1, scratch2, gc_required, mode);
// Load the operands.
if (smi_operands) {
FloatingPointHelper::LoadSmis(masm, destination, scratch1, scratch2);
} else {
- FloatingPointHelper::LoadOperands(masm,
- destination,
- heap_number_map,
- scratch1,
- scratch2,
- not_numbers);
+ // Load right operand to d7 or r2/r3.
+ if (right_type == BinaryOpIC::INT32) {
+ FloatingPointHelper::LoadNumberAsInt32Double(
+ masm, right, destination, d7, d8, r2, r3, heap_number_map,
+ scratch1, scratch2, s0, miss);
+ } else {
+ Label* fail = (right_type == BinaryOpIC::NUMBER) ? miss : not_numbers;
+ FloatingPointHelper::LoadNumber(
+ masm, destination, right, d7, r2, r3, heap_number_map,
+ scratch1, scratch2, fail);
+ }
+ // Load left operand to d6 or r0/r1. This keeps r0/r1 intact if it
+ // jumps to |miss|.
+ if (left_type == BinaryOpIC::INT32) {
+ FloatingPointHelper::LoadNumberAsInt32Double(
+ masm, left, destination, d6, d8, r0, r1, heap_number_map,
+ scratch1, scratch2, s0, miss);
+ } else {
+ Label* fail = (left_type == BinaryOpIC::NUMBER) ? miss : not_numbers;
+ FloatingPointHelper::LoadNumber(
+ masm, destination, left, d6, r0, r1, heap_number_map,
+ scratch1, scratch2, fail);
+ }
}
// Calculate the result.
@@ -2621,7 +2787,7 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
// d6: Left value
// d7: Right value
CpuFeatures::Scope scope(VFP2);
- switch (op_) {
+ switch (op) {
case Token::ADD:
__ vadd(d5, d6, d7);
break;
@@ -2645,7 +2811,7 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
} else {
// Call the C function to handle the double operation.
FloatingPointHelper::CallCCodeForDoubleOperation(masm,
- op_,
+ op,
result,
scratch1);
if (FLAG_debug_code) {
@@ -2686,7 +2852,7 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
}
Label result_not_a_smi;
- switch (op_) {
+ switch (op) {
case Token::BIT_OR:
__ orr(r2, r3, Operand(r2));
break;
@@ -2737,8 +2903,9 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
__ AllocateHeapNumber(
result, scratch1, scratch2, heap_number_map, gc_required);
} else {
- GenerateHeapResultAllocation(
- masm, result, heap_number_map, scratch1, scratch2, gc_required);
+ BinaryOpStub_GenerateHeapResultAllocation(
+ masm, result, heap_number_map, scratch1, scratch2, gc_required,
+ mode);
}
// r2: Answer as signed int32.
@@ -2753,7 +2920,7 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
// mentioned above SHR needs to always produce a positive result.
CpuFeatures::Scope scope(VFP2);
__ vmov(s0, r2);
- if (op_ == Token::SHR) {
+ if (op == Token::SHR) {
__ vcvt_f64_u32(d0, s0);
} else {
__ vcvt_f64_s32(d0, s0);
@@ -2778,12 +2945,14 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
// Generate the smi code. If the operation on smis are successful this return is
// generated. If the result is not a smi and heap number allocation is not
// requested the code falls through. If number allocation is requested but a
-// heap number cannot be allocated the code jumps to the lable gc_required.
-void BinaryOpStub::GenerateSmiCode(
+// heap number cannot be allocated the code jumps to the label gc_required.
+void BinaryOpStub_GenerateSmiCode(
MacroAssembler* masm,
Label* use_runtime,
Label* gc_required,
- SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
+ Token::Value op,
+ BinaryOpStub::SmiCodeGenerateHeapNumberResults allow_heapnumber_results,
+ OverwriteMode mode) {
Label not_smis;
Register left = r1;
@@ -2796,12 +2965,14 @@ void BinaryOpStub::GenerateSmiCode(
__ JumpIfNotSmi(scratch1, &not_smis);
// If the smi-smi operation results in a smi return is generated.
- GenerateSmiSmiOperation(masm);
+ BinaryOpStub_GenerateSmiSmiOperation(masm, op);
// If heap number results are possible generate the result in an allocated
// heap number.
- if (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) {
- GenerateFPOperation(masm, true, use_runtime, gc_required);
+ if (allow_heapnumber_results == BinaryOpStub::ALLOW_HEAPNUMBER_RESULTS) {
+ BinaryOpStub_GenerateFPOperation(
+ masm, BinaryOpIC::UNINITIALIZED, BinaryOpIC::UNINITIALIZED, true,
+ use_runtime, gc_required, &not_smis, op, mode);
}
__ bind(&not_smis);
}
@@ -2813,14 +2984,14 @@ void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
if (result_type_ == BinaryOpIC::UNINITIALIZED ||
result_type_ == BinaryOpIC::SMI) {
// Only allow smi results.
- GenerateSmiCode(masm, &call_runtime, NULL, NO_HEAPNUMBER_RESULTS);
+ BinaryOpStub_GenerateSmiCode(
+ masm, &call_runtime, NULL, op_, NO_HEAPNUMBER_RESULTS, mode_);
} else {
// Allow heap number result and don't make a transition if a heap number
// cannot be allocated.
- GenerateSmiCode(masm,
- &call_runtime,
- &call_runtime,
- ALLOW_HEAPNUMBER_RESULTS);
+ BinaryOpStub_GenerateSmiCode(
+ masm, &call_runtime, &call_runtime, op_, ALLOW_HEAPNUMBER_RESULTS,
+ mode_);
}
// Code falls through if the result is not returned as either a smi or heap
@@ -2828,23 +2999,14 @@ void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
GenerateTypeTransition(masm);
__ bind(&call_runtime);
+ GenerateRegisterArgsPush(masm);
GenerateCallRuntime(masm);
}
-void BinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
- ASSERT(operands_type_ == BinaryOpIC::STRING);
- ASSERT(op_ == Token::ADD);
- // Try to add arguments as strings, otherwise, transition to the generic
- // BinaryOpIC type.
- GenerateAddStrings(masm);
- GenerateTypeTransition(masm);
-}
-
-
void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
Label call_runtime;
- ASSERT(operands_type_ == BinaryOpIC::BOTH_STRING);
+ ASSERT(left_type_ == BinaryOpIC::STRING && right_type_ == BinaryOpIC::STRING);
ASSERT(op_ == Token::ADD);
// If both arguments are strings, call the string add stub.
// Otherwise, do a transition.
@@ -2873,7 +3035,7 @@ void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
- ASSERT(operands_type_ == BinaryOpIC::INT32);
+ ASSERT(Max(left_type_, right_type_) == BinaryOpIC::INT32);
Register left = r1;
Register right = r0;
@@ -2895,7 +3057,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
Label skip;
__ orr(scratch1, left, right);
__ JumpIfNotSmi(scratch1, &skip);
- GenerateSmiSmiOperation(masm);
+ BinaryOpStub_GenerateSmiSmiOperation(masm, op_);
// Fall through if the result is not a smi.
__ bind(&skip);
@@ -2905,6 +3067,15 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
case Token::MUL:
case Token::DIV:
case Token::MOD: {
+ // It could be that only SMIs have been seen at either the left
+ // or the right operand. For precise type feedback, patch the IC
+ // again if this changes.
+ if (left_type_ == BinaryOpIC::SMI) {
+ __ JumpIfNotSmi(left, &transition);
+ }
+ if (right_type_ == BinaryOpIC::SMI) {
+ __ JumpIfNotSmi(right, &transition);
+ }
// Load both operands and check that they are 32-bit integer.
// Jump to type transition if they are not. The registers r0 and r1 (right
// and left) are preserved for the runtime call.
@@ -2999,16 +3170,17 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
__ bind(&return_heap_number);
// Return a heap number, or fall through to type transition or runtime
// call if we can't.
- if (result_type_ >= ((op_ == Token::DIV) ? BinaryOpIC::HEAP_NUMBER
+ if (result_type_ >= ((op_ == Token::DIV) ? BinaryOpIC::NUMBER
: BinaryOpIC::INT32)) {
// We are using vfp registers so r5 is available.
heap_number_result = r5;
- GenerateHeapResultAllocation(masm,
- heap_number_result,
- heap_number_map,
- scratch1,
- scratch2,
- &call_runtime);
+ BinaryOpStub_GenerateHeapResultAllocation(masm,
+ heap_number_result,
+ heap_number_map,
+ scratch1,
+ scratch2,
+ &call_runtime,
+ mode_);
__ sub(r0, heap_number_result, Operand(kHeapObjectTag));
__ vstr(d5, r0, HeapNumber::kValueOffset);
__ mov(r0, heap_number_result);
@@ -3027,12 +3199,13 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
// Allocate a heap number to store the result.
heap_number_result = r5;
- GenerateHeapResultAllocation(masm,
- heap_number_result,
- heap_number_map,
- scratch1,
- scratch2,
- &pop_and_call_runtime);
+ BinaryOpStub_GenerateHeapResultAllocation(masm,
+ heap_number_result,
+ heap_number_map,
+ scratch1,
+ scratch2,
+ &pop_and_call_runtime,
+ mode_);
// Load the left value from the value saved on the stack.
__ Pop(r1, r0);
@@ -3137,12 +3310,13 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
__ bind(&return_heap_number);
heap_number_result = r5;
- GenerateHeapResultAllocation(masm,
- heap_number_result,
- heap_number_map,
- scratch1,
- scratch2,
- &call_runtime);
+ BinaryOpStub_GenerateHeapResultAllocation(masm,
+ heap_number_result,
+ heap_number_map,
+ scratch1,
+ scratch2,
+ &call_runtime,
+ mode_);
if (CpuFeatures::IsSupported(VFP2)) {
CpuFeatures::Scope scope(VFP2);
@@ -3186,6 +3360,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
}
__ bind(&call_runtime);
+ GenerateRegisterArgsPush(masm);
GenerateCallRuntime(masm);
}
@@ -3219,25 +3394,37 @@ void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
}
__ bind(&done);
- GenerateHeapNumberStub(masm);
+ GenerateNumberStub(masm);
}
-void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
- Label call_runtime;
- GenerateFPOperation(masm, false, &call_runtime, &call_runtime);
+void BinaryOpStub::GenerateNumberStub(MacroAssembler* masm) {
+ Label call_runtime, transition;
+ BinaryOpStub_GenerateFPOperation(
+ masm, left_type_, right_type_, false,
+ &transition, &call_runtime, &transition, op_, mode_);
+
+ __ bind(&transition);
+ GenerateTypeTransition(masm);
__ bind(&call_runtime);
+ GenerateRegisterArgsPush(masm);
GenerateCallRuntime(masm);
}
void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
- Label call_runtime, call_string_add_or_runtime;
+ Label call_runtime, call_string_add_or_runtime, transition;
- GenerateSmiCode(masm, &call_runtime, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
+ BinaryOpStub_GenerateSmiCode(
+ masm, &call_runtime, &call_runtime, op_, ALLOW_HEAPNUMBER_RESULTS, mode_);
- GenerateFPOperation(masm, false, &call_string_add_or_runtime, &call_runtime);
+ BinaryOpStub_GenerateFPOperation(
+ masm, left_type_, right_type_, false,
+ &call_string_add_or_runtime, &call_runtime, &transition, op_, mode_);
+
+ __ bind(&transition);
+ GenerateTypeTransition(masm);
__ bind(&call_string_add_or_runtime);
if (op_ == Token::ADD) {
@@ -3245,6 +3432,7 @@ void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
}
__ bind(&call_runtime);
+ GenerateRegisterArgsPush(masm);
GenerateCallRuntime(masm);
}
@@ -3280,61 +3468,20 @@ void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
}
-void BinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) {
- GenerateRegisterArgsPush(masm);
- switch (op_) {
- case Token::ADD:
- __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
- break;
- case Token::SUB:
- __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
- break;
- case Token::MUL:
- __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
- break;
- case Token::DIV:
- __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
- break;
- case Token::MOD:
- __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
- break;
- case Token::BIT_OR:
- __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
- break;
- case Token::BIT_AND:
- __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
- break;
- case Token::BIT_XOR:
- __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
- break;
- case Token::SAR:
- __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
- break;
- case Token::SHR:
- __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
- break;
- case Token::SHL:
- __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void BinaryOpStub::GenerateHeapResultAllocation(MacroAssembler* masm,
- Register result,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
+void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
+ Register result,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required,
+ OverwriteMode mode) {
// Code below will scratch result if allocation fails. To keep both arguments
// intact for the runtime call result cannot be one of these.
ASSERT(!result.is(r0) && !result.is(r1));
- if (mode_ == OVERWRITE_LEFT || mode_ == OVERWRITE_RIGHT) {
+ if (mode == OVERWRITE_LEFT || mode == OVERWRITE_RIGHT) {
Label skip_allocation, allocated;
- Register overwritable_operand = mode_ == OVERWRITE_LEFT ? r1 : r0;
+ Register overwritable_operand = mode == OVERWRITE_LEFT ? r1 : r0;
// If the overwritable operand is already an object, we skip the
// allocation of a heap number.
__ JumpIfNotSmi(overwritable_operand, &skip_allocation);
@@ -3347,7 +3494,7 @@ void BinaryOpStub::GenerateHeapResultAllocation(MacroAssembler* masm,
__ mov(result, Operand(overwritable_operand));
__ bind(&allocated);
} else {
- ASSERT(mode_ == NO_OVERWRITE);
+ ASSERT(mode == NO_OVERWRITE);
__ AllocateHeapNumber(
result, scratch1, scratch2, heap_number_map, gc_required);
}
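The refactored allocator keeps the old behavior, now parameterized on the mode argument. In sketch form (hypothetical value model; the OverwriteMode names are the real ones): reuse the operand the stub is allowed to clobber when it is already a heap number, because smis are immutable and have no storage to reuse.

enum OverwriteMode { NO_OVERWRITE, OVERWRITE_LEFT, OVERWRITE_RIGHT };
struct Value { bool is_smi; double* heap_number; };

double* ResultStorage(Value left, Value right, OverwriteMode mode,
                      double* (*allocate_heap_number)()) {
  if (mode == OVERWRITE_LEFT || mode == OVERWRITE_RIGHT) {
    Value target = (mode == OVERWRITE_LEFT) ? left : right;
    if (!target.is_smi) return target.heap_number;  // skip_allocation
  }
  return allocate_heap_number();  // can fail -> gc_required in the stub
}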
@@ -3425,7 +3572,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
__ ldr(cache_entry, MemOperand(cache_entry, cache_array_index));
// r0 points to the cache for the type type_.
// If NULL, the cache hasn't been initialized yet, so go through runtime.
- __ cmp(cache_entry, Operand(0, RelocInfo::NONE));
+ __ cmp(cache_entry, Operand::Zero());
__ b(eq, &invalid_cache);
#ifdef DEBUG
@@ -3604,10 +3751,10 @@ void MathPowStub::Generate(MacroAssembler* masm) {
const Register exponent = r2;
const Register heapnumbermap = r5;
const Register heapnumber = r0;
- const DoubleRegister double_base = d1;
- const DoubleRegister double_exponent = d2;
- const DoubleRegister double_result = d3;
- const DoubleRegister double_scratch = d0;
+ const DwVfpRegister double_base = d1;
+ const DwVfpRegister double_exponent = d2;
+ const DwVfpRegister double_result = d3;
+ const DwVfpRegister double_scratch = d0;
const SwVfpRegister single_scratch = s0;
const Register scratch = r9;
const Register scratch2 = r7;
@@ -3736,8 +3883,8 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ vmov(double_result, 1.0, scratch2);
// Get absolute value of exponent.
- __ cmp(scratch, Operand(0));
- __ mov(scratch2, Operand(0), LeaveCC, mi);
+ __ cmp(scratch, Operand::Zero());
+ __ mov(scratch2, Operand::Zero(), LeaveCC, mi);
__ sub(scratch, scratch2, scratch, LeaveCC, mi);
Label while_true;
@@ -3747,7 +3894,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ vmul(double_scratch, double_scratch, double_scratch, ne);
__ b(ne, &while_true);
- __ cmp(exponent, Operand(0));
+ __ cmp(exponent, Operand::Zero());
__ b(ge, &done);
__ vmov(double_scratch, 1.0, scratch);
__ vdiv(double_result, double_scratch, double_result);
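The surrounding loop is classic square-and-multiply on the absolute exponent, with one final division for negative exponents. A standalone C++ restatement (same structure as the ASR/vmul sequence, illustrative only):

double PowInt(double base, int exponent) {
  int e = exponent < 0 ? -exponent : exponent;  // cmp/sub with 'mi'
  double result = 1.0;                          // vmov(double_result, 1.0)
  double scratch = base;
  while (e != 0) {
    if (e & 1) result *= scratch;               // vmul(..., cs): bit was set
    scratch *= scratch;                         // square for the next bit
    e >>= 1;                                    // mov(..., ASR 1, SetCC)
  }
  if (exponent < 0) result = 1.0 / result;      // the final vdiv
  return result;
}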
@@ -3808,31 +3955,60 @@ bool CEntryStub::IsPregenerated() {
}
-void CodeStub::GenerateStubsAheadOfTime() {
- CEntryStub::GenerateAheadOfTime();
- WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime();
- StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime();
- RecordWriteStub::GenerateFixedRegStubsAheadOfTime();
+void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
+ CEntryStub::GenerateAheadOfTime(isolate);
+ WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(isolate);
+ StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
+ RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate);
}
-void CodeStub::GenerateFPStubs() {
- CEntryStub save_doubles(1, kSaveFPRegs);
- Handle<Code> code = save_doubles.GetCode();
- code->set_is_pregenerated(true);
- StoreBufferOverflowStub stub(kSaveFPRegs);
- stub.GetCode()->set_is_pregenerated(true);
- code->GetIsolate()->set_fp_stubs_generated(true);
+void CodeStub::GenerateFPStubs(Isolate* isolate) {
+ SaveFPRegsMode mode = CpuFeatures::IsSupported(VFP2)
+ ? kSaveFPRegs
+ : kDontSaveFPRegs;
+ CEntryStub save_doubles(1, mode);
+ StoreBufferOverflowStub stub(mode);
+ // These stubs might already be in the snapshot; detect that and don't
+ // regenerate them, since regenerating would corrupt the code stub
+ // initialization state.
+ Code* save_doubles_code = NULL;
+ Code* store_buffer_overflow_code = NULL;
+ if (!save_doubles.FindCodeInCache(&save_doubles_code, ISOLATE)) {
+ if (CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope2(VFP2);
+ save_doubles_code = *save_doubles.GetCode(isolate);
+ store_buffer_overflow_code = *stub.GetCode(isolate);
+ } else {
+ save_doubles_code = *save_doubles.GetCode(isolate);
+ store_buffer_overflow_code = *stub.GetCode(isolate);
+ }
+ save_doubles_code->set_is_pregenerated(true);
+ store_buffer_overflow_code->set_is_pregenerated(true);
+ }
+ ISOLATE->set_fp_stubs_generated(true);
}
-void CEntryStub::GenerateAheadOfTime() {
+void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
CEntryStub stub(1, kDontSaveFPRegs);
- Handle<Code> code = stub.GetCode();
+ Handle<Code> code = stub.GetCode(isolate);
code->set_is_pregenerated(true);
}
+static void JumpIfOOM(MacroAssembler* masm,
+ Register value,
+ Register scratch,
+ Label* oom_label) {
+ STATIC_ASSERT(Failure::OUT_OF_MEMORY_EXCEPTION == 3);
+ STATIC_ASSERT(kFailureTag == 3);
+ __ and_(scratch, value, Operand(0xf));
+ __ cmp(scratch, Operand(0xf));
+ __ b(eq, oom_label);
+}
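JumpIfOOM leans on the failure encoding pinned down by the two STATIC_ASSERTs: with a 2-bit kFailureTag of 0b11 and failure type 3 in the next two bits, an out-of-memory failure is exactly a value whose four low bits are all set. A sketch (the 2-bit tag width is an assumption implied by the asserts' arithmetic):

#include <cstdint>

const uint32_t kFailureTag = 3;       // 0b11, low 2 bits
const uint32_t kFailureTagSize = 2;
const uint32_t kOutOfMemoryType = 3;  // Failure::OUT_OF_MEMORY_EXCEPTION

bool IsOutOfMemory(uint32_t value) {
  return (value & 0xf) == 0xf;        // the and_/cmp pair in JumpIfOOM
}

int main() {
  uint32_t oom = (kOutOfMemoryType << kFailureTagSize) | kFailureTag;  // 0xf
  return IsOutOfMemory(oom) ? 0 : 1;
}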
+
+
void CEntryStub::GenerateCore(MacroAssembler* masm,
Label* throw_normal_exception,
Label* throw_termination_exception,
@@ -3932,9 +4108,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
__ b(eq, &retry);
// Special handling of out of memory exceptions.
- Failure* out_of_memory = Failure::OutOfMemoryException();
- __ cmp(r0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
- __ b(eq, throw_out_of_memory_exception);
+ JumpIfOOM(masm, r0, ip, throw_out_of_memory_exception);
// Retrieve the pending exception and clear the variable.
__ mov(r3, Operand(isolate->factory()->the_hole_value()));
@@ -4021,13 +4195,16 @@ void CEntryStub::Generate(MacroAssembler* masm) {
Isolate* isolate = masm->isolate();
ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress,
isolate);
- __ mov(r0, Operand(false, RelocInfo::NONE));
+ __ mov(r0, Operand(false, RelocInfo::NONE32));
__ mov(r2, Operand(external_caught));
__ str(r0, MemOperand(r2));
// Set pending exception and r0 to out of memory exception.
- Failure* out_of_memory = Failure::OutOfMemoryException();
+ Label already_have_failure;
+ JumpIfOOM(masm, r0, ip, &already_have_failure);
+ Failure* out_of_memory = Failure::OutOfMemoryException(0x1);
__ mov(r0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
+ __ bind(&already_have_failure);
__ mov(r2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
isolate)));
__ str(r0, MemOperand(r2));
@@ -4397,12 +4574,177 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
}
+void ArrayLengthStub::Generate(MacroAssembler* masm) {
+ Label miss;
+ Register receiver;
+ if (kind() == Code::KEYED_LOAD_IC) {
+ // ----------- S t a t e -------------
+ // -- lr : return address
+ // -- r0 : key
+ // -- r1 : receiver
+ // -----------------------------------
+ __ cmp(r0, Operand(masm->isolate()->factory()->length_string()));
+ __ b(ne, &miss);
+ receiver = r1;
+ } else {
+ ASSERT(kind() == Code::LOAD_IC);
+ // ----------- S t a t e -------------
+ // -- r2 : name
+ // -- lr : return address
+ // -- r0 : receiver
+ // -- sp[0] : receiver
+ // -----------------------------------
+ receiver = r0;
+ }
+
+ StubCompiler::GenerateLoadArrayLength(masm, receiver, r3, &miss);
+ __ bind(&miss);
+ StubCompiler::GenerateLoadMiss(masm, kind());
+}
+
+
+void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
+ Label miss;
+ Register receiver;
+ if (kind() == Code::KEYED_LOAD_IC) {
+ // ----------- S t a t e -------------
+ // -- lr : return address
+ // -- r0 : key
+ // -- r1 : receiver
+ // -----------------------------------
+ __ cmp(r0, Operand(masm->isolate()->factory()->prototype_string()));
+ __ b(ne, &miss);
+ receiver = r1;
+ } else {
+ ASSERT(kind() == Code::LOAD_IC);
+ // ----------- S t a t e -------------
+ // -- r2 : name
+ // -- lr : return address
+ // -- r0 : receiver
+ // -- sp[0] : receiver
+ // -----------------------------------
+ receiver = r0;
+ }
+
+ StubCompiler::GenerateLoadFunctionPrototype(masm, receiver, r3, r4, &miss);
+ __ bind(&miss);
+ StubCompiler::GenerateLoadMiss(masm, kind());
+}
+
+
+void StringLengthStub::Generate(MacroAssembler* masm) {
+ Label miss;
+ Register receiver;
+ if (kind() == Code::KEYED_LOAD_IC) {
+ // ----------- S t a t e -------------
+ // -- lr : return address
+ // -- r0 : key
+ // -- r1 : receiver
+ // -----------------------------------
+ __ cmp(r0, Operand(masm->isolate()->factory()->length_string()));
+ __ b(ne, &miss);
+ receiver = r1;
+ } else {
+ ASSERT(kind() == Code::LOAD_IC);
+ // ----------- S t a t e -------------
+ // -- r2 : name
+ // -- lr : return address
+ // -- r0 : receiver
+ // -- sp[0] : receiver
+ // -----------------------------------
+ receiver = r0;
+ }
+
+ StubCompiler::GenerateLoadStringLength(masm, receiver, r3, r4, &miss,
+ support_wrapper_);
+
+ __ bind(&miss);
+ StubCompiler::GenerateLoadMiss(masm, kind());
+}
+
+
+void StoreArrayLengthStub::Generate(MacroAssembler* masm) {
+ // This accepts as a receiver anything JSArray::SetElementsLength accepts
+ // (currently anything except for external arrays which means anything with
+ // elements of FixedArray type). Value must be a number, but only smis are
+ // accepted as the most common case.
+ Label miss;
+
+ Register receiver;
+ Register value;
+ if (kind() == Code::KEYED_STORE_IC) {
+ // ----------- S t a t e -------------
+ // -- lr : return address
+ // -- r0 : value
+ // -- r1 : key
+ // -- r2 : receiver
+ // -----------------------------------
+ __ cmp(r1, Operand(masm->isolate()->factory()->length_string()));
+ __ b(ne, &miss);
+ receiver = r2;
+ value = r0;
+ } else {
+ ASSERT(kind() == Code::STORE_IC);
+ // ----------- S t a t e -------------
+ // -- lr : return address
+ // -- r0 : value
+ // -- r1 : receiver
+ // -- r2 : key
+ // -----------------------------------
+ receiver = r1;
+ value = r0;
+ }
+ Register scratch = r3;
+
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(receiver, &miss);
+
+ // Check that the object is a JS array.
+ __ CompareObjectType(receiver, scratch, scratch, JS_ARRAY_TYPE);
+ __ b(ne, &miss);
+
+ // Check that elements are FixedArray.
+ // We rely on StoreIC_ArrayLength below to deal with all types of
+ // fast elements (including COW).
+ __ ldr(scratch, FieldMemOperand(receiver, JSArray::kElementsOffset));
+ __ CompareObjectType(scratch, scratch, scratch, FIXED_ARRAY_TYPE);
+ __ b(ne, &miss);
+
+ // Check that the array has fast properties, otherwise the length
+ // property might have been redefined.
+ __ ldr(scratch, FieldMemOperand(receiver, JSArray::kPropertiesOffset));
+ __ ldr(scratch, FieldMemOperand(scratch, FixedArray::kMapOffset));
+ __ CompareRoot(scratch, Heap::kHashTableMapRootIndex);
+ __ b(eq, &miss);
+
+ // Check that value is a smi.
+ __ JumpIfNotSmi(value, &miss);
+
+ // Prepare tail call to StoreIC_ArrayLength.
+ __ Push(receiver, value);
+
+ ExternalReference ref =
+ ExternalReference(IC_Utility(IC::kStoreIC_ArrayLength), masm->isolate());
+ __ TailCallExternalReference(ref, 2, 1);
+
+ __ bind(&miss);
+
+ StubCompiler::GenerateStoreMiss(masm, kind());
+}
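The new stub is a chain of cheap guards, each punting to the miss handler. Summarized as a predicate over a hypothetical object model (field names are illustrative; the checks mirror the code above one-for-one):

struct ReceiverInfo {
  bool is_smi;
  bool is_js_array;                // CompareObjectType(JS_ARRAY_TYPE)
  bool elements_are_fixed_array;   // rules out external arrays
  bool properties_are_dictionary;  // length may have been redefined
};

bool CanStoreArrayLengthInline(const ReceiverInfo& receiver,
                               bool value_is_smi) {
  if (receiver.is_smi) return false;
  if (!receiver.is_js_array) return false;
  if (!receiver.elements_are_fixed_array) return false;
  if (receiver.properties_are_dictionary) return false;
  if (!value_is_smi) return false;  // only the common smi case inline
  return true;                      // tail-call StoreIC_ArrayLength
}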
+
+
Register InstanceofStub::left() { return r0; }
Register InstanceofStub::right() { return r1; }
+void LoadFieldStub::Generate(MacroAssembler* masm) {
+ StubCompiler::DoGenerateFastPropertyLoad(masm, r0, reg_, inobject_, index_);
+ __ Ret();
+}
+
+
void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
// The displacement is the offset of the last parameter (if any)
// relative to the frame pointer.
@@ -4703,7 +5045,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// of the arguments object and the elements array in words.
Label add_arguments_object;
__ bind(&try_allocate);
- __ cmp(r1, Operand(0, RelocInfo::NONE));
+ __ cmp(r1, Operand::Zero());
__ b(eq, &add_arguments_object);
__ mov(r1, Operand(r1, LSR, kSmiTagSize));
__ add(r1, r1, Operand(FixedArray::kHeaderSize / kPointerSize));
@@ -4736,7 +5078,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// If there are no actual arguments, we're done.
Label done;
- __ cmp(r1, Operand(0, RelocInfo::NONE));
+ __ cmp(r1, Operand::Zero());
__ b(eq, &done);
// Get the parameters pointer from the stack.
@@ -4763,7 +5105,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// Post-increment r4 with kPointerSize on each iteration.
__ str(r3, MemOperand(r4, kPointerSize, PostIndex));
__ sub(r1, r1, Operand(1));
- __ cmp(r1, Operand(0, RelocInfo::NONE));
+ __ cmp(r1, Operand::Zero());
__ b(ne, &loop);
// Return and remove the on-stack parameters.
@@ -4796,8 +5138,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
const int kSubjectOffset = 2 * kPointerSize;
const int kJSRegExpOffset = 3 * kPointerSize;
- Label runtime, invoke_regexp;
-
+ Label runtime;
// Allocation of registers for this function. These are in callee save
// registers and will be preserved by the call to the native RegExp code, as
// this code is called using the normal C calling convention. When calling
@@ -4815,7 +5156,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
ExternalReference::address_of_regexp_stack_memory_size(isolate);
__ mov(r0, Operand(address_of_regexp_stack_memory_size));
__ ldr(r0, MemOperand(r0, 0));
- __ cmp(r0, Operand(0));
+ __ cmp(r0, Operand::Zero());
__ b(eq, &runtime);
// Check that the first argument is a JSRegExp object.
@@ -4844,68 +5185,48 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Check that the number of captures fit in the static offsets vector buffer.
__ ldr(r2,
FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
- // Calculate number of capture registers (number_of_captures + 1) * 2. This
- // uses the asumption that smis are 2 * their untagged value.
+ // Check (number_of_captures + 1) * 2 <= offsets vector size
+ // Or number_of_captures * 2 <= offsets vector size - 2
+ // Multiplying by 2 comes for free since r2 is smi-tagged.
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
- __ add(r2, r2, Operand(2)); // r2 was a smi.
- // Check that the static offsets vector buffer is large enough.
- __ cmp(r2, Operand(Isolate::kJSRegexpStaticOffsetsVectorSize));
+ STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2);
+ __ cmp(r2, Operand(Isolate::kJSRegexpStaticOffsetsVectorSize - 2));
__ b(hi, &runtime);
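The rewritten bound check works because a smi is its payload shifted left by one, so the tagged capture count already equals number_of_captures * 2; comparing it unsigned (hence the 'hi' branch) against the vector size minus 2 is the whole check. In plain C++:

#include <cstdint>

bool CapturesFitInOffsetsVector(int32_t captures_smi, int32_t vector_size) {
  // captures_smi == number_of_captures << 1, and
  // (n + 1) * 2 <= size  <=>  n * 2 <= size - 2.
  return static_cast<uint32_t>(captures_smi) <=
         static_cast<uint32_t>(vector_size - 2);
}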
- // r2: Number of capture registers
- // regexp_data: RegExp data (FixedArray)
- // Check that the second argument is a string.
+ // Reset offset for possibly sliced string.
+ __ mov(r9, Operand::Zero());
__ ldr(subject, MemOperand(sp, kSubjectOffset));
__ JumpIfSmi(subject, &runtime);
- Condition is_string = masm->IsObjectStringType(subject, r0);
- __ b(NegateCondition(is_string), &runtime);
- // Get the length of the string to r3.
- __ ldr(r3, FieldMemOperand(subject, String::kLengthOffset));
-
- // r2: Number of capture registers
- // r3: Length of subject string as a smi
- // subject: Subject string
- // regexp_data: RegExp data (FixedArray)
- // Check that the third argument is a positive smi less than the subject
- // string length. A negative value will be greater (unsigned comparison).
- __ ldr(r0, MemOperand(sp, kPreviousIndexOffset));
- __ JumpIfNotSmi(r0, &runtime);
- __ cmp(r3, Operand(r0));
- __ b(ls, &runtime);
-
- // r2: Number of capture registers
- // subject: Subject string
- // regexp_data: RegExp data (FixedArray)
- // Check that the fourth object is a JSArray object.
- __ ldr(r0, MemOperand(sp, kLastMatchInfoOffset));
- __ JumpIfSmi(r0, &runtime);
- __ CompareObjectType(r0, r1, r1, JS_ARRAY_TYPE);
- __ b(ne, &runtime);
- // Check that the JSArray is in fast case.
- __ ldr(last_match_info_elements,
- FieldMemOperand(r0, JSArray::kElementsOffset));
- __ ldr(r0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
- __ CompareRoot(r0, Heap::kFixedArrayMapRootIndex);
- __ b(ne, &runtime);
- // Check that the last match info has space for the capture registers and the
- // additional information.
- __ ldr(r0,
- FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
- __ add(r2, r2, Operand(RegExpImpl::kLastMatchOverhead));
- __ cmp(r2, Operand(r0, ASR, kSmiTagSize));
- __ b(gt, &runtime);
-
- // Reset offset for possibly sliced string.
- __ mov(r9, Operand(0));
- // subject: Subject string
- // regexp_data: RegExp data (FixedArray)
- // Check the representation and encoding of the subject string.
- Label seq_string;
+ __ mov(r3, subject); // Make a copy of the original subject string.
__ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
__ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
- // First check for flat string. None of the following string type tests will
- // succeed if subject is not a string or a short external string.
+ // subject: subject string
+ // r3: subject string
+ // r0: subject string instance type
+ // regexp_data: RegExp data (FixedArray)
+ // Handle subject string according to its encoding and representation:
+ // (1) Sequential string? If yes, go to (5).
+ // (2) Anything but sequential or cons? If yes, go to (6).
+ // (3) Cons string. If the string is flat, replace subject with first string.
+ // Otherwise bail out.
+ // (4) Is subject external? If yes, go to (7).
+ // (5) Sequential string. Load regexp code according to encoding.
+ // (E) Carry on.
+ /// [...]
+
+ // Deferred code at the end of the stub:
+ // (6) Not a long external string? If yes, go to (8).
+ // (7) External string. Make it, offset-wise, look like a sequential string.
+ // Go to (5).
+ // (8) Short external string or not a string? If yes, bail out to runtime.
+ // (9) Sliced string. Replace subject with parent. Go to (4).
+
+ Label seq_string /* 5 */, external_string /* 7 */,
+ check_underlying /* 4 */, not_seq_nor_cons /* 6 */,
+ not_long_external /* 8 */;
+
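The numbered plan peels at most one wrapper layer before the subject is sequential or external. A sketch of that walk over a hypothetical string model (the slice offset accumulates the way r9 does; a cons string must be flat, i.e. have an empty second part):

#include <cstddef>

enum Kind { SEQUENTIAL, CONS, SLICED, EXTERNAL };
struct Str {
  Kind kind;
  Str* first;            // cons: first part
  bool second_is_empty;  // cons: flat iff the second part is empty
  Str* parent;           // sliced: backing string
  int offset;            // sliced: start within parent
};

// Returns the string handed to the irregexp code, or NULL to bail out
// to the runtime (non-flat cons strings, among others).
Str* UnwrapSubject(Str* s, int* slice_offset) {
  *slice_offset = 0;                    // r9
  if (s->kind == CONS) {                // (3)
    if (!s->second_is_empty) return NULL;  // not flat: runtime
    s = s->first;
  } else if (s->kind == SLICED) {       // (9)
    *slice_offset = s->offset;
    s = s->parent;
  }
  // (4)/(5)/(7): s is now sequential or external; externals are then made
  // to look sequential, offset-wise, before the code pointer is loaded.
  return s;
}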
+ // (1) Sequential string? If yes, go to (5).
__ and_(r1,
r0,
Operand(kIsNotStringMask |
@@ -4913,77 +5234,62 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
kShortExternalStringMask),
SetCC);
STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
- __ b(eq, &seq_string);
+ __ b(eq, &seq_string); // Go to (5).
- // subject: Subject string
- // regexp_data: RegExp data (FixedArray)
- // r1: whether subject is a string and if yes, its string representation
- // Check for flat cons string or sliced string.
- // A flat cons string is a cons string where the second part is the empty
- // string. In that case the subject string is just the first part of the cons
- // string. Also in this case the first part of the cons string is known to be
- // a sequential string or an external string.
- // In the case of a sliced string its offset has to be taken into account.
- Label cons_string, external_string, check_encoding;
+ // (2) Anything but sequential or cons? If yes, go to (6).
STATIC_ASSERT(kConsStringTag < kExternalStringTag);
STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
__ cmp(r1, Operand(kExternalStringTag));
- __ b(lt, &cons_string);
- __ b(eq, &external_string);
-
- // Catch non-string subject or short external string.
- STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag !=0);
- __ tst(r1, Operand(kIsNotStringMask | kShortExternalStringMask));
- __ b(ne, &runtime);
+ __ b(ge, &not_seq_nor_cons); // Go to (6).
- // String is sliced.
- __ ldr(r9, FieldMemOperand(subject, SlicedString::kOffsetOffset));
- __ mov(r9, Operand(r9, ASR, kSmiTagSize));
- __ ldr(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
- // r9: offset of sliced string, smi-tagged.
- __ jmp(&check_encoding);
- // String is a cons string, check whether it is flat.
- __ bind(&cons_string);
+ // (3) Cons string. Check that it's flat.
+ // Replace subject with first string and reload instance type.
__ ldr(r0, FieldMemOperand(subject, ConsString::kSecondOffset));
- __ CompareRoot(r0, Heap::kEmptyStringRootIndex);
+ __ CompareRoot(r0, Heap::kempty_stringRootIndex);
__ b(ne, &runtime);
__ ldr(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
- // Is first part of cons or parent of slice a flat string?
- __ bind(&check_encoding);
+
+ // (4) Is subject external? If yes, go to (7).
+ __ bind(&check_underlying);
__ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
__ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
STATIC_ASSERT(kSeqStringTag == 0);
__ tst(r0, Operand(kStringRepresentationMask));
- __ b(ne, &external_string);
+ // The underlying external string is never a short external string.
+ STATIC_CHECK(ExternalString::kMaxShortLength < ConsString::kMinLength);
+ STATIC_CHECK(ExternalString::kMaxShortLength < SlicedString::kMinLength);
+ __ b(ne, &external_string); // Go to (7).
+ // (5) Sequential string. Load regexp code according to encoding.
__ bind(&seq_string);
- // subject: Subject string
- // regexp_data: RegExp data (FixedArray)
- // r0: Instance type of subject string
+ // subject: sequential subject string (or a look-alike external string)
+ // r3: original subject string
+ // Load previous index and check range before r3 is overwritten. We have to
+ // use r3 instead of subject here because subject might only have been made
+ // to look like a sequential string when it is actually an external string.
+ __ ldr(r1, MemOperand(sp, kPreviousIndexOffset));
+ __ JumpIfNotSmi(r1, &runtime);
+ __ ldr(r3, FieldMemOperand(r3, String::kLengthOffset));
+ __ cmp(r3, Operand(r1));
+ __ b(ls, &runtime);
+ __ mov(r1, Operand(r1, ASR, kSmiTagSize));
+
STATIC_ASSERT(4 == kOneByteStringTag);
STATIC_ASSERT(kTwoByteStringTag == 0);
- // Find the code object based on the assumptions above.
__ and_(r0, r0, Operand(kStringEncodingMask));
__ mov(r3, Operand(r0, ASR, 2), SetCC);
__ ldr(r7, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset), ne);
__ ldr(r7, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset), eq);
+ // (E) Carry on. String handling is done.
+ // r7: irregexp code
// Check that the irregexp code has been generated for the actual string
// encoding. If it has, the field contains a code object; otherwise it contains
// a smi (code flushing support).
__ JumpIfSmi(r7, &runtime);
- // r3: encoding of subject string (1 if ASCII, 0 if two_byte);
- // r7: code
- // subject: Subject string
- // regexp_data: RegExp data (FixedArray)
- // Load used arguments before starting to push arguments for call to native
- // RegExp code to avoid handling changing stack height.
- __ ldr(r1, MemOperand(sp, kPreviousIndexOffset));
- __ mov(r1, Operand(r1, ASR, kSmiTagSize));
-
// r1: previous index
// r3: encoding of subject string (1 if ASCII, 0 if two_byte);
// r7: code
@@ -5018,7 +5324,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Argument 6: Set the number of capture registers to zero to force global
// regexps to behave as non-global. This does not affect non-global regexps.
- __ mov(r0, Operand(0));
+ __ mov(r0, Operand::Zero());
__ str(r0, MemOperand(sp, 2 * kPointerSize));
// Argument 5 (sp[4]): static offsets vector buffer.
@@ -5063,10 +5369,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// subject: subject string (callee saved)
// regexp_data: RegExp data (callee saved)
// last_match_info_elements: Last match info elements (callee saved)
-
// Check the result.
Label success;
-
__ cmp(r0, Operand(1));
// We expect exactly one result since we force the called regexp to behave
// as non-global.
@@ -5112,10 +5416,29 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ ldr(r1,
FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
// Calculate number of capture registers (number_of_captures + 1) * 2.
+ // Multiplying by 2 comes for free since r1 is smi-tagged.
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
__ add(r1, r1, Operand(2)); // r1 was a smi.
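+ // E.g. with 3 captures, r1 arrives as Smi(3) == 6, and 6 + 2 == 8,
+ // which is (3 + 1) * 2.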
+ __ ldr(r0, MemOperand(sp, kLastMatchInfoOffset));
+ __ JumpIfSmi(r0, &runtime);
+ __ CompareObjectType(r0, r2, r2, JS_ARRAY_TYPE);
+ __ b(ne, &runtime);
+ // Check that the JSArray is in fast case.
+ __ ldr(last_match_info_elements,
+ FieldMemOperand(r0, JSArray::kElementsOffset));
+ __ ldr(r0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
+ __ CompareRoot(r0, Heap::kFixedArrayMapRootIndex);
+ __ b(ne, &runtime);
+ // Check that the last match info has space for the capture registers and the
+ // additional information.
+ __ ldr(r0,
+ FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
+ __ add(r2, r1, Operand(RegExpImpl::kLastMatchOverhead));
+ __ cmp(r2, Operand(r0, ASR, kSmiTagSize));
+ __ b(gt, &runtime);
+
// r1: number of capture registers
// r4: subject string
// Store the capture count.
@@ -5129,10 +5452,11 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ mov(r2, subject);
__ RecordWriteField(last_match_info_elements,
RegExpImpl::kLastSubjectOffset,
- r2,
+ subject,
r7,
kLRHasNotBeenSaved,
kDontSaveFPRegs);
+ __ mov(subject, r2);
__ str(subject,
FieldMemOperand(last_match_info_elements,
RegExpImpl::kLastInputOffset));
@@ -5172,8 +5496,17 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ add(sp, sp, Operand(4 * kPointerSize));
__ Ret();
- // External string. Short external strings have already been ruled out.
- // r0: scratch
+ // Do the runtime call to execute the regexp.
+ __ bind(&runtime);
+ __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+
+ // Deferred code for string handling.
+ // (6) Not a long external string? If yes, go to (8).
+ __ bind(&not_seq_nor_cons);
+ // Compare flags are still set.
+ __ b(gt, &not_long_external); // Go to (8).
+
+ // (7) External string. Make it, offset-wise, look like a sequential string.
__ bind(&external_string);
__ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
__ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
@@ -5186,15 +5519,24 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ ldr(subject,
FieldMemOperand(subject, ExternalString::kResourceDataOffset));
// Move the pointer so that offset-wise, it looks like a sequential string.
- STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
__ sub(subject,
subject,
Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
- __ jmp(&seq_string);
+ __ jmp(&seq_string); // Go to (5).
- // Do the runtime call to execute the regexp.
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+ // (8) Short external string or not a string? If yes, bail out to runtime.
+ __ bind(&not_long_external);
+ STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag != 0);
+ __ tst(r1, Operand(kIsNotStringMask | kShortExternalStringMask));
+ __ b(ne, &runtime);
+
+ // (9) Sliced string. Replace subject with parent. Go to (4).
+ // Load offset into r9 and replace subject string with parent.
+ __ ldr(r9, FieldMemOperand(subject, SlicedString::kOffsetOffset));
+ __ mov(r9, Operand(r9, ASR, kSmiTagSize));
+ __ ldr(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
+ __ jmp(&check_underlying); // Go to (4).
#endif // V8_INTERPRETED_REGEXP
}
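An aside for readers tracing the numbered (1)-(9) dispatch introduced above: it amounts to a small unwrapping loop over string representations. Below is a minimal C++ sketch of the same control flow, with invented names (Repr, Subject, PrepareSubject) standing in for the instance-type bit tests the stub actually performs:

    // Toy model of steps (1)-(9): peel cons/sliced wrappers until the subject
    // reads like a sequential string, or bail out to the runtime.
    enum Repr { kSeq, kCons, kSliced, kLongExternal, kShortExternal, kNotString };

    struct Subject {
      Repr repr;
      Subject* inner;      // cons first part / slice parent (hypothetical)
      bool cons_is_flat;   // true if the cons's second part is the empty string
      int slice_offset;
    };

    // Returns true if native regexp code can run; false means call the runtime.
    bool PrepareSubject(Subject* s, int* offset) {
      for (;;) {
        switch (s->repr) {
          case kSeq:           // (1)/(5): sequential, ready to use.
            return true;
          case kCons:          // (2)/(3): flat cons -> replace with first part.
            if (!s->cons_is_flat) return false;
            s = s->inner;
            break;             // re-check the underlying string, i.e. (4).
          case kLongExternal:  // (6)/(7): data pointer gets offset so it looks
            return true;       // sequential, then handled as (5).
          case kSliced:        // (9): record the offset, replace with parent.
            *offset = s->slice_offset;
            s = s->inner;
            break;             // back to (4).
          default:             // (8): short external or not a string at all.
            return false;
        }
      }
    }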
@@ -5273,7 +5615,7 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
// r3: Start of elements in FixedArray.
// r5: Number of elements to fill.
Label loop;
- __ cmp(r5, Operand(0));
+ __ cmp(r5, Operand::Zero());
__ bind(&loop);
__ b(le, &done); // Jump if r5 is negative or zero.
__ sub(r5, r5, Operand(1), SetCC);
@@ -5289,12 +5631,13 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
}
-static void GenerateRecordCallTarget(MacroAssembler* masm) {
+static void GenerateRecordCallTargetNoArray(MacroAssembler* masm) {
// Cache the called function in a global property cell. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
// megamorphic.
// r1 : the function to call
// r2 : cache cell for call target
+ ASSERT(!FLAG_optimize_constructed_arrays);
Label done;
ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()),
@@ -5328,6 +5671,82 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
}
+static void GenerateRecordCallTarget(MacroAssembler* masm) {
+ // Cache the called function in a global property cell. Cache states
+ // are uninitialized, monomorphic (indicated by a JSFunction), and
+ // megamorphic.
+ // r1 : the function to call
+ // r2 : cache cell for call target
+ ASSERT(FLAG_optimize_constructed_arrays);
+ Label initialize, done, miss, megamorphic, not_array_function;
+
+ ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()),
+ masm->isolate()->heap()->undefined_value());
+ ASSERT_EQ(*TypeFeedbackCells::UninitializedSentinel(masm->isolate()),
+ masm->isolate()->heap()->the_hole_value());
+
+ // Load the cache state into r3.
+ __ ldr(r3, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset));
+
+ // A monomorphic cache hit or an already megamorphic state: invoke the
+ // function without changing the state.
+ __ cmp(r3, r1);
+ __ b(eq, &done);
+ __ CompareRoot(r3, Heap::kUndefinedValueRootIndex);
+ __ b(eq, &done);
+
+ // Special handling of the Array() function, which caches not only the
+ // monomorphic Array function but also the initial ElementsKind with
+ // special sentinels.
+ Handle<Object> terminal_kind_sentinel =
+ TypeFeedbackCells::MonomorphicArraySentinel(masm->isolate(),
+ LAST_FAST_ELEMENTS_KIND);
+ __ cmp(r3, Operand(terminal_kind_sentinel));
+ __ b(ne, &miss);
+ // Make sure the function is the Array() function.
+ __ LoadArrayFunction(r3);
+ __ cmp(r1, r3);
+ __ b(ne, &megamorphic);
+ __ jmp(&done);
+
+ __ bind(&miss);
+
+ // A monomorphic miss (i.e., the cache is not uninitialized) goes
+ // megamorphic.
+ __ CompareRoot(r3, Heap::kTheHoleValueRootIndex);
+ __ b(eq, &initialize);
+ // MegamorphicSentinel is an immortal immovable object (undefined) so no
+ // write-barrier is needed.
+ __ bind(&megamorphic);
+ __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+ __ str(ip, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset));
+
+ // An uninitialized cache is patched with the function or sentinel to
+ // indicate the ElementsKind if function is the Array constructor.
+ __ bind(&initialize);
+ // Make sure the function is the Array() function.
+ __ LoadArrayFunction(r3);
+ __ cmp(r1, r3);
+ __ b(ne, &not_array_function);
+
+ // The target function is the Array constructor; install a sentinel value in
+ // the constructor's type info cell that will track the initial ElementsKind
+ // that should be used for the array when it is constructed.
+ Handle<Object> initial_kind_sentinel =
+ TypeFeedbackCells::MonomorphicArraySentinel(masm->isolate(),
+ GetInitialFastElementsKind());
+ __ mov(r3, Operand(initial_kind_sentinel));
+ __ str(r3, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset));
+ __ b(&done);
+
+ __ bind(&not_array_function);
+ __ str(r1, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset));
+ // No need for a write barrier here; cells are rescanned.
+
+ __ bind(&done);
+}
+
+
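For orientation, the cache-cell transitions that GenerateRecordCallTarget encodes can be modeled in a few lines of C++. This is a simplification under invented names (CacheState, RecordCallTarget); the real cell holds the hole, a JSFunction, an ElementsKind sentinel, or undefined:

    enum CacheState {
      UNINITIALIZED,      // the hole
      MONOMORPHIC,        // a concrete JSFunction
      MONOMORPHIC_ARRAY,  // ElementsKind sentinel for the Array() function
      MEGAMORPHIC         // undefined
    };

    CacheState RecordCallTarget(CacheState state, bool same_target,
                                bool target_is_array_function) {
      switch (state) {
        case UNINITIALIZED:
          // First call: remember the function, or an ElementsKind sentinel
          // when the callee is the Array constructor.
          return target_is_array_function ? MONOMORPHIC_ARRAY : MONOMORPHIC;
        case MONOMORPHIC:
          return same_target ? MONOMORPHIC : MEGAMORPHIC;
        case MONOMORPHIC_ARRAY:
          // Stays special-cased only while the callee is still Array().
          return target_is_array_function ? MONOMORPHIC_ARRAY : MEGAMORPHIC;
        case MEGAMORPHIC:
          break;  // terminal state.
      }
      return MEGAMORPHIC;
    }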
void CallFunctionStub::Generate(MacroAssembler* masm) {
// r1 : the function to call
// r2 : cache cell for call target
@@ -5360,7 +5779,11 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
__ b(ne, &slow);
if (RecordCallTarget()) {
- GenerateRecordCallTarget(masm);
+ if (FLAG_optimize_constructed_arrays) {
+ GenerateRecordCallTarget(masm);
+ } else {
+ GenerateRecordCallTargetNoArray(masm);
+ }
}
// Fast-case: Invoke the function now.
@@ -5399,8 +5822,8 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
__ cmp(r3, Operand(JS_FUNCTION_PROXY_TYPE));
__ b(ne, &non_function);
__ push(r1); // put proxy as additional argument
- __ mov(r0, Operand(argc_ + 1, RelocInfo::NONE));
- __ mov(r2, Operand(0, RelocInfo::NONE));
+ __ mov(r0, Operand(argc_ + 1, RelocInfo::NONE32));
+ __ mov(r2, Operand::Zero());
__ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY);
__ SetCallKind(r5, CALL_AS_METHOD);
{
@@ -5414,7 +5837,7 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
__ bind(&non_function);
__ str(r1, MemOperand(sp, argc_ * kPointerSize));
__ mov(r0, Operand(argc_)); // Set up the number of arguments.
- __ mov(r2, Operand(0, RelocInfo::NONE));
+ __ mov(r2, Operand::Zero());
__ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION);
__ SetCallKind(r5, CALL_AS_METHOD);
__ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
@@ -5435,13 +5858,19 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
__ b(ne, &slow);
if (RecordCallTarget()) {
- GenerateRecordCallTarget(masm);
+ if (FLAG_optimize_constructed_arrays) {
+ GenerateRecordCallTarget(masm);
+ } else {
+ GenerateRecordCallTargetNoArray(masm);
+ }
}
// Jump to the function-specific construct stub.
- __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kConstructStubOffset));
- __ add(pc, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
+ Register jmp_reg = FLAG_optimize_constructed_arrays ? r3 : r2;
+ __ ldr(jmp_reg, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(jmp_reg, FieldMemOperand(jmp_reg,
+ SharedFunctionInfo::kConstructStubOffset));
+ __ add(pc, jmp_reg, Operand(Code::kHeaderSize - kHeapObjectTag));
// r0: number of arguments
// r1: called object
@@ -5457,55 +5886,13 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
__ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
__ bind(&do_call);
// Set expected number of arguments to zero (not changing r0).
- __ mov(r2, Operand(0, RelocInfo::NONE));
+ __ mov(r2, Operand::Zero());
__ SetCallKind(r5, CALL_AS_METHOD);
__ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET);
}
-// Unfortunately you have to run without snapshots to see most of these
-// names in the profile since most compare stubs end up in the snapshot.
-void CompareStub::PrintName(StringStream* stream) {
- ASSERT((lhs_.is(r0) && rhs_.is(r1)) ||
- (lhs_.is(r1) && rhs_.is(r0)));
- const char* cc_name;
- switch (cc_) {
- case lt: cc_name = "LT"; break;
- case gt: cc_name = "GT"; break;
- case le: cc_name = "LE"; break;
- case ge: cc_name = "GE"; break;
- case eq: cc_name = "EQ"; break;
- case ne: cc_name = "NE"; break;
- default: cc_name = "UnknownCondition"; break;
- }
- bool is_equality = cc_ == eq || cc_ == ne;
- stream->Add("CompareStub_%s", cc_name);
- stream->Add(lhs_.is(r0) ? "_r0" : "_r1");
- stream->Add(rhs_.is(r0) ? "_r0" : "_r1");
- if (strict_ && is_equality) stream->Add("_STRICT");
- if (never_nan_nan_ && is_equality) stream->Add("_NO_NAN");
- if (!include_number_compare_) stream->Add("_NO_NUMBER");
- if (!include_smi_compare_) stream->Add("_NO_SMI");
-}
-
-
-int CompareStub::MinorKey() {
- // Encode the three parameters in a unique 16 bit value. To avoid duplicate
- // stubs the never NaN NaN condition is only taken into account if the
- // condition is equals.
- ASSERT((static_cast<unsigned>(cc_) >> 28) < (1 << 12));
- ASSERT((lhs_.is(r0) && rhs_.is(r1)) ||
- (lhs_.is(r1) && rhs_.is(r0)));
- return ConditionField::encode(static_cast<unsigned>(cc_) >> 28)
- | RegisterField::encode(lhs_.is(r0))
- | StrictField::encode(strict_)
- | NeverNanNanField::encode(cc_ == eq ? never_nan_nan_ : false)
- | IncludeNumberCompareField::encode(include_number_compare_)
- | IncludeSmiCompareField::encode(include_smi_compare_);
-}
-
-
// StringCharCodeAtGenerator
void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
Label flat_string;
@@ -5604,10 +5991,10 @@ void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
// Fast case of Heap::LookupSingleCharacterStringFromCode.
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiShiftSize == 0);
- ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1));
+ ASSERT(IsPowerOf2(String::kMaxOneByteCharCode + 1));
__ tst(code_,
Operand(kSmiTagMask |
- ((~String::kMaxAsciiCharCode) << kSmiTagSize)));
+ ((~String::kMaxOneByteCharCode) << kSmiTagSize)));
__ b(ne, &slow_case_);
__ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
@@ -5638,23 +6025,6 @@ void StringCharFromCodeGenerator::GenerateSlow(
}
-// -------------------------------------------------------------------------
-// StringCharAtGenerator
-
-void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) {
- char_code_at_generator_.GenerateFast(masm);
- char_from_code_generator_.GenerateFast(masm);
-}
-
-
-void StringCharAtGenerator::GenerateSlow(
- MacroAssembler* masm,
- const RuntimeCallHelper& call_helper) {
- char_code_at_generator_.GenerateSlow(masm, call_helper);
- char_from_code_generator_.GenerateSlow(masm, call_helper);
-}
-
-
void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
Register dest,
Register src,
@@ -5668,7 +6038,7 @@ void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
if (!ascii) {
__ add(count, count, Operand(count), SetCC);
} else {
- __ cmp(count, Operand(0, RelocInfo::NONE));
+ __ cmp(count, Operand::Zero());
}
__ b(eq, &done);
@@ -5723,7 +6093,7 @@ void StringHelper::GenerateCopyCharactersLong(MacroAssembler* masm,
if (!ascii) {
__ add(count, count, Operand(count), SetCC);
} else {
- __ cmp(count, Operand(0, RelocInfo::NONE));
+ __ cmp(count, Operand::Zero());
}
__ b(eq, &done);
@@ -5834,7 +6204,7 @@ void StringHelper::GenerateCopyCharactersLong(MacroAssembler* masm,
}
-void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
+void StringHelper::GenerateTwoCharacterStringTableProbe(MacroAssembler* masm,
Register c1,
Register c2,
Register scratch1,
@@ -5847,7 +6217,7 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
Register scratch = scratch3;
// Make sure that both characters are not digits, as such strings have a
- // different hash algorithm. Don't try to look for these in the symbol table.
+ // different hash algorithm. Don't try to look for these in the string table.
Label not_array_index;
__ sub(scratch, c1, Operand(static_cast<int>('0')));
__ cmp(scratch, Operand(static_cast<int>('9' - '0')));
@@ -5875,43 +6245,43 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
// chars: two character string, char 1 in byte 0 and char 2 in byte 1.
// hash: hash of two character string.
- // Load symbol table
- // Load address of first element of the symbol table.
- Register symbol_table = c2;
- __ LoadRoot(symbol_table, Heap::kSymbolTableRootIndex);
+ // Load string table
+ // Load address of first element of the string table.
+ Register string_table = c2;
+ __ LoadRoot(string_table, Heap::kStringTableRootIndex);
Register undefined = scratch4;
__ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
- // Calculate capacity mask from the symbol table capacity.
+ // Calculate capacity mask from the string table capacity.
Register mask = scratch2;
- __ ldr(mask, FieldMemOperand(symbol_table, SymbolTable::kCapacityOffset));
+ __ ldr(mask, FieldMemOperand(string_table, StringTable::kCapacityOffset));
__ mov(mask, Operand(mask, ASR, 1));
__ sub(mask, mask, Operand(1));
- // Calculate untagged address of the first element of the symbol table.
- Register first_symbol_table_element = symbol_table;
- __ add(first_symbol_table_element, symbol_table,
- Operand(SymbolTable::kElementsStartOffset - kHeapObjectTag));
+ // Calculate untagged address of the first element of the string table.
+ Register first_string_table_element = string_table;
+ __ add(first_string_table_element, string_table,
+ Operand(StringTable::kElementsStartOffset - kHeapObjectTag));
// Registers
// chars: two character string, char 1 in byte 0 and char 2 in byte 1.
// hash: hash of two character string
// mask: capacity mask
- // first_symbol_table_element: address of the first element of
- // the symbol table
+ // first_string_table_element: address of the first element of
+ // the string table
// undefined: the undefined object
// scratch: -
- // Perform a number of probes in the symbol table.
+ // Perform a number of probes in the string table.
const int kProbes = 4;
- Label found_in_symbol_table;
+ Label found_in_string_table;
Label next_probe[kProbes];
Register candidate = scratch5; // Scratch register contains candidate.
for (int i = 0; i < kProbes; i++) {
- // Calculate entry in symbol table.
+ // Calculate entry in string table.
if (i > 0) {
- __ add(candidate, hash, Operand(SymbolTable::GetProbeOffset(i)));
+ __ add(candidate, hash, Operand(StringTable::GetProbeOffset(i)));
} else {
__ mov(candidate, hash);
}
@@ -5919,9 +6289,9 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
__ and_(candidate, candidate, Operand(mask));
// Load the entry from the string table.
- STATIC_ASSERT(SymbolTable::kEntrySize == 1);
+ STATIC_ASSERT(StringTable::kEntrySize == 1);
__ ldr(candidate,
- MemOperand(first_symbol_table_element,
+ MemOperand(first_string_table_element,
candidate,
LSL,
kPointerSizeLog2));
@@ -5937,7 +6307,7 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
if (FLAG_debug_code) {
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(ip, candidate);
- __ Assert(eq, "oddball in symbol table is not undefined or the hole");
+ __ Assert(eq, "oddball in string table is not undefined or the hole");
}
__ jmp(&next_probe[i]);
@@ -5955,9 +6325,9 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
// Check if the two characters match.
// Assumes that word load is little endian.
- __ ldrh(scratch, FieldMemOperand(candidate, SeqAsciiString::kHeaderSize));
+ __ ldrh(scratch, FieldMemOperand(candidate, SeqOneByteString::kHeaderSize));
__ cmp(chars, scratch);
- __ b(eq, &found_in_symbol_table);
+ __ b(eq, &found_in_string_table);
__ bind(&next_probe[i]);
}
@@ -5966,7 +6336,7 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
// Scratch register contains result when we fall through to here.
Register result = candidate;
- __ bind(&found_in_symbol_table);
+ __ bind(&found_in_string_table);
__ Move(r0, result);
}
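The probe sequence above is plain open addressing over a power-of-two table. A minimal sketch, assuming a triangular-number probe schedule like V8's generic hash tables use (the exact StringTable::GetProbeOffset formula does not appear in this diff):

    #include <cstdint>

    static const int kProbes = 4;  // same bound as the generated code.

    // Returns the slot index whose entry matches |chars| (char 1 in byte 0,
    // char 2 in byte 1), or -1 after kProbes misses. |capacity| must be a
    // power of two so that &-ing with capacity - 1 wraps the index.
    int ProbeTwoCharString(const uint32_t* slots, uint32_t capacity,
                           uint32_t hash, uint32_t chars) {
      uint32_t mask = capacity - 1;
      for (int i = 0; i < kProbes; ++i) {
        uint32_t offset = (i + i * i) / 2;   // assumed probe schedule.
        uint32_t index = (hash + offset) & mask;
        if (slots[index] == chars) return static_cast<int>(index);
        // The stub also stops at undefined slots and skips the-hole slots.
      }
      return -1;  // caller falls back to allocating a fresh string.
    }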
@@ -6062,6 +6432,10 @@ void SubStringStub::Generate(MacroAssembler* masm) {
ASSERT(is_string == eq);
__ b(NegateCondition(is_string), &runtime);
+ Label single_char;
+ __ cmp(r2, Operand(1));
+ __ b(eq, &single_char);
+
// Short-cut for the case of trivial substring.
Label return_r0;
// r0: original string
@@ -6091,7 +6465,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ b(ne, &sliced_string);
// Cons string. Check whether it is flat, then fetch first part.
__ ldr(r5, FieldMemOperand(r0, ConsString::kSecondOffset));
- __ CompareRoot(r5, Heap::kEmptyStringRootIndex);
+ __ CompareRoot(r5, Heap::kempty_stringRootIndex);
__ b(ne, &runtime);
__ ldr(r5, FieldMemOperand(r0, ConsString::kFirstOffset));
// Update instance type.
@@ -6168,8 +6542,8 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ bind(&sequential_string);
// Locate first character of underlying subject string.
- STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
- __ add(r5, r5, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
+ __ add(r5, r5, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
__ bind(&allocate_result);
// Sequential ASCII string. Allocate the result.
@@ -6183,13 +6557,13 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// Locate first character of substring to copy.
__ add(r5, r5, r3);
// Locate first character of result.
- __ add(r1, r0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ add(r1, r0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
// r0: result string
// r1: first character of result string
// r2: result string length
// r5: first character of substring to copy
- STATIC_ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
+ STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
StringHelper::GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r7, r9,
COPY_ASCII | DEST_ALWAYS_ALIGNED);
__ jmp(&return_r0);
@@ -6215,12 +6589,25 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ bind(&return_r0);
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->sub_string_native(), 1, r3, r4);
- __ add(sp, sp, Operand(3 * kPointerSize));
+ __ Drop(3);
__ Ret();
// Just jump to runtime to create the sub string.
__ bind(&runtime);
__ TailCallRuntime(Runtime::kSubString, 3, 1);
+
+ __ bind(&single_char);
+ // r0: original string
+ // r1: instance type
+ // r2: length
+ // r3: from index (untagged)
+ __ SmiTag(r3, r3);
+ StringCharAtGenerator generator(
+ r0, r3, r2, r0, &runtime, &runtime, &runtime, STRING_INDEX_IS_NUMBER);
+ generator.GenerateFast(masm);
+ __ Drop(3);
+ __ Ret();
+ generator.SkipSlow(masm, &runtime);
}
@@ -6246,7 +6633,7 @@ void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
Label compare_chars;
__ bind(&check_zero_length);
STATIC_ASSERT(kSmiTag == 0);
- __ cmp(length, Operand(0));
+ __ cmp(length, Operand::Zero());
__ b(ne, &compare_chars);
__ mov(r0, Operand(Smi::FromInt(EQUAL)));
__ Ret();
@@ -6279,7 +6666,7 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
__ mov(scratch1, scratch2, LeaveCC, gt);
Register min_length = scratch1;
STATIC_ASSERT(kSmiTag == 0);
- __ cmp(min_length, Operand(0));
+ __ cmp(min_length, Operand::Zero());
__ b(eq, &compare_lengths);
// Compare loop.
@@ -6314,7 +6701,7 @@ void StringCompareStub::GenerateAsciiCharsCompareLoop(
// doesn't need an additional compare.
__ SmiUntag(length);
__ add(scratch1, length,
- Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
__ add(left, left, Operand(scratch1));
__ add(right, right, Operand(scratch1));
__ rsb(length, length, Operand::Zero());
@@ -6451,8 +6838,8 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// Adding two lengths can't overflow.
STATIC_ASSERT(String::kMaxLength < String::kMaxLength * 2);
__ add(r6, r2, Operand(r3));
- // Use the symbol table when adding two one character strings, as it
- // helps later optimizations to return a symbol here.
+ // Use the string table when adding two one-character strings, as it
+ // helps later optimizations to return a string here.
__ cmp(r6, Operand(2));
__ b(ne, &longer_than_two);
@@ -6467,13 +6854,13 @@ void StringAddStub::Generate(MacroAssembler* masm) {
&call_runtime);
// Get the two characters forming the substring.
- __ ldrb(r2, FieldMemOperand(r0, SeqAsciiString::kHeaderSize));
- __ ldrb(r3, FieldMemOperand(r1, SeqAsciiString::kHeaderSize));
+ __ ldrb(r2, FieldMemOperand(r0, SeqOneByteString::kHeaderSize));
+ __ ldrb(r3, FieldMemOperand(r1, SeqOneByteString::kHeaderSize));
- // Try to lookup two character string in symbol table. If it is not found
+ // Try to look up a two-character string in the string table. If it is not found
// just allocate a new one.
Label make_two_character_string;
- StringHelper::GenerateTwoCharacterSymbolTableProbe(
+ StringHelper::GenerateTwoCharacterStringTableProbe(
masm, r2, r3, r6, r7, r4, r5, r9, &make_two_character_string);
__ IncrementCounter(counters->string_add_native(), 1, r2, r3);
__ add(sp, sp, Operand(2 * kPointerSize));
@@ -6487,7 +6874,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// in little-endian mode)
__ mov(r6, Operand(2));
__ AllocateAsciiString(r0, r6, r4, r5, r9, &call_runtime);
- __ strh(r2, FieldMemOperand(r0, SeqAsciiString::kHeaderSize));
+ __ strh(r2, FieldMemOperand(r0, SeqOneByteString::kHeaderSize));
__ IncrementCounter(counters->string_add_native(), 1, r2, r3);
__ add(sp, sp, Operand(2 * kPointerSize));
__ Ret();
@@ -6574,10 +6961,10 @@ void StringAddStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT(kSeqStringTag == 0);
__ tst(r4, Operand(kStringRepresentationMask));
- STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
+ STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
__ add(r7,
r0,
- Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag),
+ Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag),
LeaveCC,
eq);
__ b(eq, &first_prepared);
@@ -6590,10 +6977,10 @@ void StringAddStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT(kSeqStringTag == 0);
__ tst(r5, Operand(kStringRepresentationMask));
- STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
+ STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
__ add(r1,
r1,
- Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag),
+ Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag),
LeaveCC,
eq);
__ b(eq, &second_prepared);
@@ -6616,7 +7003,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ b(eq, &non_ascii_string_add_flat_result);
__ AllocateAsciiString(r0, r6, r4, r5, r9, &call_runtime);
- __ add(r6, r0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ add(r6, r0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
// r0: result string.
// r7: first character of first string.
// r1: first character of second string.
@@ -6707,7 +7094,7 @@ void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::SMIS);
+ ASSERT(state_ == CompareIC::SMI);
Label miss;
__ orr(r2, r1, r0);
__ JumpIfNotSmi(r2, &miss);
@@ -6727,32 +7114,54 @@ void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
}
-void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::HEAP_NUMBERS);
+void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
+ ASSERT(state_ == CompareIC::NUMBER);
Label generic_stub;
Label unordered, maybe_undefined1, maybe_undefined2;
Label miss;
- __ and_(r2, r1, Operand(r0));
- __ JumpIfSmi(r2, &generic_stub);
- __ CompareObjectType(r0, r2, r2, HEAP_NUMBER_TYPE);
- __ b(ne, &maybe_undefined1);
- __ CompareObjectType(r1, r2, r2, HEAP_NUMBER_TYPE);
- __ b(ne, &maybe_undefined2);
+ if (left_ == CompareIC::SMI) {
+ __ JumpIfNotSmi(r1, &miss);
+ }
+ if (right_ == CompareIC::SMI) {
+ __ JumpIfNotSmi(r0, &miss);
+ }
// Inlining the double comparison and falling back to the general compare
- // stub if NaN is involved or VFP3 is unsupported.
+ // stub if NaN is involved or VFP2 is unsupported.
if (CpuFeatures::IsSupported(VFP2)) {
CpuFeatures::Scope scope(VFP2);
- // Load left and right operand
- __ sub(r2, r1, Operand(kHeapObjectTag));
- __ vldr(d0, r2, HeapNumber::kValueOffset);
+ // Load left and right operand.
+ Label done, left, left_smi, right_smi;
+ __ JumpIfSmi(r0, &right_smi);
+ __ CheckMap(r0, r2, Heap::kHeapNumberMapRootIndex, &maybe_undefined1,
+ DONT_DO_SMI_CHECK);
__ sub(r2, r0, Operand(kHeapObjectTag));
__ vldr(d1, r2, HeapNumber::kValueOffset);
+ __ b(&left);
+ __ bind(&right_smi);
+ __ SmiUntag(r2, r0); // Can't clobber r0 yet.
+ SwVfpRegister single_scratch = d2.low();
+ __ vmov(single_scratch, r2);
+ __ vcvt_f64_s32(d1, single_scratch);
+
+ __ bind(&left);
+ __ JumpIfSmi(r1, &left_smi);
+ __ CheckMap(r1, r2, Heap::kHeapNumberMapRootIndex, &maybe_undefined2,
+ DONT_DO_SMI_CHECK);
+ __ sub(r2, r1, Operand(kHeapObjectTag));
+ __ vldr(d0, r2, HeapNumber::kValueOffset);
+ __ b(&done);
+ __ bind(&left_smi);
+ __ SmiUntag(r2, r1); // Can't clobber r1 yet.
+ single_scratch = d3.low();
+ __ vmov(single_scratch, r2);
+ __ vcvt_f64_s32(d0, single_scratch);
- // Compare operands
+ __ bind(&done);
+ // Compare operands.
__ VFPCompareAndSetFlags(d0, d1);
// Don't base result on status bits when a NaN is involved.
@@ -6766,14 +7175,16 @@ void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
}
__ bind(&unordered);
- CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS, r1, r0);
__ bind(&generic_stub);
- __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+ ICCompareStub stub(op_, CompareIC::GENERIC, CompareIC::GENERIC,
+ CompareIC::GENERIC);
+ __ Jump(stub.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
__ bind(&maybe_undefined1);
if (Token::IsOrderedRelationalCompareOp(op_)) {
__ CompareRoot(r0, Heap::kUndefinedValueRootIndex);
__ b(ne, &miss);
+ __ JumpIfSmi(r1, &unordered);
__ CompareObjectType(r1, r2, r2, HEAP_NUMBER_TYPE);
__ b(ne, &maybe_undefined2);
__ jmp(&unordered);
@@ -6790,8 +7201,8 @@ void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
}
-void ICCompareStub::GenerateSymbols(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::SYMBOLS);
+void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) {
+ ASSERT(state_ == CompareIC::INTERNALIZED_STRING);
Label miss;
// Registers containing left and right operands respectively.
@@ -6803,17 +7214,68 @@ void ICCompareStub::GenerateSymbols(MacroAssembler* masm) {
// Check that both operands are heap objects.
__ JumpIfEitherSmi(left, right, &miss);
- // Check that both operands are symbols.
+ // Check that both operands are internalized strings.
__ ldr(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
__ ldr(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
__ ldrb(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
__ ldrb(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
- STATIC_ASSERT(kSymbolTag != 0);
+ STATIC_ASSERT(kInternalizedTag != 0);
__ and_(tmp1, tmp1, Operand(tmp2));
- __ tst(tmp1, Operand(kIsSymbolMask));
+ __ tst(tmp1, Operand(kIsInternalizedMask));
__ b(eq, &miss);
- // Symbols are compared by identity.
+ // Internalized strings are compared by identity.
+ __ cmp(left, right);
+ // Make sure r0 is non-zero. At this point input operands are
+ // guaranteed to be non-zero.
+ ASSERT(right.is(r0));
+ STATIC_ASSERT(EQUAL == 0);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ mov(r0, Operand(Smi::FromInt(EQUAL)), LeaveCC, eq);
+ __ Ret();
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+}
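The tail of this stub relies on a compact trick: internalized strings are unique, so pointer identity decides equality, and r0 (the right operand) doubles as the result. A sketch of that convention, with a hypothetical signature:

    #include <cstdint>

    // Any heap pointer is non-zero and means "not equal"; a match overwrites
    // the result with Smi::FromInt(EQUAL), which is 0.
    intptr_t CompareInternalized(intptr_t left, intptr_t right) {
      return left == right ? 0 /* Smi(EQUAL) */ : right;
    }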
+
+
+void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) {
+ ASSERT(state_ == CompareIC::UNIQUE_NAME);
+ ASSERT(GetCondition() == eq);
+ Label miss;
+
+ // Registers containing left and right operands respectively.
+ Register left = r1;
+ Register right = r0;
+ Register tmp1 = r2;
+ Register tmp2 = r3;
+
+ // Check that both operands are heap objects.
+ __ JumpIfEitherSmi(left, right, &miss);
+
+ // Check that both operands are unique names. This leaves the instance
+ // types loaded in tmp1 and tmp2.
+ STATIC_ASSERT(kInternalizedTag != 0);
+ __ ldr(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
+ __ ldr(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
+ __ ldrb(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
+ __ ldrb(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
+
+ Label succeed1;
+ __ tst(tmp1, Operand(kIsInternalizedMask));
+ __ b(ne, &succeed1);
+ __ cmp(tmp1, Operand(SYMBOL_TYPE));
+ __ b(ne, &miss);
+ __ bind(&succeed1);
+
+ Label succeed2;
+ __ tst(tmp2, Operand(kIsInternalizedMask));
+ __ b(ne, &succeed2);
+ __ cmp(tmp2, Operand(SYMBOL_TYPE));
+ __ b(ne, &miss);
+ __ bind(&succeed2);
+
+ // Unique names are compared by identity.
__ cmp(left, right);
// Make sure r0 is non-zero. At this point input operands are
// guaranteed to be non-zero.
@@ -6829,7 +7291,7 @@ void ICCompareStub::GenerateSymbols(MacroAssembler* masm) {
void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::STRINGS);
+ ASSERT(state_ == CompareIC::STRING);
Label miss;
bool equality = Token::IsEqualityOp(op_);
@@ -6865,13 +7327,13 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
// Handle not identical strings.
- // Check that both strings are symbols. If they are, we're done
+ // Check that both strings are internalized strings. If they are, we're done
// because we already know they are not identical.
if (equality) {
ASSERT(GetCondition() == eq);
- STATIC_ASSERT(kSymbolTag != 0);
+ STATIC_ASSERT(kInternalizedTag != 0);
__ and_(tmp3, tmp1, Operand(tmp2));
- __ tst(tmp3, Operand(kIsSymbolMask));
+ __ tst(tmp3, Operand(kIsInternalizedMask));
// Make sure r0 is non-zero. At this point input operands are
// guaranteed to be non-zero.
ASSERT(right.is(r0));
@@ -6907,7 +7369,7 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::OBJECTS);
+ ASSERT(state_ == CompareIC::OBJECT);
Label miss;
__ and_(r2, r1, Operand(r0));
__ JumpIfSmi(r2, &miss);
@@ -7001,8 +7463,9 @@ void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
Register target) {
- __ mov(lr, Operand(reinterpret_cast<intptr_t>(GetCode().location()),
- RelocInfo::CODE_TARGET));
+ intptr_t code =
+ reinterpret_cast<intptr_t>(GetCode(masm->isolate()).location());
+ __ mov(lr, Operand(code, RelocInfo::CODE_TARGET));
// Prevent literal pool emission during calculation of return address.
Assembler::BlockConstPoolScope block_const_pool(masm);
@@ -7069,11 +7532,11 @@ void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
__ cmp(entity_name, tmp);
__ b(eq, &the_hole);
- // Check if the entry name is not a symbol.
+ // Check if the entry name is not an internalized string.
__ ldr(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
__ ldrb(entity_name,
FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
- __ tst(entity_name, Operand(kIsSymbolMask));
+ __ tst(entity_name, Operand(kIsInternalizedMask));
__ b(eq, miss);
__ bind(&the_hole);
@@ -7093,7 +7556,7 @@ void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
__ mov(r1, Operand(Handle<String>(name)));
StringDictionaryLookupStub stub(NEGATIVE_LOOKUP);
__ CallStub(&stub);
- __ cmp(r0, Operand(0));
+ __ cmp(r0, Operand::Zero());
__ ldm(ia_w, sp, spill_mask);
__ b(eq, done);
@@ -7169,7 +7632,7 @@ void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
}
StringDictionaryLookupStub stub(POSITIVE_LOOKUP);
__ CallStub(&stub);
- __ cmp(r0, Operand(0));
+ __ cmp(r0, Operand::Zero());
__ mov(scratch2, Operand(r2));
__ ldm(ia_w, sp, spill_mask);
@@ -7242,11 +7705,11 @@ void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
__ b(eq, &in_dictionary);
if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
- // Check if the entry name is not a symbol.
+ // Check if the entry name is not an internalized string.
__ ldr(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
__ ldrb(entry_key,
FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
- __ tst(entry_key, Operand(kIsSymbolMask));
+ __ tst(entry_key, Operand(kIsInternalizedMask));
__ b(eq, &maybe_in_dictionary);
}
}
@@ -7280,7 +7743,6 @@ struct AheadOfTimeWriteBarrierStubList {
static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
// Used in RegExpExecStub.
{ REG(r6), REG(r4), REG(r7), EMIT_REMEMBERED_SET },
- { REG(r6), REG(r2), REG(r7), EMIT_REMEMBERED_SET },
// Used in CompileArrayPushCall.
// Also used in StoreIC::GenerateNormal via GenerateDictionaryStore.
// Also used in KeyedStoreIC::GenerateGeneric.
@@ -7336,13 +7798,14 @@ bool StoreBufferOverflowStub::IsPregenerated() {
}
-void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime() {
+void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
+ Isolate* isolate) {
StoreBufferOverflowStub stub1(kDontSaveFPRegs);
- stub1.GetCode()->set_is_pregenerated(true);
+ stub1.GetCode(isolate)->set_is_pregenerated(true);
}
-void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() {
+void RecordWriteStub::GenerateFixedRegStubsAheadOfTime(Isolate* isolate) {
for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
!entry->object.is(no_reg);
entry++) {
@@ -7351,7 +7814,7 @@ void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() {
entry->address,
entry->action,
kDontSaveFPRegs);
- stub.GetCode()->set_is_pregenerated(true);
+ stub.GetCode(isolate)->set_is_pregenerated(true);
}
}
@@ -7456,12 +7919,7 @@ void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
ASSERT(!address.is(r0));
__ Move(address, regs_.address());
__ Move(r0, regs_.object());
- if (mode == INCREMENTAL_COMPACTION) {
- __ Move(r1, address);
- } else {
- ASSERT(mode == INCREMENTAL);
- __ ldr(r1, MemOperand(address, 0));
- }
+ __ Move(r1, address);
__ mov(r2, Operand(ExternalReference::isolate_address()));
AllowExternalCallThatCantCauseGC scope(masm);
@@ -7619,7 +8077,7 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
// Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
__ bind(&double_elements);
__ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
- __ StoreNumberToDoubleElements(r0, r3, r1,
+ __ StoreNumberToDoubleElements(r0, r3,
// Overwrites all regs after this.
r5, r6, r7, r9, r2,
&slow_elements);
@@ -7627,9 +8085,24 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
}
+void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
+ ASSERT(!Serializer::enabled());
+ bool save_fp_regs = CpuFeatures::IsSupported(VFP2);
+ CEntryStub ces(1, save_fp_regs ? kSaveFPRegs : kDontSaveFPRegs);
+ __ Call(ces.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
+ int parameter_count_offset =
+ StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
+ __ ldr(r1, MemOperand(fp, parameter_count_offset));
+ masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
+ __ mov(r1, Operand(r1, LSL, kPointerSizeLog2));
+ __ add(sp, sp, r1);
+ __ Ret();
+}
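The epilogue arithmetic here is easy to skim past: once the frame is left, the caller's stack parameters still have to be popped, so the count is scaled to bytes and added to sp. A sketch, assuming the frame slot holds an untagged count, as the plain shift by kPointerSizeLog2 suggests:

    #include <cstdint>

    // kPointerSizeLog2 == 2 on ARM, so count << 2 is the byte size of the
    // stack-passed parameters; adding it to sp pops them in one step.
    uintptr_t PopStackParameters(uintptr_t sp, uintptr_t count) {
      return sp + (count << 2);
    }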
+
+
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
if (entry_hook_ != NULL) {
- PredictableCodeSizeScope predictable(masm);
+ PredictableCodeSizeScope predictable(masm, 4 * Assembler::kInstrSize);
ProfileEntryHookStub stub;
__ push(lr);
__ CallStub(&stub);
diff --git a/src/3rdparty/v8/src/arm/code-stubs-arm.h b/src/3rdparty/v8/src/arm/code-stubs-arm.h
index 3e79624..f952756 100644
--- a/src/3rdparty/v8/src/arm/code-stubs-arm.h
+++ b/src/3rdparty/v8/src/arm/code-stubs-arm.h
@@ -36,7 +36,7 @@ namespace internal {
// Compute a transcendental math function natively, or call the
// TranscendentalCache runtime function.
-class TranscendentalCacheStub: public CodeStub {
+class TranscendentalCacheStub: public PlatformCodeStub {
public:
enum ArgumentType {
TAGGED = 0 << TranscendentalCache::kTranscendentalTypeBits,
@@ -58,7 +58,7 @@ class TranscendentalCacheStub: public CodeStub {
};
-class StoreBufferOverflowStub: public CodeStub {
+class StoreBufferOverflowStub: public PlatformCodeStub {
public:
explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
: save_doubles_(save_fp) { }
@@ -66,7 +66,7 @@ class StoreBufferOverflowStub: public CodeStub {
void Generate(MacroAssembler* masm);
virtual bool IsPregenerated();
- static void GenerateFixedRegStubsAheadOfTime();
+ static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
virtual bool SometimesSetsUpAFrame() { return false; }
private:
@@ -77,7 +77,7 @@ class StoreBufferOverflowStub: public CodeStub {
};
-class UnaryOpStub: public CodeStub {
+class UnaryOpStub: public PlatformCodeStub {
public:
UnaryOpStub(Token::Value op,
UnaryOverwriteMode mode,
@@ -119,9 +119,9 @@ class UnaryOpStub: public CodeStub {
void GenerateSmiCodeSub(MacroAssembler* masm, Label* non_smi, Label* slow);
void GenerateSmiCodeBitNot(MacroAssembler* masm, Label* slow);
- void GenerateHeapNumberStub(MacroAssembler* masm);
- void GenerateHeapNumberStubSub(MacroAssembler* masm);
- void GenerateHeapNumberStubBitNot(MacroAssembler* masm);
+ void GenerateNumberStub(MacroAssembler* masm);
+ void GenerateNumberStubSub(MacroAssembler* masm);
+ void GenerateNumberStubBitNot(MacroAssembler* masm);
void GenerateHeapNumberCodeSub(MacroAssembler* masm, Label* slow);
void GenerateHeapNumberCodeBitNot(MacroAssembler* masm, Label* slow);
@@ -142,108 +142,6 @@ class UnaryOpStub: public CodeStub {
};
-class BinaryOpStub: public CodeStub {
- public:
- BinaryOpStub(Token::Value op, OverwriteMode mode)
- : op_(op),
- mode_(mode),
- operands_type_(BinaryOpIC::UNINITIALIZED),
- result_type_(BinaryOpIC::UNINITIALIZED) {
- use_vfp2_ = CpuFeatures::IsSupported(VFP2);
- ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
- }
-
- BinaryOpStub(
- int key,
- BinaryOpIC::TypeInfo operands_type,
- BinaryOpIC::TypeInfo result_type = BinaryOpIC::UNINITIALIZED)
- : op_(OpBits::decode(key)),
- mode_(ModeBits::decode(key)),
- use_vfp2_(VFP2Bits::decode(key)),
- operands_type_(operands_type),
- result_type_(result_type) { }
-
- private:
- enum SmiCodeGenerateHeapNumberResults {
- ALLOW_HEAPNUMBER_RESULTS,
- NO_HEAPNUMBER_RESULTS
- };
-
- Token::Value op_;
- OverwriteMode mode_;
- bool use_vfp2_;
-
- // Operand type information determined at runtime.
- BinaryOpIC::TypeInfo operands_type_;
- BinaryOpIC::TypeInfo result_type_;
-
- virtual void PrintName(StringStream* stream);
-
- // Minor key encoding in 16 bits RRRTTTVOOOOOOOMM.
- class ModeBits: public BitField<OverwriteMode, 0, 2> {};
- class OpBits: public BitField<Token::Value, 2, 7> {};
- class VFP2Bits: public BitField<bool, 9, 1> {};
- class OperandTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 10, 3> {};
- class ResultTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 13, 3> {};
-
- Major MajorKey() { return BinaryOp; }
- int MinorKey() {
- return OpBits::encode(op_)
- | ModeBits::encode(mode_)
- | VFP2Bits::encode(use_vfp2_)
- | OperandTypeInfoBits::encode(operands_type_)
- | ResultTypeInfoBits::encode(result_type_);
- }
-
- void Generate(MacroAssembler* masm);
- void GenerateGeneric(MacroAssembler* masm);
- void GenerateSmiSmiOperation(MacroAssembler* masm);
- void GenerateFPOperation(MacroAssembler* masm,
- bool smi_operands,
- Label* not_numbers,
- Label* gc_required);
- void GenerateSmiCode(MacroAssembler* masm,
- Label* use_runtime,
- Label* gc_required,
- SmiCodeGenerateHeapNumberResults heapnumber_results);
- void GenerateLoadArguments(MacroAssembler* masm);
- void GenerateReturn(MacroAssembler* masm);
- void GenerateUninitializedStub(MacroAssembler* masm);
- void GenerateSmiStub(MacroAssembler* masm);
- void GenerateInt32Stub(MacroAssembler* masm);
- void GenerateHeapNumberStub(MacroAssembler* masm);
- void GenerateOddballStub(MacroAssembler* masm);
- void GenerateStringStub(MacroAssembler* masm);
- void GenerateBothStringStub(MacroAssembler* masm);
- void GenerateGenericStub(MacroAssembler* masm);
- void GenerateAddStrings(MacroAssembler* masm);
- void GenerateCallRuntime(MacroAssembler* masm);
-
- void GenerateHeapResultAllocation(MacroAssembler* masm,
- Register result,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Label* gc_required);
- void GenerateRegisterArgsPush(MacroAssembler* masm);
- void GenerateTypeTransition(MacroAssembler* masm);
- void GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm);
-
- virtual int GetCodeKind() { return Code::BINARY_OP_IC; }
-
- virtual InlineCacheState GetICState() {
- return BinaryOpIC::ToState(operands_type_);
- }
-
- virtual void FinishCode(Handle<Code> code) {
- code->set_binary_op_type(operands_type_);
- code->set_binary_op_result_type(result_type_);
- }
-
- friend class CodeGenerator;
-};
-
-
class StringHelper : public AllStatic {
public:
// Generate code for copying characters using a simple loop. This should only
@@ -274,14 +172,14 @@ class StringHelper : public AllStatic {
int flags);
- // Probe the symbol table for a two character string. If the string is
+ // Probe the string table for a two character string. If the string is
// not found by probing, a jump to the label not_found is performed. This jump
- // does not guarantee that the string is not in the symbol table. If the
+ // does not guarantee that the string is not in the string table. If the
// string is found the code falls through with the string in register r0.
// Contents of both c1 and c2 registers are modified. At the exit c1 is
// guaranteed to contain a halfword with low and high bytes equal to the
// initial contents of c1 and c2 respectively.
- static void GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
+ static void GenerateTwoCharacterStringTableProbe(MacroAssembler* masm,
Register c1,
Register c2,
Register scratch1,
@@ -321,7 +219,7 @@ enum StringAddFlags {
};
-class StringAddStub: public CodeStub {
+class StringAddStub: public PlatformCodeStub {
public:
explicit StringAddStub(StringAddFlags flags) : flags_(flags) {}
@@ -344,7 +242,7 @@ class StringAddStub: public CodeStub {
};
-class SubStringStub: public CodeStub {
+class SubStringStub: public PlatformCodeStub {
public:
SubStringStub() {}
@@ -357,7 +255,7 @@ class SubStringStub: public CodeStub {
-class StringCompareStub: public CodeStub {
+class StringCompareStub: public PlatformCodeStub {
public:
StringCompareStub() { }
@@ -397,7 +295,7 @@ class StringCompareStub: public CodeStub {
// This stub can convert a signed int32 to a heap number (double). It does
// not work for int32s that are in Smi range! No GC occurs during this stub,
// so you don't have to set up the frame.
-class WriteInt32ToHeapNumberStub : public CodeStub {
+class WriteInt32ToHeapNumberStub : public PlatformCodeStub {
public:
WriteInt32ToHeapNumberStub(Register the_int,
Register the_heap_number,
@@ -407,7 +305,7 @@ class WriteInt32ToHeapNumberStub : public CodeStub {
scratch_(scratch) { }
bool IsPregenerated();
- static void GenerateFixedRegStubsAheadOfTime();
+ static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
private:
Register the_int_;
@@ -431,7 +329,7 @@ class WriteInt32ToHeapNumberStub : public CodeStub {
};
-class NumberToStringStub: public CodeStub {
+class NumberToStringStub: public PlatformCodeStub {
public:
NumberToStringStub() { }
@@ -457,7 +355,7 @@ class NumberToStringStub: public CodeStub {
};
-class RecordWriteStub: public CodeStub {
+class RecordWriteStub: public PlatformCodeStub {
public:
RecordWriteStub(Register object,
Register value,
@@ -481,7 +379,7 @@ class RecordWriteStub: public CodeStub {
};
virtual bool IsPregenerated();
- static void GenerateFixedRegStubsAheadOfTime();
+ static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
virtual bool SometimesSetsUpAFrame() { return false; }
static void PatchBranchIntoNop(MacroAssembler* masm, int pos) {
@@ -571,12 +469,15 @@ class RecordWriteStub: public CodeStub {
void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
masm->stm(db_w, sp, (kCallerSaved | lr.bit()) & ~scratch1_.bit());
if (mode == kSaveFPRegs) {
+ // Number of d-regs not known at snapshot time.
+ ASSERT(!Serializer::enabled());
CpuFeatures::Scope scope(VFP2);
masm->sub(sp,
sp,
- Operand(kDoubleSize * (DwVfpRegister::kNumRegisters - 1)));
+ Operand(kDoubleSize * (DwVfpRegister::NumRegisters() - 1)));
// Save all VFP registers except d0.
- for (int i = DwVfpRegister::kNumRegisters - 1; i > 0; i--) {
+ // TODO(hans): We should probably save d0 too. And maybe use vstm.
+ for (int i = DwVfpRegister::NumRegisters() - 1; i > 0; i--) {
DwVfpRegister reg = DwVfpRegister::from_code(i);
masm->vstr(reg, MemOperand(sp, (i - 1) * kDoubleSize));
}
@@ -586,15 +487,18 @@ class RecordWriteStub: public CodeStub {
inline void RestoreCallerSaveRegisters(MacroAssembler*masm,
SaveFPRegsMode mode) {
if (mode == kSaveFPRegs) {
+ // Number of d-regs not known at snapshot time.
+ ASSERT(!Serializer::enabled());
CpuFeatures::Scope scope(VFP2);
// Restore all VFP registers except d0.
- for (int i = DwVfpRegister::kNumRegisters - 1; i > 0; i--) {
+ // TODO(hans): We should probably restore d0 too. And maybe use vldm.
+ for (int i = DwVfpRegister::NumRegisters() - 1; i > 0; i--) {
DwVfpRegister reg = DwVfpRegister::from_code(i);
masm->vldr(reg, MemOperand(sp, (i - 1) * kDoubleSize));
}
masm->add(sp,
sp,
- Operand(kDoubleSize * (DwVfpRegister::kNumRegisters - 1)));
+ Operand(kDoubleSize * (DwVfpRegister::NumRegisters() - 1)));
}
masm->ldm(ia_w, sp, (kCallerSaved | lr.bit()) & ~scratch1_.bit());
}
@@ -613,7 +517,7 @@ class RecordWriteStub: public CodeStub {
Register GetRegThatIsNotOneOf(Register r1,
Register r2,
Register r3) {
- for (int i = 0; i < Register::kNumAllocatableRegisters; i++) {
+ for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
Register candidate = Register::FromAllocationIndex(i);
if (candidate.is(r1)) continue;
if (candidate.is(r2)) continue;
@@ -672,7 +576,7 @@ class RecordWriteStub: public CodeStub {
// Enter C code from generated RegExp code in a way that allows
// the C code to fix the return address in case of a GC.
// Currently only needed on ARM.
-class RegExpCEntryStub: public CodeStub {
+class RegExpCEntryStub: public PlatformCodeStub {
public:
RegExpCEntryStub() {}
virtual ~RegExpCEntryStub() {}
@@ -691,7 +595,7 @@ class RegExpCEntryStub: public CodeStub {
// keep the code which called into native pinned in memory. Currently the
// simplest approach is to generate such a stub early enough so it can never
// be moved by GC.
-class DirectCEntryStub: public CodeStub {
+class DirectCEntryStub: public PlatformCodeStub {
public:
DirectCEntryStub() {}
void Generate(MacroAssembler* masm);
@@ -724,20 +628,6 @@ class FloatingPointHelper : public AllStatic {
Register scratch1,
Register scratch2);
- // Loads objects from r0 and r1 (right and left in binary operations) into
- // floating point registers. Depending on the destination the values ends up
- // either d7 and d6 or in r2/r3 and r0/r1 respectively. If the destination is
- // floating point registers VFP3 must be supported. If core registers are
- // requested when VFP3 is supported d6 and d7 will still be scratched. If
- // either r0 or r1 is not a number (not smi and not heap number object) the
- // not_number label is jumped to with r0 and r1 intact.
- static void LoadOperands(MacroAssembler* masm,
- FloatingPointHelper::Destination destination,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Label* not_number);
-
// Convert the smi or heap number in object to an int32 using the rules
// for ToInt32 as described in ECMAScript 9.5.: the value is truncated
// and brought into the range -2^31 .. +2^31 - 1.
@@ -836,7 +726,12 @@ class FloatingPointHelper : public AllStatic {
Register heap_number_result,
Register scratch);
- private:
+ // Loads the number from |object| into floating point registers.
+ // Depending on |destination| the value ends up either in |dst| or
+ // in |dst1|/|dst2|. If |destination| is kVFPRegisters, then VFP3
+ // must be supported. If kCoreRegisters are requested and VFP3 is
+ // supported, |dst| will be scratched. If |object| is neither smi nor
+ // heap number, |not_number| is jumped to with |object| still intact.
static void LoadNumber(MacroAssembler* masm,
FloatingPointHelper::Destination destination,
Register object,
@@ -850,7 +745,7 @@ class FloatingPointHelper : public AllStatic {
};
-class StringDictionaryLookupStub: public CodeStub {
+class StringDictionaryLookupStub: public PlatformCodeStub {
public:
enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
diff --git a/src/3rdparty/v8/src/arm/codegen-arm.cc b/src/3rdparty/v8/src/arm/codegen-arm.cc
index 209e151..6e3c635 100644
--- a/src/3rdparty/v8/src/arm/codegen-arm.cc
+++ b/src/3rdparty/v8/src/arm/codegen-arm.cc
@@ -31,11 +31,11 @@
#include "codegen.h"
#include "macro-assembler.h"
+#include "simulator-arm.h"
namespace v8 {
namespace internal {
-#define __ ACCESS_MASM(masm)
UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) {
switch (type) {
@@ -49,6 +49,75 @@ UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) {
}
+#define __ masm.
+
+
+#if defined(USE_SIMULATOR)
+byte* fast_exp_arm_machine_code = NULL;
+double fast_exp_simulator(double x) {
+ return Simulator::current(Isolate::Current())->CallFP(
+ fast_exp_arm_machine_code, x, 0);
+}
+#endif
+
+
+UnaryMathFunction CreateExpFunction() {
+ if (!CpuFeatures::IsSupported(VFP2)) return &exp;
+ if (!FLAG_fast_math) return &exp;
+ size_t actual_size;
+ byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
+ if (buffer == NULL) return &exp;
+ ExternalReference::InitializeMathExpData();
+
+ MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
+
+ {
+ CpuFeatures::Scope use_vfp(VFP2);
+ DwVfpRegister input = d0;
+ DwVfpRegister result = d1;
+ DwVfpRegister double_scratch1 = d2;
+ DwVfpRegister double_scratch2 = d3;
+ Register temp1 = r4;
+ Register temp2 = r5;
+ Register temp3 = r6;
+
+ if (masm.use_eabi_hardfloat()) {
+ // Input value is in d0 anyway, nothing to do.
+ } else {
+ __ vmov(input, r0, r1);
+ }
+ __ Push(temp3, temp2, temp1);
+ MathExpGenerator::EmitMathExp(
+ &masm, input, result, double_scratch1, double_scratch2,
+ temp1, temp2, temp3);
+ __ Pop(temp3, temp2, temp1);
+ if (masm.use_eabi_hardfloat()) {
+ __ vmov(d0, result);
+ } else {
+ __ vmov(r0, r1, result);
+ }
+ __ Ret();
+ }
+
+ CodeDesc desc;
+ masm.GetCode(&desc);
+ ASSERT(!RelocInfo::RequiresRelocation(desc));
+
+ CPU::FlushICache(buffer, actual_size);
+ OS::ProtectCode(buffer, actual_size);
+
+#if !defined(USE_SIMULATOR)
+ return FUNCTION_CAST<UnaryMathFunction>(buffer);
+#else
+ fast_exp_arm_machine_code = buffer;
+ return &fast_exp_simulator;
+#endif
+}
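A hedged usage sketch for the new factory: the returned pointer behaves like any other UnaryMathFunction, and the fallbacks above mean callers need no feature checks of their own. The call site below is hypothetical:

    // Inside V8, UnaryMathFunction is double (*)(double), and
    // CreateExpFunction() returns &exp when VFP2 or --fast-math is missing.
    double FastExpOfOne() {
      static UnaryMathFunction fast_exp = CreateExpFunction();
      return fast_exp(1.0);  // ~2.718281828, generated code or libm.
    }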
+
+
+#undef __
+
+
UnaryMathFunction CreateSqrtFunction() {
return &sqrt;
}
@@ -73,8 +142,11 @@ void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
// -------------------------------------------------------------------------
// Code generators
+#define __ ACCESS_MASM(masm)
+
void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
- MacroAssembler* masm) {
+ MacroAssembler* masm, AllocationSiteMode mode,
+ Label* allocation_site_info_found) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r1 : key
@@ -83,6 +155,12 @@ void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
// -- r3 : target map, scratch for subsequent call
// -- r4 : scratch (elements)
// -----------------------------------
+ if (mode == TRACK_ALLOCATION_SITE) {
+ ASSERT(allocation_site_info_found != NULL);
+ __ TestJSArrayForAllocationSiteInfo(r2, r4);
+ __ b(eq, allocation_site_info_found);
+ }
+
// Set transitioned map.
__ str(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
__ RecordWriteField(r2,
@@ -97,7 +175,7 @@ void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
void ElementsTransitionGenerator::GenerateSmiToDouble(
- MacroAssembler* masm, Label* fail) {
+ MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r1 : key
@@ -109,6 +187,11 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
Label loop, entry, convert_hole, gc_required, only_change_map, done;
bool vfp2_supported = CpuFeatures::IsSupported(VFP2);
+ if (mode == TRACK_ALLOCATION_SITE) {
+ __ TestJSArrayForAllocationSiteInfo(r2, r4);
+ __ b(eq, fail);
+ }
+
// Check for empty arrays, which only require a map transition and no changes
// to the backing store.
__ ldr(r4, FieldMemOperand(r2, JSObject::kElementsOffset));
@@ -123,27 +206,10 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
// Allocate new FixedDoubleArray.
// Use lr as a temporary register.
__ mov(lr, Operand(r5, LSL, 2));
- __ add(lr, lr, Operand(FixedDoubleArray::kHeaderSize + kPointerSize));
- __ AllocateInNewSpace(lr, r6, r7, r9, &gc_required, NO_ALLOCATION_FLAGS);
+ __ add(lr, lr, Operand(FixedDoubleArray::kHeaderSize));
+ __ AllocateInNewSpace(lr, r6, r7, r9, &gc_required, DOUBLE_ALIGNMENT);
// r6: destination FixedDoubleArray, not tagged as heap object.
- // Align the array conveniently for doubles.
- // Store a filler value in the unused memory.
- Label aligned, aligned_done;
- __ tst(r6, Operand(kDoubleAlignmentMask));
- __ mov(ip, Operand(masm->isolate()->factory()->one_pointer_filler_map()));
- __ b(eq, &aligned);
- // Store at the beginning of the allocated memory and update the base pointer.
- __ str(ip, MemOperand(r6, kPointerSize, PostIndex));
- __ b(&aligned_done);
-
- __ bind(&aligned);
- // Store the filler at the end of the allocated memory.
- __ sub(lr, lr, Operand(kPointerSize));
- __ str(ip, MemOperand(r6, lr));
-
- __ bind(&aligned_done);
-
// Set destination FixedDoubleArray's length and map.
__ LoadRoot(r9, Heap::kFixedDoubleArrayMapRootIndex);
__ str(r5, MemOperand(r6, FixedDoubleArray::kLengthOffset));
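
The deleted filler dance is now the allocator's job: with the DOUBLE_ALIGNMENT flag, AllocateInNewSpace is expected to align the returned address to kDoubleAlignment itself and to plant the one-pointer filler in the slack word, so the caller no longer reserves an extra kPointerSize or patches the base pointer by hand.

  // Before: size += kPointerSize; allocate; test kDoubleAlignmentMask;
  //         store one_pointer_filler_map at the start or the end.
  // After:  AllocateInNewSpace(size, ..., DOUBLE_ALIGNMENT) does both.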
@@ -192,7 +258,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
HeapObject::kMapOffset,
r3,
r9,
- kLRHasBeenSaved,
+ kLRHasNotBeenSaved,
kDontSaveFPRegs,
OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
@@ -251,7 +317,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
void ElementsTransitionGenerator::GenerateDoubleToObject(
- MacroAssembler* masm, Label* fail) {
+ MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r1 : key
@@ -262,6 +328,11 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
// -----------------------------------
Label entry, loop, convert_hole, gc_required, only_change_map;
+ if (mode == TRACK_ALLOCATION_SITE) {
+ __ TestJSArrayForAllocationSiteInfo(r2, r4);
+ __ b(eq, fail);
+ }
+
// Check for empty arrays, which only require a map transition and no changes
// to the backing store.
__ ldr(r4, FieldMemOperand(r2, JSObject::kElementsOffset));
@@ -397,7 +468,7 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
// the string.
__ bind(&cons_string);
__ ldr(result, FieldMemOperand(string, ConsString::kSecondOffset));
- __ CompareRoot(result, Heap::kEmptyStringRootIndex);
+ __ CompareRoot(result, Heap::kempty_stringRootIndex);
__ b(ne, call_runtime);
// Get the first of the two strings and load its instance type.
__ ldr(string, FieldMemOperand(string, ConsString::kFirstOffset));
@@ -416,7 +487,7 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
__ b(ne, &external_string);
// Prepare sequential strings
- STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
__ add(string,
string,
Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
@@ -450,6 +521,122 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
__ bind(&done);
}
+
+void SeqStringSetCharGenerator::Generate(MacroAssembler* masm,
+ String::Encoding encoding,
+ Register string,
+ Register index,
+ Register value) {
+ if (FLAG_debug_code) {
+ __ tst(index, Operand(kSmiTagMask));
+ __ Check(eq, "Non-smi index");
+ __ tst(value, Operand(kSmiTagMask));
+ __ Check(eq, "Non-smi value");
+
+ __ ldr(ip, FieldMemOperand(string, String::kLengthOffset));
+ __ cmp(index, ip);
+ __ Check(lt, "Index is too large");
+
+ __ cmp(index, Operand(Smi::FromInt(0)));
+ __ Check(ge, "Index is negative");
+
+ __ ldr(ip, FieldMemOperand(string, HeapObject::kMapOffset));
+ __ ldrb(ip, FieldMemOperand(ip, Map::kInstanceTypeOffset));
+
+ __ and_(ip, ip, Operand(kStringRepresentationMask | kStringEncodingMask));
+ static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
+ static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
+ __ cmp(ip, Operand(encoding == String::ONE_BYTE_ENCODING
+ ? one_byte_seq_type : two_byte_seq_type));
+ __ Check(eq, "Unexpected string type");
+ }
+
+ __ add(ip,
+ string,
+ Operand(SeqString::kHeaderSize - kHeapObjectTag));
+ __ SmiUntag(value, value);
+ STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
+ if (encoding == String::ONE_BYTE_ENCODING) {
+ // Smis are tagged by left shift by 1, thus LSR by 1 to smi-untag inline.
+ __ strb(value, MemOperand(ip, index, LSR, 1));
+ } else {
+ // No need to untag a smi for two-byte addressing.
+ __ strh(value, MemOperand(ip, index));
+ }
+}
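
The stores above lean on the smi representation: with kSmiTagSize == 1 and kSmiTag == 0, a tagged index is exactly twice the untagged one. A sketch of the resulting address arithmetic, assuming 32-bit smis as the STATIC_ASSERT requires:

  // One-byte strings: LSR #1 drops the tag, yielding a byte offset.
  byte_addr = base + (index_smi >> 1);
  // Two-byte strings: the tag already supplies the sizeof(uint16_t) scale.
  halfword_addr = base + index_smi;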
+
+
+static MemOperand ExpConstant(int index, Register base) {
+ return MemOperand(base, index * kDoubleSize);
+}
+
+
+void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
+ DwVfpRegister input,
+ DwVfpRegister result,
+ DwVfpRegister double_scratch1,
+ DwVfpRegister double_scratch2,
+ Register temp1,
+ Register temp2,
+ Register temp3) {
+ ASSERT(!input.is(result));
+ ASSERT(!input.is(double_scratch1));
+ ASSERT(!input.is(double_scratch2));
+ ASSERT(!result.is(double_scratch1));
+ ASSERT(!result.is(double_scratch2));
+ ASSERT(!double_scratch1.is(double_scratch2));
+ ASSERT(!temp1.is(temp2));
+ ASSERT(!temp1.is(temp3));
+ ASSERT(!temp2.is(temp3));
+ ASSERT(ExternalReference::math_exp_constants(0).address() != NULL);
+
+ Label done;
+
+ __ mov(temp3, Operand(ExternalReference::math_exp_constants(0)));
+
+ __ vldr(double_scratch1, ExpConstant(0, temp3));
+ __ vmov(result, kDoubleRegZero);
+ __ VFPCompareAndSetFlags(double_scratch1, input);
+ __ b(ge, &done);
+ __ vldr(double_scratch2, ExpConstant(1, temp3));
+ __ VFPCompareAndSetFlags(input, double_scratch2);
+ __ vldr(result, ExpConstant(2, temp3));
+ __ b(ge, &done);
+ __ vldr(double_scratch1, ExpConstant(3, temp3));
+ __ vldr(result, ExpConstant(4, temp3));
+ __ vmul(double_scratch1, double_scratch1, input);
+ __ vadd(double_scratch1, double_scratch1, result);
+ __ vmov(temp2, temp1, double_scratch1);
+ __ vsub(double_scratch1, double_scratch1, result);
+ __ vldr(result, ExpConstant(6, temp3));
+ __ vldr(double_scratch2, ExpConstant(5, temp3));
+ __ vmul(double_scratch1, double_scratch1, double_scratch2);
+ __ vsub(double_scratch1, double_scratch1, input);
+ __ vsub(result, result, double_scratch1);
+ __ vmul(input, double_scratch1, double_scratch1);
+ __ vmul(result, result, input);
+ __ mov(temp1, Operand(temp2, LSR, 11));
+ __ vldr(double_scratch2, ExpConstant(7, temp3));
+ __ vmul(result, result, double_scratch2);
+ __ vsub(result, result, double_scratch1);
+ __ vldr(double_scratch2, ExpConstant(8, temp3));
+ __ vadd(result, result, double_scratch2);
+ __ movw(ip, 0x7ff);
+ __ and_(temp2, temp2, Operand(ip));
+ __ add(temp1, temp1, Operand(0x3ff));
+ __ mov(temp1, Operand(temp1, LSL, 20));
+
+ // Must not call ExpConstant() after overwriting temp3!
+ __ mov(temp3, Operand(ExternalReference::math_exp_log_table()));
+ __ ldr(ip, MemOperand(temp3, temp2, LSL, 3));
+ __ add(temp3, temp3, Operand(kPointerSize));
+ __ ldr(temp2, MemOperand(temp3, temp2, LSL, 3));
+ __ orr(temp1, temp1, temp2);
+ __ vmov(input, ip, temp1);
+ __ vmul(result, result, input);
+ __ bind(&done);
+}
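
Judging from the masks and shifts above (an 11-bit table index, an exponent biased with 0x3ff and shifted into place with LSL #20), EmitMathExp presumably implements the usual table-driven scheme:

  e^x = 2^(x / ln 2),  n = round((x / ln 2) * 2^11) = 2^11 * m + j,  0 <= j < 2^11
  e^x ~= 2^m * T[j] * p(r),  with T[j] = 2^(j / 2^11)

where T is the table behind math_exp_log_table(), p(r) is a small polynomial correction built from the math_exp_constants() entries, and the initial comparisons against ExpConstant(0) and ExpConstant(1) short-circuit underflow and overflow before any table access.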
+
#undef __
// add(r0, pc, Operand(-8))
@@ -464,6 +651,7 @@ static byte* GetNoCodeAgeSequence(uint32_t* length) {
*length = kNoCodeAgeSequenceLength * Assembler::kInstrSize;
if (!initialized) {
CodePatcher patcher(byte_sequence, kNoCodeAgeSequenceLength);
+ PredictableCodeSizeScope scope(patcher.masm(), *length);
patcher.masm()->stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
patcher.masm()->LoadRoot(ip, Heap::kUndefinedValueRootIndex);
patcher.masm()->add(fp, sp, Operand(2 * kPointerSize));
@@ -473,29 +661,6 @@ static byte* GetNoCodeAgeSequence(uint32_t* length) {
}
-byte* Code::FindPlatformCodeAgeSequence() {
- byte* start = instruction_start();
- uint32_t young_length;
- byte* young_sequence = GetNoCodeAgeSequence(&young_length);
- if (!memcmp(start, young_sequence, young_length) ||
- Memory::uint32_at(start) == kCodeAgePatchFirstInstruction) {
- return start;
- } else {
- byte* start_after_strict = NULL;
- if (kind() == FUNCTION) {
- start_after_strict = start + kSizeOfFullCodegenStrictModePrologue;
- } else {
- ASSERT(kind() == OPTIMIZED_FUNCTION);
- start_after_strict = start + kSizeOfOptimizedStrictModePrologue;
- }
- ASSERT(!memcmp(start_after_strict, young_sequence, young_length) ||
- Memory::uint32_at(start_after_strict) ==
- kCodeAgePatchFirstInstruction);
- return start_after_strict;
- }
-}
-
-
bool Code::IsYoungSequence(byte* sequence) {
uint32_t young_length;
byte* young_sequence = GetNoCodeAgeSequence(&young_length);
diff --git a/src/3rdparty/v8/src/arm/codegen-arm.h b/src/3rdparty/v8/src/arm/codegen-arm.h
index c77844d..75899a9 100644
--- a/src/3rdparty/v8/src/arm/codegen-arm.h
+++ b/src/3rdparty/v8/src/arm/codegen-arm.h
@@ -34,9 +34,6 @@
namespace v8 {
namespace internal {
-static const int kSizeOfFullCodegenStrictModePrologue = 16;
-static const int kSizeOfOptimizedStrictModePrologue = 16;
-
// Forward declarations
class CompilationInfo;
@@ -47,6 +44,10 @@ enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
class CodeGenerator: public AstVisitor {
public:
+ CodeGenerator() {
+ InitializeAstVisitor();
+ }
+
static bool MakeCode(CompilationInfo* info);
// Printing of AST, etc. as requested by flags.
@@ -71,6 +72,8 @@ class CodeGenerator: public AstVisitor {
int pos,
bool right_here = false);
+ DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
+
private:
DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
};
@@ -91,6 +94,22 @@ class StringCharLoadGenerator : public AllStatic {
DISALLOW_COPY_AND_ASSIGN(StringCharLoadGenerator);
};
+
+class MathExpGenerator : public AllStatic {
+ public:
+ static void EmitMathExp(MacroAssembler* masm,
+ DwVfpRegister input,
+ DwVfpRegister result,
+ DwVfpRegister double_scratch1,
+ DwVfpRegister double_scratch2,
+ Register temp1,
+ Register temp2,
+ Register temp3);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(MathExpGenerator);
+};
+
} } // namespace v8::internal
#endif // V8_ARM_CODEGEN_ARM_H_
diff --git a/src/3rdparty/v8/src/arm/constants-arm.cc b/src/3rdparty/v8/src/arm/constants-arm.cc
index bf9da23..cdca1f5 100644
--- a/src/3rdparty/v8/src/arm/constants-arm.cc
+++ b/src/3rdparty/v8/src/arm/constants-arm.cc
@@ -87,8 +87,8 @@ const char* Registers::Name(int reg) {
}
-// Support for VFP registers s0 to s31 (d0 to d15).
-// Note that "sN:sM" is the same as "dN/2"
+// Support for VFP registers s0 to s31 (d0 to d15) and d16-d31.
+// Note that "sN:sM" is the same as "dN/2" up to d15.
// These register names are defined in a way to match the native disassembler
// formatting. See for example the command "objdump -d <binary file>".
const char* VFPRegisters::names_[kNumVFPRegisters] = {
@@ -97,7 +97,9 @@ const char* VFPRegisters::names_[kNumVFPRegisters] = {
"s16", "s17", "s18", "s19", "s20", "s21", "s22", "s23",
"s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31",
"d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
- "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15"
+ "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15",
+ "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
+ "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31"
};
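
With the table doubled, the lookup convention is unchanged: the 32 double names follow the 32 single names, so dN is expected at names_[kNumVFPSingleRegisters + N], e.g. names_[32 + 17] == "d17" (assuming VFPRegisters::Name(reg, true) simply offsets into this array).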
diff --git a/src/3rdparty/v8/src/arm/constants-arm.h b/src/3rdparty/v8/src/arm/constants-arm.h
index 03876f9..841df92 100644
--- a/src/3rdparty/v8/src/arm/constants-arm.h
+++ b/src/3rdparty/v8/src/arm/constants-arm.h
@@ -104,7 +104,7 @@ const int kNumRegisters = 16;
// VFP support.
const int kNumVFPSingleRegisters = 32;
-const int kNumVFPDoubleRegisters = 16;
+const int kNumVFPDoubleRegisters = 32;
const int kNumVFPRegisters = kNumVFPSingleRegisters + kNumVFPDoubleRegisters;
// PC is register 15.
@@ -269,7 +269,8 @@ enum {
kCoprocessorMask = 15 << 8,
kOpCodeMask = 15 << 21, // In data-processing instructions.
kImm24Mask = (1 << 24) - 1,
- kOff12Mask = (1 << 12) - 1
+ kOff12Mask = (1 << 12) - 1,
+ kOff8Mask = (1 << 8) - 1
};
@@ -466,6 +467,9 @@ extern const Instr kMovLrPc;
// ldr rd, [pc, #offset]
extern const Instr kLdrPCMask;
extern const Instr kLdrPCPattern;
+// vldr dd, [pc, #offset]
+extern const Instr kVldrDPCMask;
+extern const Instr kVldrDPCPattern;
// blxcc rm
extern const Instr kBlxRegMask;
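
kOff8Mask complements kOff12Mask for the VFP load/store encodings: vldr and vstr carry an 8-bit word offset that the hardware scales by 4, so a constant-pool entry referenced by the new kVldrDPCPattern must sit within roughly a kilobyte of the instruction.

  // Maximum displacement of a vldr/vstr literal, in bytes (assumed from
  // the standard ARM encoding: imm8 scaled by 4).
  int max_vldr_offset = kOff8Mask << 2;  // 255 * 4 == 1020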
diff --git a/src/3rdparty/v8/src/arm/debug-arm.cc b/src/3rdparty/v8/src/arm/debug-arm.cc
index c2941be..e9a65b2 100644
--- a/src/3rdparty/v8/src/arm/debug-arm.cc
+++ b/src/3rdparty/v8/src/arm/debug-arm.cc
@@ -161,7 +161,7 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
#ifdef DEBUG
__ RecordComment("// Calling from debug break to runtime - come in - over");
#endif
- __ mov(r0, Operand(0, RelocInfo::NONE)); // no arguments
+ __ mov(r0, Operand::Zero()); // no arguments
__ mov(r1, Operand(ExternalReference::debug_break(masm->isolate())));
CEntryStub ceb(1);
diff --git a/src/3rdparty/v8/src/arm/deoptimizer-arm.cc b/src/3rdparty/v8/src/arm/deoptimizer-arm.cc
index 19667b9..2e1e3e3 100644
--- a/src/3rdparty/v8/src/arm/deoptimizer-arm.cc
+++ b/src/3rdparty/v8/src/arm/deoptimizer-arm.cc
@@ -44,11 +44,14 @@ int Deoptimizer::patch_size() {
}
-void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
- HandleScope scope;
+void Deoptimizer::DeoptimizeFunctionWithPreparedFunctionList(
+ JSFunction* function) {
+ Isolate* isolate = function->GetIsolate();
+ HandleScope scope(isolate);
AssertNoAllocation no_allocation;
- if (!function->IsOptimized()) return;
+ ASSERT(function->IsOptimized());
+ ASSERT(function->FunctionsInFunctionListShareSameCode());
// The optimized code is going to be patched, so we cannot use it
// any more. Play safe and reset the whole cache.
@@ -72,17 +75,17 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
for (int i = 0; i < deopt_data->DeoptCount(); i++) {
if (deopt_data->Pc(i)->value() == -1) continue;
Address call_address = code_start_address + deopt_data->Pc(i)->value();
- Address deopt_entry = GetDeoptimizationEntry(i, LAZY);
+ Address deopt_entry = GetDeoptimizationEntry(isolate, i, LAZY);
// We need calls to have a predictable size in the unoptimized code, but
// this is optimized code, so we don't have to have a predictable size.
int call_size_in_bytes =
MacroAssembler::CallSizeNotPredictableCodeSize(deopt_entry,
- RelocInfo::NONE);
+ RelocInfo::NONE32);
int call_size_in_words = call_size_in_bytes / Assembler::kInstrSize;
ASSERT(call_size_in_bytes % Assembler::kInstrSize == 0);
ASSERT(call_size_in_bytes <= patch_size());
CodePatcher patcher(call_address, call_size_in_words);
- patcher.masm()->Call(deopt_entry, RelocInfo::NONE);
+ patcher.masm()->Call(deopt_entry, RelocInfo::NONE32);
ASSERT(prev_call_address == NULL ||
call_address >= prev_call_address + patch_size());
ASSERT(call_address + patch_size() <= code->instruction_end());
@@ -91,8 +94,6 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
#endif
}
- Isolate* isolate = code->GetIsolate();
-
// Add the deoptimizing code to the list.
DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code);
DeoptimizerData* data = isolate->deoptimizer_data();
@@ -114,7 +115,6 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
}
-static const int32_t kBranchBeforeStackCheck = 0x2a000001;
static const int32_t kBranchBeforeInterrupt = 0x5a000004;
@@ -123,24 +123,21 @@ void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
Code* check_code,
Code* replacement_code) {
const int kInstrSize = Assembler::kInstrSize;
- // The call of the stack guard check has the following form:
- // e1 5d 00 0c cmp sp, <limit>
- // 2a 00 00 01 bcs ok
+ // The back edge bookkeeping code matches the pattern:
+ //
+ // <decrement profiling counter>
+ // 5a 00 00 04 bpl ok
// e5 9f c? ?? ldr ip, [pc, <stack guard address>]
// e1 2f ff 3c blx ip
ASSERT(Memory::int32_at(pc_after - kInstrSize) == kBlxIp);
ASSERT(Assembler::IsLdrPcImmediateOffset(
Assembler::instr_at(pc_after - 2 * kInstrSize)));
- if (FLAG_count_based_interrupts) {
- ASSERT_EQ(kBranchBeforeInterrupt,
- Memory::int32_at(pc_after - 3 * kInstrSize));
- } else {
- ASSERT_EQ(kBranchBeforeStackCheck,
- Memory::int32_at(pc_after - 3 * kInstrSize));
- }
+ ASSERT_EQ(kBranchBeforeInterrupt,
+ Memory::int32_at(pc_after - 3 * kInstrSize));
// We patch the code to the following form:
- // e1 5d 00 0c cmp sp, <limit>
+ //
+ // <decrement profiling counter>
// e1 a0 00 00 mov r0, r0 (NOP)
// e5 9f c? ?? ldr ip, [pc, <on-stack replacement address>]
// e1 2f ff 3c blx ip
@@ -177,15 +174,9 @@ void Deoptimizer::RevertStackCheckCodeAt(Code* unoptimized_code,
// Replace NOP with conditional jump.
CodePatcher patcher(pc_after - 3 * kInstrSize, 1);
- if (FLAG_count_based_interrupts) {
- patcher.masm()->b(+16, pl);
- ASSERT_EQ(kBranchBeforeInterrupt,
- Memory::int32_at(pc_after - 3 * kInstrSize));
- } else {
- patcher.masm()->b(+4, cs);
- ASSERT_EQ(kBranchBeforeStackCheck,
- Memory::int32_at(pc_after - 3 * kInstrSize));
- }
+ patcher.masm()->b(+16, pl);
+ ASSERT_EQ(kBranchBeforeInterrupt,
+ Memory::int32_at(pc_after - 3 * kInstrSize));
// Replace the stack check address in the constant pool
// with the entry address of the replacement code.
@@ -222,7 +213,7 @@ static int LookupBailoutId(DeoptimizationInputData* data, BailoutId ast_id) {
void Deoptimizer::DoComputeOsrOutputFrame() {
DeoptimizationInputData* data = DeoptimizationInputData::cast(
- optimized_code_->deoptimization_data());
+ compiled_code_->deoptimization_data());
unsigned ast_id = data->OsrAstId()->value();
int bailout_id = LookupBailoutId(data, BailoutId(ast_id));
@@ -256,7 +247,7 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
unsigned input_frame_size = input_->GetFrameSize();
ASSERT(fixed_size + height_in_bytes == input_frame_size);
- unsigned stack_slot_size = optimized_code_->stack_slots() * kPointerSize;
+ unsigned stack_slot_size = compiled_code_->stack_slots() * kPointerSize;
unsigned outgoing_height = data->ArgumentsStackHeight(bailout_id)->value();
unsigned outgoing_size = outgoing_height * kPointerSize;
unsigned output_frame_size = fixed_size + stack_slot_size + outgoing_size;
@@ -348,7 +339,7 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
unsigned pc_offset = data->OsrPcOffset()->value();
uint32_t pc = reinterpret_cast<uint32_t>(
- optimized_code_->entry() + pc_offset);
+ compiled_code_->entry() + pc_offset);
output_[0]->SetPc(pc);
}
Code* continuation = isolate_->builtins()->builtin(Builtins::kNotifyOSR);
@@ -365,99 +356,184 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
}
-void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
- int frame_index) {
- JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
- unsigned height = iterator->Next();
- unsigned height_in_bytes = height * kPointerSize;
- if (FLAG_trace_deopt) {
- PrintF(" translating arguments adaptor => height=%d\n", height_in_bytes);
- }
-
- unsigned fixed_frame_size = ArgumentsAdaptorFrameConstants::kFrameSize;
- unsigned output_frame_size = height_in_bytes + fixed_frame_size;
+void Deoptimizer::DoComputeCompiledStubFrame(TranslationIterator* iterator,
+ int frame_index) {
+ //
+ // FROM TO
+ // | .... | | .... |
+ // +-------------------------+ +-------------------------+
+ // | JSFunction continuation | | JSFunction continuation |
+ // +-------------------------+ +-------------------------+
+ // | | saved frame (fp) | | saved frame (fp) |
+ // | +=========================+<-fp +=========================+<-fp
+ // | | JSFunction context | | JSFunction context |
+ // v +-------------------------+ +-------------------------+
+ // | COMPILED_STUB marker | | STUB_FAILURE marker |
+ // +-------------------------+ +-------------------------+
+ // | | | caller args.arguments_ |
+ // | ... | +-------------------------+
+ // | | | caller args.length_ |
+ // +-------------------------+<-sp +-------------------------+
+ // | caller args pointer |
+ // +-------------------------+
+ // | caller stack param 1 |
+ // parameters in registers +-------------------------+
+ // and spilled to stack | .... |
+ // +-------------------------+
+ // | caller stack param n |
+ // +-------------------------+<-sp
+ // r0 = number of parameters
+ // r1 = failure handler address
+ // fp = saved frame
+ // cp = JSFunction context
+ //
- // Allocate and store the output frame description.
+ ASSERT(compiled_code_->kind() == Code::COMPILED_STUB);
+ int major_key = compiled_code_->major_key();
+ CodeStubInterfaceDescriptor* descriptor =
+ isolate_->code_stub_interface_descriptor(major_key);
+
+ // The output frame must have room for all pushed register parameters
+ // and the standard stack frame slots. Include space for an argument
+ // object to the callee and optionally the space to pass the argument
+ // object to the stub failure handler.
+ int height_in_bytes = kPointerSize * descriptor->register_param_count_ +
+ sizeof(Arguments) + kPointerSize;
+ int fixed_frame_size = StandardFrameConstants::kFixedFrameSize;
+ int input_frame_size = input_->GetFrameSize();
+ int output_frame_size = height_in_bytes + fixed_frame_size;
+ if (trace_) {
+ PrintF(" translating %s => StubFailureTrampolineStub, height=%d\n",
+ CodeStub::MajorName(static_cast<CodeStub::Major>(major_key), false),
+ height_in_bytes);
+ }
+
+ // The stub failure trampoline is a single frame.
FrameDescription* output_frame =
- new(output_frame_size) FrameDescription(output_frame_size, function);
- output_frame->SetFrameType(StackFrame::ARGUMENTS_ADAPTOR);
-
- // Arguments adaptor can not be topmost or bottommost.
- ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
- ASSERT(output_[frame_index] == NULL);
+ new(output_frame_size) FrameDescription(output_frame_size, NULL);
+ output_frame->SetFrameType(StackFrame::STUB_FAILURE_TRAMPOLINE);
+ ASSERT(frame_index == 0);
output_[frame_index] = output_frame;
- // The top address of the frame is computed from the previous
- // frame's top and this frame's size.
- uint32_t top_address;
- top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
+ // The top address for the output frame can be computed from the input
+ // frame pointer and the output frame's height. Subtract space for the
+ // context and function slots.
+ intptr_t top_address = input_->GetRegister(fp.code()) - (2 * kPointerSize) -
+ height_in_bytes;
output_frame->SetTop(top_address);
- // Compute the incoming parameter translation.
- int parameter_count = height;
- unsigned output_offset = output_frame_size;
- for (int i = 0; i < parameter_count; ++i) {
- output_offset -= kPointerSize;
- DoTranslateCommand(iterator, frame_index, output_offset);
+ // Read caller's PC (JSFunction continuation) from the input frame.
+ intptr_t input_frame_offset = input_frame_size - kPointerSize;
+ intptr_t output_frame_offset = output_frame_size - kPointerSize;
+ intptr_t value = input_->GetFrameSlot(input_frame_offset);
+ output_frame->SetFrameSlot(output_frame_offset, value);
+ if (trace_) {
+ PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's pc\n",
+ top_address + output_frame_offset, output_frame_offset, value);
+ }
+
+ // Read caller's FP from the input frame, and set this frame's FP.
+ input_frame_offset -= kPointerSize;
+ value = input_->GetFrameSlot(input_frame_offset);
+ output_frame_offset -= kPointerSize;
+ output_frame->SetFrameSlot(output_frame_offset, value);
+ intptr_t frame_ptr = input_->GetRegister(fp.code());
+ output_frame->SetRegister(fp.code(), frame_ptr);
+ output_frame->SetFp(frame_ptr);
+ if (trace_) {
+ PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
+ top_address + output_frame_offset, output_frame_offset, value);
}
- // Read caller's PC from the previous frame.
- output_offset -= kPointerSize;
- intptr_t callers_pc = output_[frame_index - 1]->GetPc();
- output_frame->SetFrameSlot(output_offset, callers_pc);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's pc\n",
- top_address + output_offset, output_offset, callers_pc);
+ // The context can be read from the input frame.
+ input_frame_offset -= kPointerSize;
+ value = input_->GetFrameSlot(input_frame_offset);
+ output_frame->SetRegister(cp.code(), value);
+ output_frame_offset -= kPointerSize;
+ output_frame->SetFrameSlot(output_frame_offset, value);
+ if (trace_) {
+ PrintF(" 0x%08x: [top + %d] <- 0x%08x ; context\n",
+ top_address + output_frame_offset, output_frame_offset, value);
}
- // Read caller's FP from the previous frame, and set this frame's FP.
- output_offset -= kPointerSize;
- intptr_t value = output_[frame_index - 1]->GetFp();
- output_frame->SetFrameSlot(output_offset, value);
- intptr_t fp_value = top_address + output_offset;
- output_frame->SetFp(fp_value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
- fp_value, output_offset, value);
+ // A marker value is used in place of the function.
+ output_frame_offset -= kPointerSize;
+ value = reinterpret_cast<intptr_t>(
+ Smi::FromInt(StackFrame::STUB_FAILURE_TRAMPOLINE));
+ output_frame->SetFrameSlot(output_frame_offset, value);
+ if (trace_) {
+ PrintF(" 0x%08x: [top + %d] <- 0x%08x ; function (stub fail sentinel)\n",
+ top_address + output_frame_offset, output_frame_offset, value);
}
- // A marker value is used in place of the context.
- output_offset -= kPointerSize;
- intptr_t context = reinterpret_cast<intptr_t>(
- Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- output_frame->SetFrameSlot(output_offset, context);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; context (adaptor sentinel)\n",
- top_address + output_offset, output_offset, context);
+ int caller_arg_count = 0;
+ if (descriptor->stack_parameter_count_ != NULL) {
+ caller_arg_count =
+ input_->GetRegister(descriptor->stack_parameter_count_->code());
}
- // The function was mentioned explicitly in the ARGUMENTS_ADAPTOR_FRAME.
- output_offset -= kPointerSize;
- value = reinterpret_cast<intptr_t>(function);
- output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; function\n",
- top_address + output_offset, output_offset, value);
+ // Build the Arguments object for the caller's parameters and a pointer to it.
+ output_frame_offset -= kPointerSize;
+ value = frame_ptr + StandardFrameConstants::kCallerSPOffset +
+ (caller_arg_count - 1) * kPointerSize;
+ output_frame->SetFrameSlot(output_frame_offset, value);
+ if (trace_) {
+ PrintF(" 0x%08x: [top + %d] <- 0x%08x ; args.arguments\n",
+ top_address + output_frame_offset, output_frame_offset, value);
}
- // Number of incoming arguments.
- output_offset -= kPointerSize;
- value = reinterpret_cast<uint32_t>(Smi::FromInt(height - 1));
- output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; argc (%d)\n",
- top_address + output_offset, output_offset, value, height - 1);
+ output_frame_offset -= kPointerSize;
+ value = caller_arg_count;
+ output_frame->SetFrameSlot(output_frame_offset, value);
+ if (trace_) {
+ PrintF(" 0x%08x: [top + %d] <- 0x%08x ; args.length\n",
+ top_address + output_frame_offset, output_frame_offset, value);
}
- ASSERT(0 == output_offset);
+ output_frame_offset -= kPointerSize;
+ value = frame_ptr - (output_frame_size - output_frame_offset) -
+ StandardFrameConstants::kMarkerOffset + kPointerSize;
+ output_frame->SetFrameSlot(output_frame_offset, value);
+ if (trace_) {
+ PrintF(" 0x%08x: [top + %d] <- 0x%08x ; args*\n",
+ top_address + output_frame_offset, output_frame_offset, value);
+ }
- Builtins* builtins = isolate_->builtins();
- Code* adaptor_trampoline =
- builtins->builtin(Builtins::kArgumentsAdaptorTrampoline);
- uint32_t pc = reinterpret_cast<uint32_t>(
- adaptor_trampoline->instruction_start() +
- isolate_->heap()->arguments_adaptor_deopt_pc_offset()->value());
- output_frame->SetPc(pc);
+ // Copy the register parameters to the failure frame.
+ for (int i = 0; i < descriptor->register_param_count_; ++i) {
+ output_frame_offset -= kPointerSize;
+ DoTranslateCommand(iterator, 0, output_frame_offset);
+ }
+
+ ASSERT(0 == output_frame_offset);
+
+ for (int i = 0; i < DwVfpRegister::kMaxNumRegisters; ++i) {
+ double double_value = input_->GetDoubleRegister(i);
+ output_frame->SetDoubleRegister(i, double_value);
+ }
+
+ ApiFunction function(descriptor->deoptimization_handler_);
+ ExternalReference xref(&function, ExternalReference::BUILTIN_CALL, isolate_);
+ intptr_t handler = reinterpret_cast<intptr_t>(xref.address());
+ int params = descriptor->register_param_count_;
+ if (descriptor->stack_parameter_count_ != NULL) {
+ params++;
+ }
+ output_frame->SetRegister(r0.code(), params);
+ output_frame->SetRegister(r1.code(), handler);
+
+ // Compute this frame's PC, state, and continuation.
+ Code* trampoline = NULL;
+ int extra = descriptor->extra_expression_stack_count_;
+ StubFailureTrampolineStub(extra).FindCodeInCache(&trampoline, isolate_);
+ ASSERT(trampoline != NULL);
+ output_frame->SetPc(reinterpret_cast<intptr_t>(
+ trampoline->instruction_start()));
+ output_frame->SetState(Smi::FromInt(FullCodeGenerator::NO_REGISTERS));
+ Code* notify_failure =
+ isolate_->builtins()->builtin(Builtins::kNotifyStubFailure);
+ output_frame->SetContinuation(
+ reinterpret_cast<intptr_t>(notify_failure->entry()));
}
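
Taken together, the slots written above hand the stub failure trampoline a conventional runtime-call shape:

  // Register contract on entry to the trampoline (from the code above):
  //   r0 = descriptor->register_param_count_ (+1 if a stack count is used)
  //   r1 = deoptimization handler (BUILTIN_CALL external reference)
  //   fp = caller's frame pointer, cp = caller's context
  // plus an Arguments triple (arguments_, length_, and a pointer to it)
  // just below the STUB_FAILURE_TRAMPOLINE marker, as in the diagram.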
@@ -514,7 +590,7 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
output_frame->SetFrameSlot(output_offset, value);
intptr_t fp_value = top_address + output_offset;
output_frame->SetFp(fp_value);
- if (FLAG_trace_deopt) {
+ if (trace_) {
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
fp_value, output_offset, value);
}
@@ -523,7 +599,7 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
output_offset -= kPointerSize;
value = output_[frame_index - 1]->GetContext();
output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
+ if (trace_) {
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; context\n",
top_address + output_offset, output_offset, value);
}
@@ -532,7 +608,7 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
output_offset -= kPointerSize;
value = reinterpret_cast<intptr_t>(Smi::FromInt(StackFrame::CONSTRUCT));
output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
+ if (trace_) {
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; function (construct sentinel)\n",
top_address + output_offset, output_offset, value);
}
@@ -541,7 +617,7 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
output_offset -= kPointerSize;
value = reinterpret_cast<intptr_t>(construct_stub);
output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
+ if (trace_) {
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; code object\n",
top_address + output_offset, output_offset, value);
}
@@ -550,7 +626,7 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
output_offset -= kPointerSize;
value = reinterpret_cast<uint32_t>(Smi::FromInt(height - 1));
output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
+ if (trace_) {
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; argc (%d)\n",
top_address + output_offset, output_offset, value, height - 1);
}
@@ -559,7 +635,7 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
output_offset -= kPointerSize;
value = reinterpret_cast<intptr_t>(function);
output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
+ if (trace_) {
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; constructor function\n",
top_address + output_offset, output_offset, value);
}
@@ -569,7 +645,7 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
output_offset -= kPointerSize;
value = output_frame->GetFrameSlot(output_frame_size - kPointerSize);
output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
+ if (trace_) {
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; allocated receiver\n",
top_address + output_offset, output_offset, value);
}
@@ -583,124 +659,6 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
}
-void Deoptimizer::DoComputeAccessorStubFrame(TranslationIterator* iterator,
- int frame_index,
- bool is_setter_stub_frame) {
- JSFunction* accessor = JSFunction::cast(ComputeLiteral(iterator->Next()));
- // The receiver (and the implicit return value, if any) are expected in
- // registers by the LoadIC/StoreIC, so they don't belong to the output stack
- // frame. This means that we have to use a height of 0.
- unsigned height = 0;
- unsigned height_in_bytes = height * kPointerSize;
- const char* kind = is_setter_stub_frame ? "setter" : "getter";
- if (FLAG_trace_deopt) {
- PrintF(" translating %s stub => height=%u\n", kind, height_in_bytes);
- }
-
- // We need 5 stack entries from StackFrame::INTERNAL (lr, fp, cp, frame type,
- // code object, see MacroAssembler::EnterFrame). For a setter stub frames we
- // need one additional entry for the implicit return value, see
- // StoreStubCompiler::CompileStoreViaSetter.
- unsigned fixed_frame_entries = 5 + (is_setter_stub_frame ? 1 : 0);
- unsigned fixed_frame_size = fixed_frame_entries * kPointerSize;
- unsigned output_frame_size = height_in_bytes + fixed_frame_size;
-
- // Allocate and store the output frame description.
- FrameDescription* output_frame =
- new(output_frame_size) FrameDescription(output_frame_size, accessor);
- output_frame->SetFrameType(StackFrame::INTERNAL);
-
- // A frame for an accessor stub can not be the topmost or bottommost one.
- ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
- ASSERT(output_[frame_index] == NULL);
- output_[frame_index] = output_frame;
-
- // The top address of the frame is computed from the previous frame's top and
- // this frame's size.
- uint32_t top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
- output_frame->SetTop(top_address);
-
- unsigned output_offset = output_frame_size;
-
- // Read caller's PC from the previous frame.
- output_offset -= kPointerSize;
- intptr_t callers_pc = output_[frame_index - 1]->GetPc();
- output_frame->SetFrameSlot(output_offset, callers_pc);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
- " ; caller's pc\n",
- top_address + output_offset, output_offset, callers_pc);
- }
-
- // Read caller's FP from the previous frame, and set this frame's FP.
- output_offset -= kPointerSize;
- intptr_t value = output_[frame_index - 1]->GetFp();
- output_frame->SetFrameSlot(output_offset, value);
- intptr_t fp_value = top_address + output_offset;
- output_frame->SetFp(fp_value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
- " ; caller's fp\n",
- fp_value, output_offset, value);
- }
-
- // The context can be gotten from the previous frame.
- output_offset -= kPointerSize;
- value = output_[frame_index - 1]->GetContext();
- output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
- " ; context\n",
- top_address + output_offset, output_offset, value);
- }
-
- // A marker value is used in place of the function.
- output_offset -= kPointerSize;
- value = reinterpret_cast<intptr_t>(Smi::FromInt(StackFrame::INTERNAL));
- output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
- " ; function (%s sentinel)\n",
- top_address + output_offset, output_offset, value, kind);
- }
-
- // Get Code object from accessor stub.
- output_offset -= kPointerSize;
- Builtins::Name name = is_setter_stub_frame ?
- Builtins::kStoreIC_Setter_ForDeopt :
- Builtins::kLoadIC_Getter_ForDeopt;
- Code* accessor_stub = isolate_->builtins()->builtin(name);
- value = reinterpret_cast<intptr_t>(accessor_stub);
- output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
- " ; code object\n",
- top_address + output_offset, output_offset, value);
- }
-
- // Skip receiver.
- Translation::Opcode opcode =
- static_cast<Translation::Opcode>(iterator->Next());
- iterator->Skip(Translation::NumberOfOperandsFor(opcode));
-
- if (is_setter_stub_frame) {
- // The implicit return value was part of the artificial setter stub
- // environment.
- output_offset -= kPointerSize;
- DoTranslateCommand(iterator, frame_index, output_offset);
- }
-
- ASSERT(0 == output_offset);
-
- Smi* offset = is_setter_stub_frame ?
- isolate_->heap()->setter_stub_deopt_pc_offset() :
- isolate_->heap()->getter_stub_deopt_pc_offset();
- intptr_t pc = reinterpret_cast<intptr_t>(
- accessor_stub->instruction_start() + offset->value());
- output_frame->SetPc(pc);
-}
-
-
// This code is very similar to ia32 code, but relies on register names (fp, sp)
// and how the frame is laid out.
void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
@@ -718,7 +676,7 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
}
unsigned height = iterator->Next();
unsigned height_in_bytes = height * kPointerSize;
- if (FLAG_trace_deopt) {
+ if (trace_) {
PrintF(" translating ");
function->PrintName();
PrintF(" => node=%d, height=%d\n", node_id.ToInt(), height_in_bytes);
@@ -782,7 +740,7 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
value = output_[frame_index - 1]->GetPc();
}
output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
+ if (trace_) {
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's pc\n",
top_address + output_offset, output_offset, value);
}
@@ -805,7 +763,7 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
if (is_topmost) {
output_frame->SetRegister(fp.code(), fp_value);
}
- if (FLAG_trace_deopt) {
+ if (trace_) {
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
fp_value, output_offset, value);
}
@@ -823,7 +781,7 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
output_frame->SetFrameSlot(output_offset, value);
output_frame->SetContext(value);
if (is_topmost) output_frame->SetRegister(cp.code(), value);
- if (FLAG_trace_deopt) {
+ if (trace_) {
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; context\n",
top_address + output_offset, output_offset, value);
}
@@ -836,7 +794,7 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
// input frame.
ASSERT(!is_bottommost || input_->GetFrameSlot(input_offset) == value);
output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
+ if (trace_) {
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; function\n",
top_address + output_offset, output_offset, value);
}
@@ -888,7 +846,7 @@ void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
}
input_->SetRegister(sp.code(), reinterpret_cast<intptr_t>(frame->sp()));
input_->SetRegister(fp.code(), reinterpret_cast<intptr_t>(frame->fp()));
- for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; i++) {
+ for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); i++) {
input_->SetDoubleRegister(i, 0.0);
}
@@ -908,7 +866,6 @@ void Deoptimizer::EntryGenerator::Generate() {
Isolate* isolate = masm()->isolate();
- CpuFeatures::Scope scope(VFP3);
// Save all general purpose registers before messing with them.
const int kNumberOfRegisters = Register::kNumRegisters;
@@ -916,23 +873,25 @@ void Deoptimizer::EntryGenerator::Generate() {
RegList restored_regs = kJSCallerSaved | kCalleeSaved | ip.bit();
const int kDoubleRegsSize =
- kDoubleSize * DwVfpRegister::kNumAllocatableRegisters;
-
- // Save all VFP registers before messing with them.
- DwVfpRegister first = DwVfpRegister::FromAllocationIndex(0);
- DwVfpRegister last =
- DwVfpRegister::FromAllocationIndex(
- DwVfpRegister::kNumAllocatableRegisters - 1);
- ASSERT(last.code() > first.code());
- ASSERT((last.code() - first.code()) ==
- (DwVfpRegister::kNumAllocatableRegisters - 1));
-#ifdef DEBUG
- for (int i = 0; i <= (DwVfpRegister::kNumAllocatableRegisters - 1); i++) {
- ASSERT((DwVfpRegister::FromAllocationIndex(i).code() <= last.code()) &&
- (DwVfpRegister::FromAllocationIndex(i).code() >= first.code()));
+ kDoubleSize * DwVfpRegister::kMaxNumAllocatableRegisters;
+
+ if (CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope(VFP2);
+ // Save all allocatable VFP registers before messing with them.
+ ASSERT(kDoubleRegZero.code() == 14);
+ ASSERT(kScratchDoubleReg.code() == 15);
+
+ // Check CPU flags for number of registers, setting the Z condition flag.
+ __ CheckFor32DRegs(ip);
+
+ // Push registers d0-d13, and possibly d16-d31, on the stack.
+ // If d16-d31 are not pushed, decrease the stack pointer instead.
+ __ vstm(db_w, sp, d16, d31, ne);
+ __ sub(sp, sp, Operand(16 * kDoubleSize), LeaveCC, eq);
+ __ vstm(db_w, sp, d0, d13);
+ } else {
+ __ sub(sp, sp, Operand(kDoubleRegsSize));
}
-#endif
- __ vstm(db_w, sp, first, last);
// Push all 16 registers (needed to populate FrameDescription::registers_).
// TODO(1588) Note that using pc with stm is deprecated, so we should perhaps
@@ -949,7 +908,7 @@ void Deoptimizer::EntryGenerator::Generate() {
// address for lazy deoptimization) and compute the fp-to-sp delta in
// register r4.
if (type() == EAGER) {
- __ mov(r3, Operand(0));
+ __ mov(r3, Operand::Zero());
// Correct one word for bailout id.
__ add(r4, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
} else if (type() == OSR) {
@@ -991,14 +950,17 @@ void Deoptimizer::EntryGenerator::Generate() {
__ str(r2, MemOperand(r1, offset));
}
- // Copy VFP registers to
- // double_registers_[DoubleRegister::kNumAllocatableRegisters]
- int double_regs_offset = FrameDescription::double_registers_offset();
- for (int i = 0; i < DwVfpRegister::kNumAllocatableRegisters; ++i) {
- int dst_offset = i * kDoubleSize + double_regs_offset;
- int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize;
- __ vldr(d0, sp, src_offset);
- __ vstr(d0, r1, dst_offset);
+ if (CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope(VFP2);
+ // Copy VFP registers to
+ // double_registers_[DoubleRegister::kMaxNumAllocatableRegisters]
+ int double_regs_offset = FrameDescription::double_registers_offset();
+ for (int i = 0; i < DwVfpRegister::kMaxNumAllocatableRegisters; ++i) {
+ int dst_offset = i * kDoubleSize + double_regs_offset;
+ int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize;
+ __ vldr(d0, sp, src_offset);
+ __ vstr(d0, r1, dst_offset);
+ }
}
// Remove the bailout id, eventually return address, and the saved registers
@@ -1019,10 +981,13 @@ void Deoptimizer::EntryGenerator::Generate() {
// frame description.
__ add(r3, r1, Operand(FrameDescription::frame_content_offset()));
Label pop_loop;
+ Label pop_loop_header;
+ __ b(&pop_loop_header);
__ bind(&pop_loop);
__ pop(r4);
__ str(r4, MemOperand(r3, 0));
__ add(r3, r3, Operand(sizeof(uint32_t)));
+ __ bind(&pop_loop_header);
__ cmp(r2, sp);
__ b(ne, &pop_loop);
@@ -1039,27 +1004,49 @@ void Deoptimizer::EntryGenerator::Generate() {
__ pop(r0); // Restore deoptimizer object (class Deoptimizer).
// Replace the current (input) frame with the output frames.
- Label outer_push_loop, inner_push_loop;
- // Outer loop state: r0 = current "FrameDescription** output_",
+ Label outer_push_loop, inner_push_loop,
+ outer_loop_header, inner_loop_header;
+ // Outer loop state: r4 = current "FrameDescription** output_",
// r1 = one past the last FrameDescription**.
__ ldr(r1, MemOperand(r0, Deoptimizer::output_count_offset()));
- __ ldr(r0, MemOperand(r0, Deoptimizer::output_offset())); // r0 is output_.
- __ add(r1, r0, Operand(r1, LSL, 2));
+ __ ldr(r4, MemOperand(r0, Deoptimizer::output_offset())); // r4 is output_.
+ __ add(r1, r4, Operand(r1, LSL, 2));
+ __ jmp(&outer_loop_header);
__ bind(&outer_push_loop);
// Inner loop state: r2 = current FrameDescription*, r3 = loop index.
- __ ldr(r2, MemOperand(r0, 0)); // output_[ix]
+ __ ldr(r2, MemOperand(r4, 0)); // output_[ix]
__ ldr(r3, MemOperand(r2, FrameDescription::frame_size_offset()));
+ __ jmp(&inner_loop_header);
__ bind(&inner_push_loop);
__ sub(r3, r3, Operand(sizeof(uint32_t)));
__ add(r6, r2, Operand(r3));
__ ldr(r7, MemOperand(r6, FrameDescription::frame_content_offset()));
__ push(r7);
- __ cmp(r3, Operand(0));
+ __ bind(&inner_loop_header);
+ __ cmp(r3, Operand::Zero());
__ b(ne, &inner_push_loop); // test for gt?
- __ add(r0, r0, Operand(kPointerSize));
- __ cmp(r0, r1);
+ __ add(r4, r4, Operand(kPointerSize));
+ __ bind(&outer_loop_header);
+ __ cmp(r4, r1);
__ b(lt, &outer_push_loop);
+ if (CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope(VFP2);
+ // Check CPU flags for number of registers, setting the Z condition flag.
+ __ CheckFor32DRegs(ip);
+
+ __ ldr(r1, MemOperand(r0, Deoptimizer::input_offset()));
+ int src_offset = FrameDescription::double_registers_offset();
+ for (int i = 0; i < DwVfpRegister::kMaxNumRegisters; ++i) {
+ if (i == kDoubleRegZero.code()) continue;
+ if (i == kScratchDoubleReg.code()) continue;
+
+ const DwVfpRegister reg = DwVfpRegister::from_code(i);
+ __ vldr(reg, r1, src_offset, i < 16 ? al : ne);
+ src_offset += kDoubleSize;
+ }
+ }
+
// Push state, pc, and continuation from the last output frame.
if (type() != OSR) {
__ ldr(r6, MemOperand(r2, FrameDescription::state_offset()));
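
The VFP save and restore around the frame shuffling keep the layout fixed whether or not the high registers exist. A sketch of the pattern, assuming CheckFor32DRegs sets the Z flag on CPUs that implement only d0-d15:

  if (has_32_d_regs) push d16..d31;           // vstm ..., ne
  else               sp -= 16 * kDoubleSize;  // sub ..., eq
  push d0..d13;  // d14/d15 are kDoubleRegZero and kScratchDoubleReg

The restore loop mirrors this: it skips the two scratch registers and predicates the d16-d31 loads on the same flag (al for i < 16, ne above).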
diff --git a/src/3rdparty/v8/src/arm/disasm-arm.cc b/src/3rdparty/v8/src/arm/disasm-arm.cc
index af2ed52..dec62b3 100644
--- a/src/3rdparty/v8/src/arm/disasm-arm.cc
+++ b/src/3rdparty/v8/src/arm/disasm-arm.cc
@@ -192,7 +192,7 @@ void Decoder::PrintSRegister(int reg) {
Print(VFPRegisters::Name(reg, false));
}
-// Print the VFP D register name according to the active name converter.
+// Print the VFP D register name according to the active name converter.
void Decoder::PrintDRegister(int reg) {
Print(VFPRegisters::Name(reg, true));
}
@@ -381,7 +381,16 @@ int Decoder::FormatVFPRegister(Instruction* instr, const char* format) {
} else if (format[1] == 'm') {
reg = instr->VFPMRegValue(precision);
} else if (format[1] == 'd') {
- reg = instr->VFPDRegValue(precision);
+ if ((instr->TypeValue() == 7) &&
+ (instr->Bit(24) == 0x0) &&
+ (instr->Bits(11, 9) == 0x5) &&
+ (instr->Bit(4) == 0x1)) {
+ // vmov.32 has Vd in a different place.
+ reg = instr->Bits(19, 16) | (instr->Bit(7) << 4);
+ } else {
+ reg = instr->VFPDRegValue(precision);
+ }
+
if (format[2] == '+') {
int immed8 = instr->Immed8Value();
if (format[0] == 'S') reg += immed8 - 1;
@@ -1098,6 +1107,8 @@ int Decoder::DecodeType7(Instruction* instr) {
// Dd = vadd(Dn, Dm)
// Dd = vsub(Dn, Dm)
// Dd = vmul(Dn, Dm)
+// Dd = vmla(Dn, Dm)
+// Dd = vmls(Dn, Dm)
// Dd = vdiv(Dn, Dm)
// vcmp(Dd, Dm)
// vmrs
@@ -1113,16 +1124,16 @@ void Decoder::DecodeTypeVFP(Instruction* instr) {
if ((instr->Opc2Value() == 0x0) && (instr->Opc3Value() == 0x1)) {
// vmov register to register.
if (instr->SzValue() == 0x1) {
- Format(instr, "vmov.f64'cond 'Dd, 'Dm");
+ Format(instr, "vmov'cond.f64 'Dd, 'Dm");
} else {
- Format(instr, "vmov.f32'cond 'Sd, 'Sm");
+ Format(instr, "vmov'cond.f32 'Sd, 'Sm");
}
} else if ((instr->Opc2Value() == 0x0) && (instr->Opc3Value() == 0x3)) {
// vabs
- Format(instr, "vabs.f64'cond 'Dd, 'Dm");
+ Format(instr, "vabs'cond.f64 'Dd, 'Dm");
} else if ((instr->Opc2Value() == 0x1) && (instr->Opc3Value() == 0x1)) {
// vneg
- Format(instr, "vneg.f64'cond 'Dd, 'Dm");
+ Format(instr, "vneg'cond.f64 'Dd, 'Dm");
} else if ((instr->Opc2Value() == 0x7) && (instr->Opc3Value() == 0x3)) {
DecodeVCVTBetweenDoubleAndSingle(instr);
} else if ((instr->Opc2Value() == 0x8) && (instr->Opc3Value() & 0x1)) {
@@ -1134,10 +1145,10 @@ void Decoder::DecodeTypeVFP(Instruction* instr) {
(instr->Opc3Value() & 0x1)) {
DecodeVCMP(instr);
} else if (((instr->Opc2Value() == 0x1)) && (instr->Opc3Value() == 0x3)) {
- Format(instr, "vsqrt.f64'cond 'Dd, 'Dm");
+ Format(instr, "vsqrt'cond.f64 'Dd, 'Dm");
} else if (instr->Opc3Value() == 0x0) {
if (instr->SzValue() == 0x1) {
- Format(instr, "vmov.f64'cond 'Dd, 'd");
+ Format(instr, "vmov'cond.f64 'Dd, 'd");
} else {
Unknown(instr); // Not used by V8.
}
@@ -1147,22 +1158,34 @@ void Decoder::DecodeTypeVFP(Instruction* instr) {
} else if (instr->Opc1Value() == 0x3) {
if (instr->SzValue() == 0x1) {
if (instr->Opc3Value() & 0x1) {
- Format(instr, "vsub.f64'cond 'Dd, 'Dn, 'Dm");
+ Format(instr, "vsub'cond.f64 'Dd, 'Dn, 'Dm");
} else {
- Format(instr, "vadd.f64'cond 'Dd, 'Dn, 'Dm");
+ Format(instr, "vadd'cond.f64 'Dd, 'Dn, 'Dm");
}
} else {
Unknown(instr); // Not used by V8.
}
} else if ((instr->Opc1Value() == 0x2) && !(instr->Opc3Value() & 0x1)) {
if (instr->SzValue() == 0x1) {
- Format(instr, "vmul.f64'cond 'Dd, 'Dn, 'Dm");
+ Format(instr, "vmul'cond.f64 'Dd, 'Dn, 'Dm");
+ } else {
+ Unknown(instr); // Not used by V8.
+ }
+ } else if ((instr->Opc1Value() == 0x0) && !(instr->Opc3Value() & 0x1)) {
+ if (instr->SzValue() == 0x1) {
+ Format(instr, "vmla'cond.f64 'Dd, 'Dn, 'Dm");
+ } else {
+ Unknown(instr); // Not used by V8.
+ }
+ } else if ((instr->Opc1Value() == 0x0) && (instr->Opc3Value() & 0x1)) {
+ if (instr->SzValue() == 0x1) {
+ Format(instr, "vmls'cond.f64 'Dd, 'Dn, 'Dm");
} else {
Unknown(instr); // Not used by V8.
}
} else if ((instr->Opc1Value() == 0x4) && !(instr->Opc3Value() & 0x1)) {
if (instr->SzValue() == 0x1) {
- Format(instr, "vdiv.f64'cond 'Dd, 'Dn, 'Dm");
+ Format(instr, "vdiv'cond.f64 'Dd, 'Dn, 'Dm");
} else {
Unknown(instr); // Not used by V8.
}
@@ -1173,6 +1196,14 @@ void Decoder::DecodeTypeVFP(Instruction* instr) {
if ((instr->VCValue() == 0x0) &&
(instr->VAValue() == 0x0)) {
DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(instr);
+ } else if ((instr->VLValue() == 0x0) &&
+ (instr->VCValue() == 0x1) &&
+ (instr->Bit(23) == 0x0)) {
+ if (instr->Bit(21) == 0x0) {
+ Format(instr, "vmov'cond.32 'Dd[0], 'rt");
+ } else {
+ Format(instr, "vmov'cond.32 'Dd[1], 'rt");
+ }
} else if ((instr->VCValue() == 0x0) &&
(instr->VAValue() == 0x7) &&
(instr->Bits(19, 16) == 0x1)) {
@@ -1220,9 +1251,9 @@ void Decoder::DecodeVCMP(Instruction* instr) {
if (dp_operation && !raise_exception_for_qnan) {
if (instr->Opc2Value() == 0x4) {
- Format(instr, "vcmp.f64'cond 'Dd, 'Dm");
+ Format(instr, "vcmp'cond.f64 'Dd, 'Dm");
} else if (instr->Opc2Value() == 0x5) {
- Format(instr, "vcmp.f64'cond 'Dd, #0.0");
+ Format(instr, "vcmp'cond.f64 'Dd, #0.0");
} else {
Unknown(instr); // invalid
}
@@ -1239,9 +1270,9 @@ void Decoder::DecodeVCVTBetweenDoubleAndSingle(Instruction* instr) {
bool double_to_single = (instr->SzValue() == 1);
if (double_to_single) {
- Format(instr, "vcvt.f32.f64'cond 'Sd, 'Dm");
+ Format(instr, "vcvt'cond.f32.f64 'Sd, 'Dm");
} else {
- Format(instr, "vcvt.f64.f32'cond 'Dd, 'Sm");
+ Format(instr, "vcvt'cond.f64.f32 'Dd, 'Sm");
}
}
@@ -1258,15 +1289,15 @@ void Decoder::DecodeVCVTBetweenFloatingPointAndInteger(Instruction* instr) {
if (dp_operation) {
if (unsigned_integer) {
- Format(instr, "vcvt.u32.f64'cond 'Sd, 'Dm");
+ Format(instr, "vcvt'cond.u32.f64 'Sd, 'Dm");
} else {
- Format(instr, "vcvt.s32.f64'cond 'Sd, 'Dm");
+ Format(instr, "vcvt'cond.s32.f64 'Sd, 'Dm");
}
} else {
if (unsigned_integer) {
- Format(instr, "vcvt.u32.f32'cond 'Sd, 'Sm");
+ Format(instr, "vcvt'cond.u32.f32 'Sd, 'Sm");
} else {
- Format(instr, "vcvt.s32.f32'cond 'Sd, 'Sm");
+ Format(instr, "vcvt'cond.s32.f32 'Sd, 'Sm");
}
}
} else {
@@ -1274,15 +1305,15 @@ void Decoder::DecodeVCVTBetweenFloatingPointAndInteger(Instruction* instr) {
if (dp_operation) {
if (unsigned_integer) {
- Format(instr, "vcvt.f64.u32'cond 'Dd, 'Sm");
+ Format(instr, "vcvt'cond.f64.u32 'Dd, 'Sm");
} else {
- Format(instr, "vcvt.f64.s32'cond 'Dd, 'Sm");
+ Format(instr, "vcvt'cond.f64.s32 'Dd, 'Sm");
}
} else {
if (unsigned_integer) {
- Format(instr, "vcvt.f32.u32'cond 'Sd, 'Sm");
+ Format(instr, "vcvt'cond.f32.u32 'Sd, 'Sm");
} else {
- Format(instr, "vcvt.f32.s32'cond 'Sd, 'Sm");
+ Format(instr, "vcvt'cond.f32.s32 'Sd, 'Sm");
}
}
}
@@ -1336,7 +1367,7 @@ void Decoder::DecodeType6CoprocessorIns(Instruction* instr) {
switch (instr->OpcodeValue()) {
case 0x2:
// Load and store double to two GP registers
- if (instr->Bits(7, 4) != 0x1) {
+ if (instr->Bits(7, 6) != 0 || instr->Bit(4) != 1) {
Unknown(instr); // Not used by V8.
} else if (instr->HasL()) {
Format(instr, "vmov'cond 'rt, 'rn, 'Dm");
@@ -1345,6 +1376,7 @@ void Decoder::DecodeType6CoprocessorIns(Instruction* instr) {
}
break;
case 0x8:
+ case 0xA:
if (instr->HasL()) {
Format(instr, "vldr'cond 'Dd, ['rn - 4*'imm08@00]");
} else {
@@ -1352,6 +1384,7 @@ void Decoder::DecodeType6CoprocessorIns(Instruction* instr) {
}
break;
case 0xC:
+ case 0xE:
if (instr->HasL()) {
Format(instr, "vldr'cond 'Dd, ['rn + 4*'imm08@00]");
} else {
@@ -1360,7 +1393,10 @@ void Decoder::DecodeType6CoprocessorIns(Instruction* instr) {
break;
case 0x4:
case 0x5:
- case 0x9: {
+ case 0x6:
+ case 0x7:
+ case 0x9:
+ case 0xB: {
bool to_vfp_register = (instr->VLValue() == 0x1);
if (to_vfp_register) {
Format(instr, "vldm'cond'pu 'rn'w, {'Dd-'Dd+}");
diff --git a/src/3rdparty/v8/src/arm/frames-arm.h b/src/3rdparty/v8/src/arm/frames-arm.h
index a10acd0..ee9fc0e 100644
--- a/src/3rdparty/v8/src/arm/frames-arm.h
+++ b/src/3rdparty/v8/src/arm/frames-arm.h
@@ -134,20 +134,6 @@ class ExitFrameConstants : public AllStatic {
};
-class StandardFrameConstants : public AllStatic {
- public:
- // Fixed part of the frame consists of return address, caller fp,
- // context and function.
- static const int kFixedFrameSize = 4 * kPointerSize;
- static const int kExpressionsOffset = -3 * kPointerSize;
- static const int kMarkerOffset = -2 * kPointerSize;
- static const int kContextOffset = -1 * kPointerSize;
- static const int kCallerFPOffset = 0 * kPointerSize;
- static const int kCallerPCOffset = 1 * kPointerSize;
- static const int kCallerSPOffset = 2 * kPointerSize;
-};
-
-
class JavaScriptFrameConstants : public AllStatic {
public:
// FP-relative.
diff --git a/src/3rdparty/v8/src/arm/full-codegen-arm.cc b/src/3rdparty/v8/src/arm/full-codegen-arm.cc
index 9a7b116..36580c7 100644
--- a/src/3rdparty/v8/src/arm/full-codegen-arm.cc
+++ b/src/3rdparty/v8/src/arm/full-codegen-arm.cc
@@ -130,7 +130,7 @@ void FullCodeGenerator::Generate() {
handler_table_ =
isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
profiling_counter_ = isolate()->factory()->NewJSGlobalPropertyCell(
- Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget)));
+ Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
SetFunctionPosition(function());
Comment cmnt(masm_, "[ function compiled by full code generator");
@@ -138,7 +138,7 @@ void FullCodeGenerator::Generate() {
#ifdef DEBUG
if (strlen(FLAG_stop_at) > 0 &&
- info->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
+ info->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
__ stop("stop-at");
}
#endif
@@ -149,15 +149,12 @@ void FullCodeGenerator::Generate() {
// function calls.
if (!info->is_classic_mode() || info->is_native()) {
Label ok;
- Label begin;
- __ bind(&begin);
- __ cmp(r5, Operand(0));
+ __ cmp(r5, Operand::Zero());
__ b(eq, &ok);
int receiver_offset = info->scope()->num_parameters() * kPointerSize;
__ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
__ str(r2, MemOperand(sp, receiver_offset));
__ bind(&ok);
- ASSERT_EQ(kSizeOfFullCodegenStrictModePrologue, ok.pos() - begin.pos());
}
// Open a frame scope to indicate that there is a frame on the stack. The
@@ -167,14 +164,19 @@ void FullCodeGenerator::Generate() {
int locals_count = info->scope()->num_stack_slots();
- // The following four instructions must remain together and unmodified for
- // code aging to work properly.
- __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
- // Load undefined value here, so the value is ready for the loop
- // below.
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- // Adjust fp to point to caller's fp.
- __ add(fp, sp, Operand(2 * kPointerSize));
+ info->set_prologue_offset(masm_->pc_offset());
+ {
+ PredictableCodeSizeScope predictable_code_size_scope(
+ masm_, kNoCodeAgeSequenceLength * Assembler::kInstrSize);
+ // The following three instructions must remain together and unmodified
+ // for code aging to work properly.
+ __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
+ // Load undefined value here, so the value is ready for the loop
+ // below.
+ __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+ // Adjust FP to point to saved FP.
+ __ add(fp, sp, Operand(2 * kPointerSize));
+ }
{ Comment cmnt(masm_, "[ Allocate locals");
for (int i = 0; i < locals_count; i++) {
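
The prologue above is now wrapped in a PredictableCodeSizeScope sized to kNoCodeAgeSequenceLength instructions, so the code-aging machinery can later overwrite exactly that many words. A minimal sketch of what such a scope enforces (assumed behavior, not the V8 implementation):

    #include <cassert>

    // Stand-in for the assembler: only pc_offset() is needed for this sketch.
    struct AssemblerStub { int pc = 0; int pc_offset() const { return pc; } };

    class PredictableCodeSizeScopeSketch {
     public:
      PredictableCodeSizeScopeSketch(AssemblerStub* masm, int expected_size)
          : masm_(masm), start_(masm->pc_offset()), expected_(expected_size) {}
      ~PredictableCodeSizeScopeSketch() {
        // The emitted sequence must be exactly the promised size; anything
        // that could change it (const-pool emission, veneers) stays blocked.
        assert(masm_->pc_offset() - start_ == expected_);
      }
     private:
      AssemblerStub* masm_;
      int start_;
      int expected_;
    };
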
@@ -291,7 +293,7 @@ void FullCodeGenerator::Generate() {
__ LoadRoot(ip, Heap::kStackLimitRootIndex);
__ cmp(sp, Operand(ip));
__ b(hs, &ok);
- PredictableCodeSizeScope predictable(masm_);
+ PredictableCodeSizeScope predictable(masm_, 2 * Assembler::kInstrSize);
StackCheckStub stub;
__ CallStub(&stub);
__ bind(&ok);
@@ -346,42 +348,31 @@ void FullCodeGenerator::EmitProfilingCounterReset() {
}
-void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt,
- Label* back_edge_target) {
- Comment cmnt(masm_, "[ Stack check");
+void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
+ Label* back_edge_target) {
+ Comment cmnt(masm_, "[ Back edge bookkeeping");
// Block literal pools whilst emitting stack check code.
Assembler::BlockConstPoolScope block_const_pool(masm_);
Label ok;
- if (FLAG_count_based_interrupts) {
- int weight = 1;
- if (FLAG_weighted_back_edges) {
- ASSERT(back_edge_target->is_bound());
- int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
- weight = Min(kMaxBackEdgeWeight,
- Max(1, distance / kBackEdgeDistanceUnit));
- }
- EmitProfilingCounterDecrement(weight);
- __ b(pl, &ok);
- InterruptStub stub;
- __ CallStub(&stub);
- } else {
- __ LoadRoot(ip, Heap::kStackLimitRootIndex);
- __ cmp(sp, Operand(ip));
- __ b(hs, &ok);
- PredictableCodeSizeScope predictable(masm_);
- StackCheckStub stub;
- __ CallStub(&stub);
+ int weight = 1;
+ if (FLAG_weighted_back_edges) {
+ ASSERT(back_edge_target->is_bound());
+ int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
+ weight = Min(kMaxBackEdgeWeight,
+ Max(1, distance / kBackEdgeDistanceUnit));
}
+ EmitProfilingCounterDecrement(weight);
+ __ b(pl, &ok);
+ InterruptStub stub;
+ __ CallStub(&stub);
// Record a mapping of this PC offset to the OSR id. This is used to find
// the AST id from the unoptimized code in order to use it as a key into
// the deoptimization input data found in the optimized code.
- RecordStackCheck(stmt->OsrEntryId());
+ RecordBackEdge(stmt->OsrEntryId());
- if (FLAG_count_based_interrupts) {
- EmitProfilingCounterReset();
- }
+ EmitProfilingCounterReset();
__ bind(&ok);
PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
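
After this change the interrupt budget is decremented on every back edge, weighted by the size of the loop body. A worked sketch of the weight formula, with illustrative values for the two constants (the real kMaxBackEdgeWeight and kBackEdgeDistanceUnit are defined elsewhere in this tree):

    #include <algorithm>
    #include <cstdio>

    // Illustrative constants; the actual values are target-specific.
    const int kMaxBackEdgeWeight = 127;
    const int kBackEdgeDistanceUnit = 142;

    // Bigger loop bodies drain the profiling counter faster, so hot tight
    // loops and hot large loops trip the InterruptStub at similar rates.
    int BackEdgeWeight(int distance_in_bytes) {
      return std::min(kMaxBackEdgeWeight,
                      std::max(1, distance_in_bytes / kBackEdgeDistanceUnit));
    }

    int main() {
      printf("%d %d %d\n", BackEdgeWeight(40), BackEdgeWeight(1500),
             BackEdgeWeight(100000));  // prints: 1 10 127
    }
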
@@ -443,7 +434,8 @@ void FullCodeGenerator::EmitReturnSequence() {
// tool from instrumenting as we rely on the code size here.
int32_t sp_delta = (info_->scope()->num_parameters() + 1) * kPointerSize;
CodeGenerator::RecordPositions(masm_, function()->end_position() - 1);
- PredictableCodeSizeScope predictable(masm_);
+ // TODO(svenpanne) The code below is sometimes 4 words, sometimes 5!
+ PredictableCodeSizeScope predictable(masm_, -1);
__ RecordJSReturn();
masm_->mov(sp, fp);
masm_->ldm(ia_w, sp, fp.bit() | lr.bit());
@@ -684,7 +676,7 @@ void FullCodeGenerator::DoTest(Expression* condition,
Label* if_false,
Label* fall_through) {
ToBooleanStub stub(result_register());
- __ CallStub(&stub);
+ __ CallStub(&stub, condition->test_id());
__ tst(result_register(), result_register());
Split(ne, if_true, if_false, fall_through);
}
@@ -813,7 +805,8 @@ void FullCodeGenerator::VisitVariableDeclaration(
? isolate()->factory()->the_hole_value()
: isolate()->factory()->undefined_value(),
zone());
- globals_->Add(isolate()->factory()->ToBoolean(variable->is_qml_global()), zone());
+ globals_->Add(isolate()->factory()->ToBoolean(variable->is_qml_global()),
+ zone());
break;
case Variable::PARAMETER:
@@ -874,7 +867,8 @@ void FullCodeGenerator::VisitFunctionDeclaration(
// Check for stack-overflow exception.
if (function.is_null()) return SetStackOverflow();
globals_->Add(function, zone());
- globals_->Add(isolate()->factory()->ToBoolean(variable->is_qml_global()), zone());
+ globals_->Add(isolate()->factory()->ToBoolean(variable->is_qml_global()),
+ zone());
break;
}
@@ -920,35 +914,33 @@ void FullCodeGenerator::VisitFunctionDeclaration(
void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* declaration) {
- VariableProxy* proxy = declaration->proxy();
- Variable* variable = proxy->var();
- Handle<JSModule> instance = declaration->module()->interface()->Instance();
- ASSERT(!instance.is_null());
+ Variable* variable = declaration->proxy()->var();
+ ASSERT(variable->location() == Variable::CONTEXT);
+ ASSERT(variable->interface()->IsFrozen());
- switch (variable->location()) {
- case Variable::UNALLOCATED: {
- Comment cmnt(masm_, "[ ModuleDeclaration");
- globals_->Add(variable->name(), zone());
- globals_->Add(instance, zone());
- globals_->Add(isolate()->factory()->ToBoolean(variable->is_qml_global()), zone());
- Visit(declaration->module());
- break;
- }
+ Comment cmnt(masm_, "[ ModuleDeclaration");
+ EmitDebugCheckDeclarationContext(variable);
- case Variable::CONTEXT: {
- Comment cmnt(masm_, "[ ModuleDeclaration");
- EmitDebugCheckDeclarationContext(variable);
- __ mov(r1, Operand(instance));
- __ str(r1, ContextOperand(cp, variable->index()));
- Visit(declaration->module());
- break;
- }
+ // Load instance object.
+ __ LoadContext(r1, scope_->ContextChainLength(scope_->GlobalScope()));
+ __ ldr(r1, ContextOperand(r1, variable->interface()->Index()));
+ __ ldr(r1, ContextOperand(r1, Context::EXTENSION_INDEX));
- case Variable::PARAMETER:
- case Variable::LOCAL:
- case Variable::LOOKUP:
- UNREACHABLE();
- }
+ // Assign it.
+ __ str(r1, ContextOperand(cp, variable->index()));
+ // We know that we have written a module, which is not a smi.
+ __ RecordWriteContextSlot(cp,
+ Context::SlotOffset(variable->index()),
+ r1,
+ r3,
+ kLRHasBeenSaved,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ PrepareForBailoutForId(declaration->proxy()->id(), NO_REGISTERS);
+
+ // Traverse into body.
+ Visit(declaration->module());
}
@@ -991,6 +983,14 @@ void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
}
+void FullCodeGenerator::DeclareModules(Handle<FixedArray> descriptions) {
+ // Call the runtime to declare the modules.
+ __ Push(descriptions);
+ __ CallRuntime(Runtime::kDeclareModules, 1);
+ // Return value is ignored.
+}
+
+
void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
Comment cmnt(masm_, "[ SwitchStatement");
Breakable nested_statement(this, stmt);
@@ -1040,11 +1040,11 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Record position before stub call for type feedback.
SetSourcePosition(clause->position());
- Handle<Code> ic = CompareIC::GetUninitialized(Token::EQ_STRICT);
+ Handle<Code> ic = CompareIC::GetUninitialized(isolate(), Token::EQ_STRICT);
CallIC(ic, RelocInfo::CODE_TARGET, clause->CompareId());
patch_site.EmitPatchInfo();
- __ cmp(r0, Operand(0));
+ __ cmp(r0, Operand::Zero());
__ b(ne, &next_test);
__ Drop(1); // Switch value is no longer needed.
__ b(clause->body_target());
@@ -1169,7 +1169,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
Handle<JSGlobalPropertyCell> cell =
isolate()->factory()->NewJSGlobalPropertyCell(
Handle<Object>(
- Smi::FromInt(TypeFeedbackCells::kForInFastCaseMarker)));
+ Smi::FromInt(TypeFeedbackCells::kForInFastCaseMarker),
+ isolate()));
RecordTypeFeedbackCell(stmt->ForInFeedbackId(), cell);
__ LoadHeapObject(r1, cell);
__ mov(r2, Operand(Smi::FromInt(TypeFeedbackCells::kForInSlowCaseMarker)));
@@ -1245,7 +1246,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ add(r0, r0, Operand(Smi::FromInt(1)));
__ push(r0);
- EmitStackCheck(stmt, &loop);
+ EmitBackEdgeBookkeeping(stmt, &loop);
__ b(&loop);
// Remove the pointers stored on the stack.
@@ -1341,7 +1342,9 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
__ bind(&fast);
}
- __ ldr(r0, var->is_qml_global()?QmlGlobalObjectOperand():GlobalObjectOperand());
+ __ ldr(r0, var->is_qml_global()
+ ? QmlGlobalObjectOperand()
+ : GlobalObjectOperand());
__ mov(r2, Operand(var->name()));
RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
? RelocInfo::CODE_TARGET
@@ -1398,9 +1401,9 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(Variable* var,
} else if (var->mode() == DYNAMIC_LOCAL) {
Variable* local = var->local_if_not_shadowed();
__ ldr(r0, ContextSlotOperandCheckExtensions(local, slow));
- if (local->mode() == CONST ||
- local->mode() == CONST_HARMONY ||
- local->mode() == LET) {
+ if (local->mode() == LET ||
+ local->mode() == CONST ||
+ local->mode() == CONST_HARMONY) {
__ CompareRoot(r0, Heap::kTheHoleValueRootIndex);
if (local->mode() == CONST) {
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
@@ -1428,7 +1431,9 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
Comment cmnt(masm_, "Global variable");
// Use inline caching. Variable name is passed in r2 and the global
// object (receiver) in r0.
- __ ldr(r0, var->is_qml_global()?QmlGlobalObjectOperand():GlobalObjectOperand());
+ __ ldr(r0, var->is_qml_global()
+ ? QmlGlobalObjectOperand()
+ : GlobalObjectOperand());
__ mov(r2, Operand(var->name()));
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
CallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
@@ -1584,7 +1589,7 @@ void FullCodeGenerator::EmitAccessor(Expression* expression) {
void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Comment cmnt(masm_, "[ ObjectLiteral");
Handle<FixedArray> constant_properties = expr->constant_properties();
- __ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ ldr(r3, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
__ mov(r2, Operand(Smi::FromInt(expr->literal_index())));
__ mov(r1, Operand(constant_properties));
@@ -1595,12 +1600,13 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
? ObjectLiteral::kHasFunction
: ObjectLiteral::kNoFlags;
__ mov(r0, Operand(Smi::FromInt(flags)));
- __ Push(r3, r2, r1, r0);
int properties_count = constant_properties->length() / 2;
if (expr->depth() > 1) {
+ __ Push(r3, r2, r1, r0);
__ CallRuntime(Runtime::kCreateObjectLiteral, 4);
- } else if (flags != ObjectLiteral::kFastElements ||
+ } else if (Serializer::enabled() || flags != ObjectLiteral::kFastElements ||
properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
+ __ Push(r3, r2, r1, r0);
__ CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
} else {
FastCloneShallowObjectStub stub(properties_count);
@@ -1634,7 +1640,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
ASSERT(!CompileTimeValue::IsCompileTimeValue(property->value()));
// Fall through.
case ObjectLiteral::Property::COMPUTED:
- if (key->handle()->IsSymbol()) {
+ if (key->handle()->IsInternalizedString()) {
if (property->emit_store()) {
VisitForAccumulatorValue(value);
__ mov(r2, Operand(key->handle()));
@@ -1724,7 +1730,9 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
if (has_fast_elements && constant_elements_values->map() ==
isolate()->heap()->fixed_cow_array_map()) {
FastCloneShallowArrayStub stub(
- FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS, length);
+ FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS,
+ DONT_TRACK_ALLOCATION_SITE,
+ length);
__ CallStub(&stub);
__ IncrementCounter(
isolate()->counters()->cow_arrays_created_stub(), 1, r1, r2);
@@ -1735,10 +1743,17 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
} else {
ASSERT(IsFastSmiOrObjectElementsKind(constant_elements_kind) ||
FLAG_smi_only_arrays);
- FastCloneShallowArrayStub::Mode mode = has_fast_elements
- ? FastCloneShallowArrayStub::CLONE_ELEMENTS
- : FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS;
- FastCloneShallowArrayStub stub(mode, length);
+ FastCloneShallowArrayStub::Mode mode =
+ FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS;
+ AllocationSiteMode allocation_site_mode = FLAG_track_allocation_sites
+ ? TRACK_ALLOCATION_SITE : DONT_TRACK_ALLOCATION_SITE;
+
+ if (has_fast_elements) {
+ mode = FastCloneShallowArrayStub::CLONE_ELEMENTS;
+ allocation_site_mode = DONT_TRACK_ALLOCATION_SITE;
+ }
+
+ FastCloneShallowArrayStub stub(mode, allocation_site_mode, length);
__ CallStub(&stub);
}
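
The array-literal clone stub now carries an AllocationSiteMode alongside the clone mode. A compact restatement of the selection logic above (a sketch; the names follow this diff):

    enum CloneMode { CLONE_ELEMENTS, CLONE_ANY_ELEMENTS, COPY_ON_WRITE_ELEMENTS };
    enum AllocationSiteMode { DONT_TRACK_ALLOCATION_SITE, TRACK_ALLOCATION_SITE };

    struct StubConfig { CloneMode mode; AllocationSiteMode sites; };

    // Only the generic CLONE_ANY_ELEMENTS path participates in allocation-site
    // tracking, and only when the flag is on; COW and known-fast literals
    // opt out.
    StubConfig SelectCloneStub(bool is_cow, bool has_fast_elements,
                               bool track_allocation_sites) {
      if (is_cow) return {COPY_ON_WRITE_ELEMENTS, DONT_TRACK_ALLOCATION_SITE};
      if (has_fast_elements) return {CLONE_ELEMENTS, DONT_TRACK_ALLOCATION_SITE};
      return {CLONE_ANY_ELEMENTS, track_allocation_sites
                                      ? TRACK_ALLOCATION_SITE
                                      : DONT_TRACK_ALLOCATION_SITE};
    }
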
@@ -1944,7 +1959,7 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
__ bind(&stub_call);
BinaryOpStub stub(op, mode);
- CallIC(stub.GetCode(), RelocInfo::CODE_TARGET,
+ CallIC(stub.GetCode(isolate()), RelocInfo::CODE_TARGET,
expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
__ jmp(&done);
@@ -1996,7 +2011,7 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
__ mov(ip, Operand(scratch1, ASR, 31));
__ cmp(ip, Operand(scratch2));
__ b(ne, &stub_call);
- __ cmp(scratch1, Operand(0));
+ __ cmp(scratch1, Operand::Zero());
__ mov(right, Operand(scratch1), LeaveCC, ne);
__ b(ne, &done);
__ add(scratch2, right, Operand(left), SetCC);
@@ -2028,7 +2043,7 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
__ pop(r1);
BinaryOpStub stub(op, mode);
JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
- CallIC(stub.GetCode(), RelocInfo::CODE_TARGET,
+ CallIC(stub.GetCode(isolate()), RelocInfo::CODE_TARGET,
expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
context()->Plug(r0);
@@ -2036,7 +2051,7 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
void FullCodeGenerator::EmitAssignment(Expression* expr) {
- // Invalid left-hand sides are rewritten to have a 'throw
+ // Invalid left-hand sides are rewritten by the parser to have a 'throw
// ReferenceError' on the left-hand side.
if (!expr->IsValidLeftHandSide()) {
VisitForEffect(expr);
@@ -2096,7 +2111,9 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
if (var->IsUnallocated()) {
// Global var, const, or let.
__ mov(r2, Operand(var->name()));
- __ ldr(r1, var->is_qml_global()?QmlGlobalObjectOperand():GlobalObjectOperand());
+ __ ldr(r1, var->is_qml_global()
+ ? QmlGlobalObjectOperand()
+ : GlobalObjectOperand());
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->StoreIC_Initialize()
: isolate()->builtins()->StoreIC_Initialize_Strict();
@@ -2335,7 +2352,7 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) {
CallFunctionStub stub(arg_count, flags);
__ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
- __ CallStub(&stub);
+ __ CallStub(&stub, expr->CallFeedbackId());
RecordJSReturnSite(expr);
// Restore context register.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -2385,7 +2402,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
VariableProxy* proxy = callee->AsVariableProxy();
Property* property = callee->AsProperty();
- if (proxy != NULL && proxy->var()->is_possibly_eval()) {
+ if (proxy != NULL && proxy->var()->is_possibly_eval(isolate())) {
// In a call to eval, we first call %ResolvePossiblyDirectEval to
// resolve the function we need to call and the receiver of the
// call. Then we call the resolved function using the given
@@ -2426,7 +2443,9 @@ void FullCodeGenerator::VisitCall(Call* expr) {
context()->DropAndPlug(1, r0);
} else if (proxy != NULL && proxy->var()->IsUnallocated()) {
// Push global object as receiver for the call IC.
- __ ldr(r0, proxy->var()->is_qml_global()?QmlGlobalObjectOperand():GlobalObjectOperand());
+ __ ldr(r0, proxy->var()->is_qml_global()
+ ? QmlGlobalObjectOperand()
+ : GlobalObjectOperand());
__ push(r0);
EmitCallWithIC(expr, proxy->name(), RelocInfo::CODE_TARGET_CONTEXT);
} else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
@@ -2534,7 +2553,7 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
__ mov(r2, Operand(cell));
CallConstructStub stub(RECORD_CALL_TARGET);
- __ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
+ __ Call(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL);
PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
context()->Plug(r0);
}
@@ -2689,14 +2708,14 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
__ cmp(r2, ip);
__ b(eq, if_false);
- // Look for valueOf symbol in the descriptor array, and indicate false if
+ // Look for valueOf name in the descriptor array, and indicate false if
// found. Since we omit an enumeration index check, if it is added via a
// transition that shares its descriptor array, this is a false positive.
Label entry, loop, done;
// Skip loop if no descriptors are valid.
__ NumberOfOwnDescriptors(r3, r1);
- __ cmp(r3, Operand(0));
+ __ cmp(r3, Operand::Zero());
__ b(eq, &done);
__ LoadInstanceDescriptors(r1, r4);
@@ -2714,10 +2733,10 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
__ add(r2, r2, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
// Loop through all the keys in the descriptor array. If one of these is the
- // symbol valueOf the result is false.
- // The use of ip to store the valueOf symbol asumes that it is not otherwise
+ // string "valueOf" the result is false.
+ // The use of ip to store the valueOf string assumes that it is not otherwise
// used in the loop below.
- __ mov(ip, Operand(FACTORY->value_of_symbol()));
+ __ mov(ip, Operand(FACTORY->value_of_string()));
__ jmp(&entry);
__ bind(&loop);
__ ldr(r3, MemOperand(r4, 0));
@@ -2752,6 +2771,28 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
}
+void FullCodeGenerator::EmitIsSymbol(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ __ JumpIfSmi(r0, if_false);
+ __ CompareObjectType(r0, r1, r2, SYMBOL_TYPE);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(eq, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
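
The new EmitIsSymbol intrinsic above reduces to a smi check plus an instance-type comparison. As a predicate (a sketch; SYMBOL_TYPE is the instance type introduced for harmony symbols):

    enum InstanceType { SYMBOL_TYPE = 1 /* illustrative value */, OTHER_TYPE };

    struct TaggedValue {
      bool is_smi;
      InstanceType instance_type;  // valid only when !is_smi
    };

    // Sketch of the generated check: a symbol is a non-smi heap object whose
    // map records the SYMBOL_TYPE instance type.
    bool IsSymbolSketch(const TaggedValue& v) {
      if (v.is_smi) return false;             // __ JumpIfSmi(r0, if_false)
      return v.instance_type == SYMBOL_TYPE;  // __ CompareObjectType(..., SYMBOL_TYPE)
    }
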
@@ -2952,12 +2993,12 @@ void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
// Functions have class 'Function'.
__ bind(&function);
- __ LoadRoot(r0, Heap::kfunction_class_symbolRootIndex);
+ __ LoadRoot(r0, Heap::kfunction_class_stringRootIndex);
__ jmp(&done);
// Objects with a non-function constructor have class 'Object'.
__ bind(&non_function_constructor);
- __ LoadRoot(r0, Heap::kObject_symbolRootIndex);
+ __ LoadRoot(r0, Heap::kObject_stringRootIndex);
__ jmp(&done);
// Non-JS objects have class null.
@@ -3027,7 +3068,7 @@ void FullCodeGenerator::EmitRandomHeapNumber(CallRuntime* expr) {
// Move 0x41300000xxxxxxxx (x = random bits) to VFP.
__ vmov(d7, r0, r1);
// Move 0x4130000000000000 to VFP.
- __ mov(r0, Operand(0, RelocInfo::NONE));
+ __ mov(r0, Operand::Zero());
__ vmov(d8, r0, r1);
// Subtract and store the result in the heap number.
__ vsub(d7, d7, d8);
@@ -3140,6 +3181,39 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT_EQ(3, args->length());
+
+ VisitForStackValue(args->at(1)); // index
+ VisitForStackValue(args->at(2)); // value
+ __ pop(r2);
+ __ pop(r1);
+ VisitForAccumulatorValue(args->at(0)); // string
+
+ static const String::Encoding encoding = String::ONE_BYTE_ENCODING;
+ SeqStringSetCharGenerator::Generate(masm_, encoding, r0, r1, r2);
+ context()->Plug(r0);
+}
+
+
+void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT_EQ(3, args->length());
+
+ VisitForStackValue(args->at(1)); // index
+ VisitForStackValue(args->at(2)); // value
+ __ pop(r2);
+ __ pop(r1);
+ VisitForAccumulatorValue(args->at(0)); // string
+
+ static const String::Encoding encoding = String::TWO_BYTE_ENCODING;
+ SeqStringSetCharGenerator::Generate(masm_, encoding, r0, r1, r2);
+ context()->Plug(r0);
+}
+
+
void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
// Load the arguments on the stack and call the runtime function.
ZoneList<Expression*>* args = expr->arguments();
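
Both new intrinsics above feed SeqStringSetCharGenerator, which stores one character into a sequential string's payload. A sketch of the addressing it implies, with hypothetical header-size and tag constants (V8 heap pointers carry a low tag bit; index and value are assumed already untagged here):

    #include <cstdint>

    // Hypothetical layout constants for the sketch; the real kHeaderSize and
    // kHeapObjectTag come from the V8 object definitions.
    const int kHeapObjectTag = 1;
    const int kSeqStringHeaderSize = 12;

    void SeqStringSetChar(uint8_t* tagged_string, int index, uint16_t value,
                          bool two_byte) {
      uint8_t* payload = tagged_string - kHeapObjectTag + kSeqStringHeaderSize;
      if (two_byte) {
        reinterpret_cast<uint16_t*>(payload)[index] = value;  // strh
      } else {
        payload[index] = static_cast<uint8_t>(value);         // strb
      }
    }
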
@@ -3289,7 +3363,7 @@ void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
__ bind(&index_out_of_range);
// When the index is out of range, the spec requires us to return
// the empty string.
- __ LoadRoot(result, Heap::kEmptyStringRootIndex);
+ __ LoadRoot(result, Heap::kempty_stringRootIndex);
__ jmp(&done);
__ bind(&need_conversion);
@@ -3598,7 +3672,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ ldr(array_length, FieldMemOperand(array, JSArray::kLengthOffset));
__ SmiUntag(array_length, SetCC);
__ b(ne, &non_trivial_array);
- __ LoadRoot(r0, Heap::kEmptyStringRootIndex);
+ __ LoadRoot(r0, Heap::kempty_stringRootIndex);
__ b(&done);
__ bind(&non_trivial_array);
@@ -3610,7 +3684,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// Check that all array elements are sequential ASCII strings, and
// accumulate the sum of their lengths, as a smi-encoded value.
- __ mov(string_length, Operand(0));
+ __ mov(string_length, Operand::Zero());
__ add(element,
elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ add(elements_end, element, Operand(array_length, LSL, kPointerSizeLog2));
@@ -3623,7 +3697,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// element: Current array element.
// elements_end: Array end.
if (generate_debug_code_) {
- __ cmp(array_length, Operand(0));
+ __ cmp(array_length, Operand::Zero());
__ Assert(gt, "No empty arrays here in EmitFastAsciiArrayJoin");
}
__ bind(&loop);
@@ -3632,7 +3706,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ ldr(scratch1, FieldMemOperand(string, HeapObject::kMapOffset));
__ ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
__ JumpIfInstanceTypeIsNotSequentialAscii(scratch1, scratch2, &bailout);
- __ ldr(scratch1, FieldMemOperand(string, SeqAsciiString::kLengthOffset));
+ __ ldr(scratch1, FieldMemOperand(string, SeqOneByteString::kLengthOffset));
__ add(string_length, string_length, Operand(scratch1), SetCC);
__ b(vs, &bailout);
__ cmp(element, elements_end);
@@ -3661,12 +3735,12 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// Add (separator length times array_length) - separator length to the
// string_length to get the length of the result string. array_length is not
// smi but the other values are, so the result is a smi
- __ ldr(scratch1, FieldMemOperand(separator, SeqAsciiString::kLengthOffset));
+ __ ldr(scratch1, FieldMemOperand(separator, SeqOneByteString::kLengthOffset));
__ sub(string_length, string_length, Operand(scratch1));
__ smull(scratch2, ip, array_length, scratch1);
// Check for smi overflow. No overflow if higher 33 bits of 64-bit result are
// zero.
- __ cmp(ip, Operand(0));
+ __ cmp(ip, Operand::Zero());
__ b(ne, &bailout);
__ tst(scratch2, Operand(0x80000000));
__ b(ne, &bailout);
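
The two branches above implement the smi-overflow predicate on the 64-bit smull result. Restated in C++, assuming 32-bit smis whose payload must fit in 31 bits:

    #include <cstdint>

    // smull produces lo (scratch2) and hi (ip). The product only fits in a
    // smi if the top 33 bits are zero: the high word must be 0 and the low
    // word's sign bit clear.
    bool SmiProductOverflows(int32_t hi, uint32_t lo) {
      return hi != 0 ||                // __ cmp(ip, Operand::Zero()); b(ne)
             (lo & 0x80000000u) != 0;  // __ tst(scratch2, 0x80000000); b(ne)
    }
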
@@ -3699,10 +3773,10 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
array_length = no_reg;
__ add(result_pos,
result,
- Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
// Check the length of the separator.
- __ ldr(scratch1, FieldMemOperand(separator, SeqAsciiString::kLengthOffset));
+ __ ldr(scratch1, FieldMemOperand(separator, SeqOneByteString::kLengthOffset));
__ cmp(scratch1, Operand(Smi::FromInt(1)));
__ b(eq, &one_char_separator);
__ b(gt, &long_separator);
@@ -3718,7 +3792,9 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ ldr(string, MemOperand(element, kPointerSize, PostIndex));
__ ldr(string_length, FieldMemOperand(string, String::kLengthOffset));
__ SmiUntag(string_length);
- __ add(string, string, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ add(string,
+ string,
+ Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
__ CopyBytes(string, result_pos, string_length, scratch1);
__ cmp(element, elements_end);
__ b(lt, &empty_separator_loop); // End while (element < elements_end).
@@ -3728,7 +3804,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// One-character separator case
__ bind(&one_char_separator);
// Replace separator with its ASCII character value.
- __ ldrb(separator, FieldMemOperand(separator, SeqAsciiString::kHeaderSize));
+ __ ldrb(separator, FieldMemOperand(separator, SeqOneByteString::kHeaderSize));
// Jump into the loop after the code that copies the separator, so the first
// element is not preceded by a separator
__ jmp(&one_char_separator_loop_entry);
@@ -3748,7 +3824,9 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ ldr(string, MemOperand(element, kPointerSize, PostIndex));
__ ldr(string_length, FieldMemOperand(string, String::kLengthOffset));
__ SmiUntag(string_length);
- __ add(string, string, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ add(string,
+ string,
+ Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
__ CopyBytes(string, result_pos, string_length, scratch1);
__ cmp(element, elements_end);
__ b(lt, &one_char_separator_loop); // End while (element < elements_end).
@@ -3769,14 +3847,16 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ SmiUntag(string_length);
__ add(string,
separator,
- Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
__ CopyBytes(string, result_pos, string_length, scratch1);
__ bind(&long_separator);
__ ldr(string, MemOperand(element, kPointerSize, PostIndex));
__ ldr(string_length, FieldMemOperand(string, String::kLengthOffset));
__ SmiUntag(string_length);
- __ add(string, string, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ add(string,
+ string,
+ Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
__ CopyBytes(string, result_pos, string_length, scratch1);
__ cmp(element, elements_end);
__ b(lt, &long_separator_loop); // End while (element < elements_end).
@@ -3853,7 +3933,9 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
// but "delete this" is allowed.
ASSERT(language_mode() == CLASSIC_MODE || var->is_this());
if (var->IsUnallocated()) {
- __ ldr(r2, var->is_qml_global() ? QmlGlobalObjectOperand() : GlobalObjectOperand());
+ __ ldr(r2, var->is_qml_global()
+ ? QmlGlobalObjectOperand()
+ : GlobalObjectOperand());
__ mov(r1, Operand(var->name()));
__ mov(r0, Operand(Smi::FromInt(kNonStrictMode)));
__ Push(r2, r1, r0);
@@ -3975,7 +4057,7 @@ void FullCodeGenerator::EmitUnaryOperation(UnaryOperation* expr,
// accumulator register r0.
VisitForAccumulatorValue(expr->expression());
SetSourcePosition(expr->position());
- CallIC(stub.GetCode(), RelocInfo::CODE_TARGET,
+ CallIC(stub.GetCode(isolate()), RelocInfo::CODE_TARGET,
expr->UnaryOperationFeedbackId());
context()->Plug(r0);
}
@@ -4081,13 +4163,16 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// Call stub. Undo operation first.
__ sub(r0, r0, Operand(Smi::FromInt(count_value)));
}
- __ mov(r1, Operand(Smi::FromInt(count_value)));
+ __ mov(r1, r0);
+ __ mov(r0, Operand(Smi::FromInt(count_value)));
// Record position before stub call.
SetSourcePosition(expr->position());
BinaryOpStub stub(Token::ADD, NO_OVERWRITE);
- CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->CountBinOpFeedbackId());
+ CallIC(stub.GetCode(isolate()),
+ RelocInfo::CODE_TARGET,
+ expr->CountBinOpFeedbackId());
patch_site.EmitPatchInfo();
__ bind(&done);
@@ -4157,7 +4242,9 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
VariableProxy* proxy = expr->AsVariableProxy();
if (proxy != NULL && proxy->var()->IsUnallocated()) {
Comment cmnt(masm_, "Global variable");
- __ ldr(r0, proxy->var()->is_qml_global() ? QmlGlobalObjectOperand() : GlobalObjectOperand());
+ __ ldr(r0, proxy->var()->is_qml_global()
+ ? QmlGlobalObjectOperand()
+ : GlobalObjectOperand());
__ mov(r2, Operand(proxy->name()));
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
// Use a regular load, not a contextual load, to avoid a reference
@@ -4202,13 +4289,13 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
}
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- if (check->Equals(isolate()->heap()->number_symbol())) {
+ if (check->Equals(isolate()->heap()->number_string())) {
__ JumpIfSmi(r0, if_true);
__ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
__ cmp(r0, ip);
Split(eq, if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->string_symbol())) {
+ } else if (check->Equals(isolate()->heap()->string_string())) {
__ JumpIfSmi(r0, if_false);
// Check for undetectable objects => false.
__ CompareObjectType(r0, r0, r1, FIRST_NONSTRING_TYPE);
@@ -4216,16 +4303,16 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
__ ldrb(r1, FieldMemOperand(r0, Map::kBitFieldOffset));
__ tst(r1, Operand(1 << Map::kIsUndetectable));
Split(eq, if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->boolean_symbol())) {
+ } else if (check->Equals(isolate()->heap()->boolean_string())) {
__ CompareRoot(r0, Heap::kTrueValueRootIndex);
__ b(eq, if_true);
__ CompareRoot(r0, Heap::kFalseValueRootIndex);
Split(eq, if_true, if_false, fall_through);
} else if (FLAG_harmony_typeof &&
- check->Equals(isolate()->heap()->null_symbol())) {
+ check->Equals(isolate()->heap()->null_string())) {
__ CompareRoot(r0, Heap::kNullValueRootIndex);
Split(eq, if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->undefined_symbol())) {
+ } else if (check->Equals(isolate()->heap()->undefined_string())) {
__ CompareRoot(r0, Heap::kUndefinedValueRootIndex);
__ b(eq, if_true);
__ JumpIfSmi(r0, if_false);
@@ -4235,19 +4322,23 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
__ tst(r1, Operand(1 << Map::kIsUndetectable));
Split(ne, if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->function_symbol())) {
+ } else if (check->Equals(isolate()->heap()->function_string())) {
__ JumpIfSmi(r0, if_false);
STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
__ CompareObjectType(r0, r0, r1, JS_FUNCTION_TYPE);
__ b(eq, if_true);
__ cmp(r1, Operand(JS_FUNCTION_PROXY_TYPE));
Split(eq, if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->object_symbol())) {
+ } else if (check->Equals(isolate()->heap()->object_string())) {
__ JumpIfSmi(r0, if_false);
if (!FLAG_harmony_typeof) {
__ CompareRoot(r0, Heap::kNullValueRootIndex);
__ b(eq, if_true);
}
+ if (FLAG_harmony_symbols) {
+ __ CompareObjectType(r0, r0, r1, SYMBOL_TYPE);
+ __ b(eq, if_true);
+ }
// Check for JS objects => true.
__ CompareObjectType(r0, r0, r1, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
__ b(lt, if_false);
@@ -4306,29 +4397,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
default: {
VisitForAccumulatorValue(expr->right());
- Condition cond = eq;
- switch (op) {
- case Token::EQ_STRICT:
- case Token::EQ:
- cond = eq;
- break;
- case Token::LT:
- cond = lt;
- break;
- case Token::GT:
- cond = gt;
- break;
- case Token::LTE:
- cond = le;
- break;
- case Token::GTE:
- cond = ge;
- break;
- case Token::IN:
- case Token::INSTANCEOF:
- default:
- UNREACHABLE();
- }
+ Condition cond = CompareIC::ComputeCondition(op);
__ pop(r1);
bool inline_smi_code = ShouldInlineSmiCase(op);
@@ -4344,11 +4413,11 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
// Record position and call the compare IC.
SetSourcePosition(expr->position());
- Handle<Code> ic = CompareIC::GetUninitialized(op);
+ Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
CallIC(ic, RelocInfo::CODE_TARGET, expr->CompareOperationFeedbackId());
patch_site.EmitPatchInfo();
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- __ cmp(r0, Operand(0));
+ __ cmp(r0, Operand::Zero());
Split(cond, if_true, if_false, fall_through);
}
}
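
The open-coded token switch deleted above now lives in one shared helper, so every port maps comparison tokens to machine conditions the same way. Its effective mapping (a sketch, not the exact V8 signature):

    enum Condition { eq, lt, gt, le, ge };
    enum TokenValue { EQ, EQ_STRICT, LT, GT, LTE, GTE };

    // Same table the removed switch encoded; EQ and EQ_STRICT both test eq.
    Condition ComputeConditionSketch(TokenValue op) {
      switch (op) {
        case EQ:
        case EQ_STRICT: return eq;
        case LT:  return lt;
        case GT:  return gt;
        case LTE: return le;
        case GTE: return ge;
      }
      return eq;  // unreachable for supported tokens
    }
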
diff --git a/src/3rdparty/v8/src/arm/ic-arm.cc b/src/3rdparty/v8/src/arm/ic-arm.cc
index 4839589..e8d0fab 100644
--- a/src/3rdparty/v8/src/arm/ic-arm.cc
+++ b/src/3rdparty/v8/src/arm/ic-arm.cc
@@ -213,53 +213,6 @@ static void GenerateDictionaryStore(MacroAssembler* masm,
}
-void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -- r0 : receiver
- // -- sp[0] : receiver
- // -----------------------------------
- Label miss;
-
- StubCompiler::GenerateLoadArrayLength(masm, r0, r3, &miss);
- __ bind(&miss);
- StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
-}
-
-
-void LoadIC::GenerateStringLength(MacroAssembler* masm, bool support_wrappers) {
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -- r0 : receiver
- // -- sp[0] : receiver
- // -----------------------------------
- Label miss;
-
- StubCompiler::GenerateLoadStringLength(masm, r0, r1, r3, &miss,
- support_wrappers);
- // Cache miss: Jump to runtime.
- __ bind(&miss);
- StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
-}
-
-
-void LoadIC::GenerateFunctionPrototype(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r2 : name
- // -- lr : return address
- // -- r0 : receiver
- // -- sp[0] : receiver
- // -----------------------------------
- Label miss;
-
- StubCompiler::GenerateLoadFunctionPrototype(masm, r0, r1, r3, &miss);
- __ bind(&miss);
- StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
-}
-
-
// Checks the receiver for special cases (value type, slow case bits).
// Falls through for regular JS object.
static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
@@ -350,30 +303,30 @@ static void GenerateFastArrayLoad(MacroAssembler* masm,
}
-// Checks whether a key is an array index string or a symbol string.
-// Falls through if a key is a symbol.
+// Checks whether a key is an array index string or an internalized string.
+// Falls through if a key is an internalized string.
static void GenerateKeyStringCheck(MacroAssembler* masm,
Register key,
Register map,
Register hash,
Label* index_string,
- Label* not_symbol) {
+ Label* not_internalized) {
// The key is not a smi.
// Is it a string?
__ CompareObjectType(key, map, hash, FIRST_NONSTRING_TYPE);
- __ b(ge, not_symbol);
+ __ b(ge, not_internalized);
// Is the string an array index, with cached numeric value?
__ ldr(hash, FieldMemOperand(key, String::kHashFieldOffset));
__ tst(hash, Operand(String::kContainsCachedArrayIndexMask));
__ b(eq, index_string);
- // Is the string a symbol?
+ // Is the string internalized?
// map: key map
__ ldrb(hash, FieldMemOperand(map, Map::kInstanceTypeOffset));
- STATIC_ASSERT(kSymbolTag != 0);
- __ tst(hash, Operand(kIsSymbolMask));
- __ b(eq, not_symbol);
+ STATIC_ASSERT(kInternalizedTag != 0);
+ __ tst(hash, Operand(kIsInternalizedMask));
+ __ b(eq, not_internalized);
}
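
GenerateKeyStringCheck now classifies keys as array indices, internalized strings, or slow-path strings; only the terminology changed from the old symbol bit. The decision sequence, restated with the masks as parameters (a sketch):

    #include <cstdint>

    enum KeyKind { ARRAY_INDEX, INTERNALIZED, NOT_INTERNALIZED };

    // hash_field caches a numeric index when the "contains cached array
    // index" bits are clear; otherwise the instance type's internalized bit
    // decides between the probe path and the generic runtime path.
    KeyKind ClassifyKey(uint32_t hash_field, uint32_t instance_type,
                        uint32_t contains_index_mask,
                        uint32_t internalized_mask) {
      if ((hash_field & contains_index_mask) == 0) return ARRAY_INDEX;
      if ((instance_type & internalized_mask) != 0) return INTERNALIZED;
      return NOT_INTERNALIZED;
    }
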
@@ -632,7 +585,7 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
__ bind(&check_string);
GenerateKeyStringCheck(masm, r2, r0, r3, &index_string, &slow_call);
- // The key is known to be a symbol.
+ // The key is known to be internalized.
// If the receiver is a regular JS object with slow properties then do
// a quick inline probe of the receiver's dictionary.
// Otherwise do the monomorphic cache probe.
@@ -660,7 +613,7 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
__ bind(&slow_call);
// This branch is taken if:
// - the receiver requires boxing or access check,
- // - the key is neither smi nor symbol,
+ // - the key is neither smi nor an internalized string,
// - the value loaded is not a function,
// - there is hope that the runtime will create a monomorphic call stub
// that will get fetched next time.
@@ -703,8 +656,8 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// -----------------------------------
// Probe the stub cache.
- Code::Flags flags =
- Code::ComputeFlags(Code::LOAD_IC, MONOMORPHIC);
+ Code::Flags flags = Code::ComputeFlags(
+ Code::LOAD_IC, MONOMORPHIC, Code::HANDLER_FRAGMENT);
Isolate::Current()->stub_cache()->GenerateProbe(
masm, flags, r0, r2, r3, r4, r5, r6);
@@ -862,7 +815,7 @@ void KeyedLoadIC::GenerateNonStrictArguments(MacroAssembler* masm) {
__ mov(r0, r2);
__ Ret();
__ bind(&slow);
- GenerateMiss(masm, false);
+ GenerateMiss(masm, MISS);
}
@@ -891,7 +844,7 @@ void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
__ RecordWrite(r3, r6, r9, kLRHasNotBeenSaved, kDontSaveFPRegs);
__ Ret();
__ bind(&slow);
- GenerateMiss(masm, false);
+ GenerateMiss(masm, MISS);
}
@@ -925,7 +878,7 @@ void KeyedCallIC::GenerateNonStrictArguments(MacroAssembler* masm,
Object* KeyedLoadIC_Miss(Arguments args);
-void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, bool force_generic) {
+void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, ICMissMode miss_mode) {
// ---------- S t a t e --------------
// -- lr : return address
// -- r0 : key
@@ -938,7 +891,7 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, bool force_generic) {
__ Push(r1, r0);
// Perform tail call to the entry.
- ExternalReference ref = force_generic
+ ExternalReference ref = miss_mode == MISS_FORCE_GENERIC
? ExternalReference(IC_Utility(kKeyedLoadIC_MissForceGeneric), isolate)
: ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate);
@@ -1034,7 +987,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
int mask = KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask;
__ And(r3, r3, Operand(mask));
- // Load the key (consisting of map and symbol) from the cache and
+ // Load the key (consisting of map and internalized string) from the cache and
// check for match.
Label load_in_object_property;
static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
@@ -1051,13 +1004,13 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ ldr(r5, MemOperand(r4, kPointerSize * 2, PostIndex));
__ cmp(r2, r5);
__ b(ne, &try_next_entry);
- __ ldr(r5, MemOperand(r4, -kPointerSize)); // Load symbol
+ __ ldr(r5, MemOperand(r4, -kPointerSize)); // Load string
__ cmp(r0, r5);
__ b(eq, &hit_on_nth_entry[i]);
__ bind(&try_next_entry);
}
- // Last entry: Load map and move r4 to symbol.
+ // Last entry: Load map and move r4 to string.
__ ldr(r5, MemOperand(r4, kPointerSize, PostIndex));
__ cmp(r2, r5);
__ b(ne, &slow);
@@ -1119,8 +1072,8 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
GenerateGlobalInstanceTypeCheck(masm, r2, &slow);
// Load the property to r0.
GenerateDictionaryLoad(masm, &slow, r3, r0, r0, r2, r4);
- __ IncrementCounter(isolate->counters()->keyed_load_generic_symbol(),
- 1, r2, r3);
+ __ IncrementCounter(
+ isolate->counters()->keyed_load_generic_symbol(), 1, r2, r3);
__ Ret();
__ bind(&index_string);
@@ -1158,7 +1111,7 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
char_at_generator.GenerateSlow(masm, call_helper);
__ bind(&miss);
- GenerateMiss(masm, false);
+ GenerateMiss(masm, MISS);
}
@@ -1198,11 +1151,11 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
1);
__ bind(&slow);
- GenerateMiss(masm, false);
+ GenerateMiss(masm, MISS);
}
-void KeyedStoreIC::GenerateMiss(MacroAssembler* masm, bool force_generic) {
+void KeyedStoreIC::GenerateMiss(MacroAssembler* masm, ICMissMode miss_mode) {
// ---------- S t a t e --------------
// -- r0 : value
// -- r1 : key
@@ -1213,7 +1166,7 @@ void KeyedStoreIC::GenerateMiss(MacroAssembler* masm, bool force_generic) {
// Push receiver, key and value for runtime call.
__ Push(r2, r1, r0);
- ExternalReference ref = force_generic
+ ExternalReference ref = miss_mode == MISS_FORCE_GENERIC
? ExternalReference(IC_Utility(kKeyedStoreIC_MissForceGeneric),
masm->isolate())
: ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
@@ -1249,7 +1202,9 @@ void KeyedStoreIC::GenerateTransitionElementsSmiToDouble(MacroAssembler* masm) {
// Must return the modified receiver in r0.
if (!FLAG_trace_elements_transitions) {
Label fail;
- ElementsTransitionGenerator::GenerateSmiToDouble(masm, &fail);
+ AllocationSiteMode mode = AllocationSiteInfo::GetMode(FAST_SMI_ELEMENTS,
+ FAST_DOUBLE_ELEMENTS);
+ ElementsTransitionGenerator::GenerateSmiToDouble(masm, mode, &fail);
__ mov(r0, r2);
__ Ret();
__ bind(&fail);
@@ -1270,7 +1225,9 @@ void KeyedStoreIC::GenerateTransitionElementsDoubleToObject(
// Must return the modified receiver in r0.
if (!FLAG_trace_elements_transitions) {
Label fail;
- ElementsTransitionGenerator::GenerateDoubleToObject(masm, &fail);
+ AllocationSiteMode mode = AllocationSiteInfo::GetMode(FAST_DOUBLE_ELEMENTS,
+ FAST_ELEMENTS);
+ ElementsTransitionGenerator::GenerateDoubleToObject(masm, mode, &fail);
__ mov(r0, r2);
__ Ret();
__ bind(&fail);
@@ -1379,7 +1336,6 @@ static void KeyedStoreGenerateGenericHelper(
__ bind(&fast_double_without_map_check);
__ StoreNumberToDoubleElements(value,
key,
- receiver,
elements, // Overwritten.
r3, // Scratch regs...
r4,
@@ -1407,7 +1363,9 @@ static void KeyedStoreGenerateGenericHelper(
r4,
slow);
ASSERT(receiver_map.is(r3)); // Transition code expects map in r3
- ElementsTransitionGenerator::GenerateSmiToDouble(masm, slow);
+ AllocationSiteMode mode = AllocationSiteInfo::GetMode(FAST_SMI_ELEMENTS,
+ FAST_DOUBLE_ELEMENTS);
+ ElementsTransitionGenerator::GenerateSmiToDouble(masm, mode, slow);
__ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ jmp(&fast_double_without_map_check);
@@ -1419,7 +1377,9 @@ static void KeyedStoreGenerateGenericHelper(
r4,
slow);
ASSERT(receiver_map.is(r3)); // Transition code expects map in r3
- ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm);
+ mode = AllocationSiteInfo::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
+ ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm, mode,
+ slow);
__ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ jmp(&finish_object_store);
@@ -1433,7 +1393,8 @@ static void KeyedStoreGenerateGenericHelper(
r4,
slow);
ASSERT(receiver_map.is(r3)); // Transition code expects map in r3
- ElementsTransitionGenerator::GenerateDoubleToObject(masm, slow);
+ mode = AllocationSiteInfo::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
+ ElementsTransitionGenerator::GenerateDoubleToObject(masm, mode, slow);
__ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ jmp(&finish_object_store);
}
@@ -1577,62 +1538,6 @@ void StoreIC::GenerateMiss(MacroAssembler* masm) {
}
-void StoreIC::GenerateArrayLength(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r0 : value
- // -- r1 : receiver
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
- //
- // This accepts as a receiver anything JSArray::SetElementsLength accepts
- // (currently anything except for external arrays which means anything with
- // elements of FixedArray type). Value must be a number, but only smis are
- // accepted as the most common case.
-
- Label miss;
-
- Register receiver = r1;
- Register value = r0;
- Register scratch = r3;
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, &miss);
-
- // Check that the object is a JS array.
- __ CompareObjectType(receiver, scratch, scratch, JS_ARRAY_TYPE);
- __ b(ne, &miss);
-
- // Check that elements are FixedArray.
- // We rely on StoreIC_ArrayLength below to deal with all types of
- // fast elements (including COW).
- __ ldr(scratch, FieldMemOperand(receiver, JSArray::kElementsOffset));
- __ CompareObjectType(scratch, scratch, scratch, FIXED_ARRAY_TYPE);
- __ b(ne, &miss);
-
- // Check that the array has fast properties, otherwise the length
- // property might have been redefined.
- __ ldr(scratch, FieldMemOperand(receiver, JSArray::kPropertiesOffset));
- __ ldr(scratch, FieldMemOperand(scratch, FixedArray::kMapOffset));
- __ CompareRoot(scratch, Heap::kHashTableMapRootIndex);
- __ b(eq, &miss);
-
- // Check that value is a smi.
- __ JumpIfNotSmi(value, &miss);
-
- // Prepare tail call to StoreIC_ArrayLength.
- __ Push(receiver, value);
-
- ExternalReference ref =
- ExternalReference(IC_Utility(kStoreIC_ArrayLength), masm->isolate());
- __ TailCallExternalReference(ref, 2, 1);
-
- __ bind(&miss);
-
- GenerateMiss(masm);
-}
-
-
void StoreIC::GenerateNormal(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : value
@@ -1699,36 +1604,15 @@ Condition CompareIC::ComputeCondition(Token::Value op) {
}
-void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
- HandleScope scope;
- Handle<Code> rewritten;
- State previous_state = GetState();
- State state = TargetState(previous_state, false, x, y);
- if (state == GENERIC) {
- CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS, r1, r0);
- rewritten = stub.GetCode();
- } else {
- ICCompareStub stub(op_, state);
- if (state == KNOWN_OBJECTS) {
- stub.set_known_map(Handle<Map>(Handle<JSObject>::cast(x)->map()));
- }
- rewritten = stub.GetCode();
- }
- set_target(*rewritten);
-
-#ifdef DEBUG
- if (FLAG_trace_ic) {
- PrintF("[CompareIC (%s->%s)#%s]\n",
- GetStateName(previous_state),
- GetStateName(state),
- Token::Name(op_));
- }
-#endif
+bool CompareIC::HasInlinedSmiCode(Address address) {
+ // The address of the instruction following the call.
+ Address cmp_instruction_address =
+ Assembler::return_address_from_call_start(address);
- // Activate inlined smi code.
- if (previous_state == UNINITIALIZED) {
- PatchInlinedSmiCode(address(), ENABLE_INLINED_SMI_CHECK);
- }
+ // If the instruction following the call is not a cmp rx, #yyy, nothing
+ // was inlined.
+ Instr instr = Assembler::instr_at(cmp_instruction_address);
+ return Assembler::IsCmpImmediate(instr);
}
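
The rewritten HasInlinedSmiCode answers the question structurally: if the instruction at the call's return address is a cmp-immediate, the full code generator emitted an inlined smi check there. An illustrative A32 decode of "cmp rN, #imm12" (mask derived from the encoding; treat it as a sketch, not V8's Assembler::IsCmpImmediate):

    #include <cstdint>

    // A32 data-processing immediate: cond(4) 00 1 opcode(4) S Rn(4) Rd(4)
    // imm12. For CMP, opcode = 1010, S = 1, and Rd must be 0000.
    bool IsCmpImmediateSketch(uint32_t instr) {
      return (instr & 0x0FF0F000u) == 0x03500000u;
    }
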
diff --git a/src/3rdparty/v8/src/arm/lithium-arm.cc b/src/3rdparty/v8/src/arm/lithium-arm.cc
index b492d48..3385b43 100644
--- a/src/3rdparty/v8/src/arm/lithium-arm.cc
+++ b/src/3rdparty/v8/src/arm/lithium-arm.cc
@@ -42,10 +42,10 @@ LITHIUM_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE)
#undef DEFINE_COMPILE
LOsrEntry::LOsrEntry() {
- for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
+ for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
register_spills_[i] = NULL;
}
- for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; ++i) {
+ for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); ++i) {
double_register_spills_[i] = NULL;
}
}
@@ -112,7 +112,11 @@ void LInstruction::PrintDataTo(StringStream* stream) {
stream->Add("= ");
for (int i = 0; i < InputCount(); i++) {
if (i > 0) stream->Add(" ");
- InputAt(i)->PrintTo(stream);
+ if (InputAt(i) == NULL) {
+ stream->Add("NULL");
+ } else {
+ InputAt(i)->PrintTo(stream);
+ }
}
}
@@ -297,6 +301,11 @@ void LUnaryMathOperation::PrintDataTo(StringStream* stream) {
}
+void LMathExp::PrintDataTo(StringStream* stream) {
+ value()->PrintTo(stream);
+}
+
+
void LLoadContextSlot::PrintDataTo(StringStream* stream) {
context()->PrintTo(stream);
stream->Add("[%d]", slot_index());
@@ -346,6 +355,17 @@ void LCallNew::PrintDataTo(StringStream* stream) {
}
+void LCallNewArray::PrintDataTo(StringStream* stream) {
+ stream->Add("= ");
+ constructor()->PrintTo(stream);
+ stream->Add(" #%d / ", arity());
+ ASSERT(hydrogen()->property_cell()->value()->IsSmi());
+ ElementsKind kind = static_cast<ElementsKind>(
+ Smi::cast(hydrogen()->property_cell()->value())->value());
+ stream->Add(" (%s) ", ElementsKindToString(kind));
+}
+
+
void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
arguments()->PrintTo(stream);
stream->Add(" length ");
@@ -373,11 +393,27 @@ void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
}
+void LLoadKeyed::PrintDataTo(StringStream* stream) {
+ elements()->PrintTo(stream);
+ stream->Add("[");
+ key()->PrintTo(stream);
+ if (hydrogen()->IsDehoisted()) {
+ stream->Add(" + %d]", additional_index());
+ } else {
+ stream->Add("]");
+ }
+}
+
+
void LStoreKeyed::PrintDataTo(StringStream* stream) {
elements()->PrintTo(stream);
stream->Add("[");
key()->PrintTo(stream);
- stream->Add("] <- ");
+ if (hydrogen()->IsDehoisted()) {
+      stream->Add(" + %d] <- ", additional_index());
+ } else {
+ stream->Add("] <- ");
+ }
value()->PrintTo(stream);
}
@@ -591,6 +627,7 @@ LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
HInstruction* hinstr,
CanDeoptimize can_deoptimize) {
+ info()->MarkAsNonDeferredCalling();
#ifdef DEBUG
instr->VerifyCall();
#endif
@@ -656,6 +693,11 @@ LInstruction* LChunkBuilder::DoBlockEntry(HBlockEntry* instr) {
}
+LInstruction* LChunkBuilder::DoDummyUse(HDummyUse* instr) {
+ return DefineAsRegister(new(zone()) LDummyUse(UseAny(instr->value())));
+}
+
+
LInstruction* LChunkBuilder::DoSoftDeoptimize(HSoftDeoptimize* instr) {
return AssignEnvironment(new(zone()) LDeoptimize);
}
@@ -941,6 +983,12 @@ LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
}
+LInstruction* LChunkBuilder::DoInstanceSize(HInstanceSize* instr) {
+ LOperand* object = UseRegisterAtStart(instr->object());
+ return DefineAsRegister(new(zone()) LInstanceSize(object));
+}
+
+
LInstruction* LChunkBuilder::DoWrapReceiver(HWrapReceiver* instr) {
LOperand* receiver = UseRegisterAtStart(instr->receiver());
LOperand* function = UseRegisterAtStart(instr->function());
@@ -977,7 +1025,14 @@ LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) {
LInstruction* LChunkBuilder::DoContext(HContext* instr) {
- return instr->HasNoUses() ? NULL : DefineAsRegister(new(zone()) LContext);
+ // If there is a non-return use, the context must be allocated in a register.
+ for (HUseIterator it(instr->uses()); !it.Done(); it.Advance()) {
+ if (!it.value()->IsReturn()) {
+ return DefineAsRegister(new(zone()) LContext);
+ }
+ }
+
+ return NULL;
}
@@ -994,7 +1049,8 @@ LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) {
LInstruction* LChunkBuilder::DoGlobalObject(HGlobalObject* instr) {
LOperand* context = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LGlobalObject(context, instr->qml_global()));
+ return DefineAsRegister(new(zone()) LGlobalObject(context,
+ instr->qml_global()));
}
@@ -1025,6 +1081,15 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
LOperand* input = UseFixedDouble(instr->value(), d2);
LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(input, NULL);
return MarkAsCall(DefineFixedDouble(result, d2), instr);
+ } else if (op == kMathExp) {
+ ASSERT(instr->representation().IsDouble());
+ ASSERT(instr->value()->representation().IsDouble());
+ LOperand* input = UseTempRegister(instr->value());
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempRegister();
+ LOperand* double_temp = FixedTemp(d3); // Chosen by fair dice roll.
+ LMathExp* result = new(zone()) LMathExp(input, double_temp, temp1, temp2);
+ return DefineAsRegister(result);
} else if (op == kMathPowHalf) {
LOperand* input = UseFixedDouble(instr->value(), d2);
LOperand* temp = FixedTemp(d3);
@@ -1068,7 +1133,8 @@ LInstruction* LChunkBuilder::DoCallNamed(HCallNamed* instr) {
LInstruction* LChunkBuilder::DoCallGlobal(HCallGlobal* instr) {
argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new(zone()) LCallGlobal(instr->qml_global()), r0), instr);
+ LCallGlobal* result = new(zone()) LCallGlobal(instr->qml_global());
+ return MarkAsCall(DefineFixed(result, r0), instr);
}
@@ -1086,6 +1152,14 @@ LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
}
+LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
+ LOperand* constructor = UseFixed(instr->constructor(), r1);
+ argument_count_ -= instr->argument_count();
+ LCallNewArray* result = new(zone()) LCallNewArray(constructor);
+ return MarkAsCall(DefineFixed(result, r0), instr);
+}
+
+
LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
LOperand* function = UseFixed(instr->function(), r1);
argument_count_ -= instr->argument_count();
@@ -1210,31 +1284,43 @@ HValue* LChunkBuilder::SimplifiedDividendForMathFloorOfDiv(HValue* dividend) {
HValue* LChunkBuilder::SimplifiedDivisorForMathFloorOfDiv(HValue* divisor) {
- // Only optimize when we have magic numbers for the divisor.
- // The standard integer division routine is usually slower than transitionning
- // to VFP.
- if (divisor->IsConstant() &&
- HConstant::cast(divisor)->HasInteger32Value()) {
+ if (CpuFeatures::IsSupported(SUDIV)) {
+ // A value with an integer representation does not need to be transformed.
+ if (divisor->representation().IsInteger32()) {
+ return divisor;
+ // A change from an integer32 can be replaced by the integer32 value.
+ } else if (divisor->IsChange() &&
+ HChange::cast(divisor)->from().IsInteger32()) {
+ return HChange::cast(divisor)->value();
+ }
+ }
+
+ if (divisor->IsConstant() && HConstant::cast(divisor)->HasInteger32Value()) {
HConstant* constant_val = HConstant::cast(divisor);
int32_t int32_val = constant_val->Integer32Value();
- if (LChunkBuilder::HasMagicNumberForDivisor(int32_val)) {
+ if (LChunkBuilder::HasMagicNumberForDivisor(int32_val) ||
+ CpuFeatures::IsSupported(SUDIV)) {
return constant_val->CopyToRepresentation(Representation::Integer32(),
divisor->block()->zone());
}
}
+
return NULL;
}
LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
- HValue* right = instr->right();
- LOperand* dividend = UseRegister(instr->left());
- LOperand* divisor = UseRegisterOrConstant(right);
- LOperand* remainder = TempRegister();
- ASSERT(right->IsConstant() &&
- HConstant::cast(right)->HasInteger32Value() &&
- HasMagicNumberForDivisor(HConstant::cast(right)->Integer32Value()));
- return AssignEnvironment(DefineAsRegister(
+ HValue* right = instr->right();
+ LOperand* dividend = UseRegister(instr->left());
+ LOperand* divisor = CpuFeatures::IsSupported(SUDIV)
+ ? UseRegister(right)
+ : UseOrConstant(right);
+ LOperand* remainder = TempRegister();
+ ASSERT(CpuFeatures::IsSupported(SUDIV) ||
+ (right->IsConstant() &&
+ HConstant::cast(right)->HasInteger32Value() &&
+ HasMagicNumberForDivisor(HConstant::cast(right)->Integer32Value())));
+ return AssignEnvironment(DefineAsRegister(
new(zone()) LMathFloorOfDiv(dividend, divisor, remainder)));
}
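
On cores without SUDIV, flooring division by a constant still goes through the multiply-high "magic number" technique the ASSERT above refers to; SUDIV hardware lifts the constant-divisor restriction. A self-contained example of the technique for divisor 3 (the standard Hacker's Delight construction):

    #include <cstdint>
    #include <cstdio>
    #include <initializer_list>

    // Truncating signed division by 3 with no divide instruction: multiply
    // by the magic constant 0x55555556, keep the high 32 bits, then correct
    // for negative dividends.
    int32_t DivideBy3(int32_t n) {
      int64_t prod = INT64_C(0x55555556) * n;
      int32_t q = static_cast<int32_t>(prod >> 32);  // multiply-high
      q += static_cast<uint32_t>(n) >> 31;           // +1 when n is negative
      return q;
    }

    int main() {
      for (int32_t n : {-7, -3, 0, 5, 9})
        printf("%d / 3 = %d\n", n, DivideBy3(n));  // -2, -1, 0, 1, 3
    }
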
@@ -1303,8 +1389,28 @@ LInstruction* LChunkBuilder::DoMul(HMul* instr) {
return DefineAsRegister(mul);
} else if (instr->representation().IsDouble()) {
- return DoArithmeticD(Token::MUL, instr);
+ if (instr->UseCount() == 1 && (instr->uses().value()->IsAdd() ||
+ instr->uses().value()->IsSub())) {
+ HBinaryOperation* use = HBinaryOperation::cast(instr->uses().value());
+ if (use->IsAdd() && instr == use->left()) {
+ // This mul is the lhs of an add. The add and mul will be folded into a
+ // multiply-add in DoAdd.
+ return NULL;
+ }
+ if (instr == use->right() && use->IsAdd() && !use->left()->IsMul()) {
+ // This mul is the rhs of an add, where the lhs is not another mul.
+ // The add and mul will be folded into a multiply-add in DoAdd.
+ return NULL;
+ }
+ if (instr == use->right() && use->IsSub()) {
+ // This mul is the rhs of a sub. The sub and mul will be folded into a
+ // multiply-sub in DoSub.
+ return NULL;
+ }
+ }
+
+ return DoArithmeticD(Token::MUL, instr);
} else {
return DoArithmeticT(Token::MUL, instr);
}
@@ -1315,6 +1421,12 @@ LInstruction* LChunkBuilder::DoSub(HSub* instr) {
if (instr->representation().IsInteger32()) {
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
+
+ if (instr->left()->IsConstant()) {
+ // If lhs is constant, do reverse subtraction instead.
+ return DoRSub(instr);
+ }
+
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseOrConstantAtStart(instr->right());
LSubI* sub = new(zone()) LSubI(left, right);
@@ -1324,6 +1436,10 @@ LInstruction* LChunkBuilder::DoSub(HSub* instr) {
}
return result;
} else if (instr->representation().IsDouble()) {
+ if (instr->right()->IsMul()) {
+ return DoMultiplySub(instr->left(), HMul::cast(instr->right()));
+ }
+
return DoArithmeticD(Token::SUB, instr);
} else {
return DoArithmeticT(Token::SUB, instr);
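
The DoRSub path added below exploits ARM's reverse-subtract: sub only accepts an immediate as its second operand, so "constant - x" would otherwise need the constant materialized in a register first. A sketch of the shape being rewritten:

    // c - x compiles to a single instruction when expressed as a reverse
    // subtraction: rsb r0, r0, #c  (r0 = c - r0).
    int32_t ConstMinus(int32_t x) {
      const int32_t c = 42;  // any encodable immediate
      return c - x;          // HSub with constant lhs -> LRSubI
    }
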
@@ -1331,6 +1447,44 @@ LInstruction* LChunkBuilder::DoSub(HSub* instr) {
}
+LInstruction* LChunkBuilder::DoRSub(HSub* instr) {
+ ASSERT(instr->representation().IsInteger32());
+ ASSERT(instr->left()->representation().IsInteger32());
+ ASSERT(instr->right()->representation().IsInteger32());
+
+ // Note: The lhs of the subtraction becomes the rhs of the
+ // reverse-subtraction.
+ LOperand* left = UseRegisterAtStart(instr->right());
+ LOperand* right = UseOrConstantAtStart(instr->left());
+ LRSubI* rsb = new(zone()) LRSubI(left, right);
+ LInstruction* result = DefineAsRegister(rsb);
+ if (instr->CheckFlag(HValue::kCanOverflow)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+}
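+
+The point of DoRSub is the "constant - x" pattern: swapping the operands lets
+ARM's rsb instruction take the constant as an immediate instead of first
+materializing it in a register. A rough sketch of the equivalence (the helper
+name is hypothetical, for illustration only):
+
+    // With x in a register and constant K:
+    //   rsb r0, rX, #K    ; r0 = K - x, K folded into the immediate field
+    // instead of:
+    //   mov rK, #K
+    //   sub r0, rK, rX
+    int32_t reverse_subtract(int32_t x, int32_t k) { return k - x; }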
+
+
+LInstruction* LChunkBuilder::DoMultiplyAdd(HMul* mul, HValue* addend) {
+ LOperand* multiplier_op = UseRegisterAtStart(mul->left());
+ LOperand* multiplicand_op = UseRegisterAtStart(mul->right());
+ LOperand* addend_op = UseRegisterAtStart(addend);
+ return DefineSameAsFirst(new(zone()) LMultiplyAddD(addend_op, multiplier_op,
+ multiplicand_op));
+}
+
+
+LInstruction* LChunkBuilder::DoMultiplySub(HValue* minuend, HMul* mul) {
+ LOperand* minuend_op = UseRegisterAtStart(minuend);
+ LOperand* multiplier_op = UseRegisterAtStart(mul->left());
+ LOperand* multiplicand_op = UseRegisterAtStart(mul->right());
+
+ return DefineSameAsFirst(new(zone()) LMultiplySubD(minuend_op,
+ multiplier_op,
+ multiplicand_op));
+}
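+
+Both helpers pin the accumulator to the result register (DefineSameAsFirst)
+because vmla/vmls accumulate in place; the codegen below asserts this. A plain
+C++ sketch of the expressions being fused, illustrative only:
+
+    double multiply_add(double addend, double m, double n) {
+      return addend + m * n;   // lowered to: vmla addend, m, n (in place)
+    }
+    double multiply_sub(double minuend, double m, double n) {
+      return minuend - m * n;  // lowered to: vmls minuend, m, n (in place)
+    }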
+
+
LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
if (instr->representation().IsInteger32()) {
ASSERT(instr->left()->representation().IsInteger32());
@@ -1344,6 +1498,15 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
}
return result;
} else if (instr->representation().IsDouble()) {
+ if (instr->left()->IsMul()) {
+ return DoMultiplyAdd(HMul::cast(instr->left()), instr->right());
+ }
+
+ if (instr->right()->IsMul()) {
+ ASSERT(!instr->left()->IsMul());
+ return DoMultiplyAdd(HMul::cast(instr->right()), instr->left());
+ }
+
return DoArithmeticD(Token::ADD, instr);
} else {
ASSERT(instr->representation().IsTagged());
@@ -1409,7 +1572,7 @@ LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
LInstruction* LChunkBuilder::DoCompareIDAndBranch(
HCompareIDAndBranch* instr) {
- Representation r = instr->GetInputRepresentation();
+ Representation r = instr->representation();
if (r.IsInteger32()) {
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
@@ -1563,6 +1726,27 @@ LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
}
+LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) {
+ LOperand* string = UseRegister(instr->string());
+ LOperand* index = UseRegister(instr->index());
+ LOperand* value = UseRegister(instr->value());
+ LSeqStringSetChar* result =
+ new(zone()) LSeqStringSetChar(instr->encoding(), string, index, value);
+ return DefineAsRegister(result);
+}
+
+
+LInstruction* LChunkBuilder::DoNumericConstraint(HNumericConstraint* instr) {
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoInductionVariableAnnotation(
+ HInductionVariableAnnotation* instr) {
+ return NULL;
+}
+
+
LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
LOperand* value = UseRegisterOrConstantAtStart(instr->index());
LOperand* length = UseRegister(instr->length());
@@ -1601,6 +1785,7 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
Representation to = instr->to();
if (from.IsTagged()) {
if (to.IsDouble()) {
+ info()->MarkAsDeferredCalling();
LOperand* value = UseRegister(instr->value());
LNumberUntagD* res = new(zone()) LNumberUntagD(value);
return AssignEnvironment(DefineAsRegister(res));
@@ -1625,6 +1810,7 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
}
} else if (from.IsDouble()) {
if (to.IsTagged()) {
+ info()->MarkAsDeferredCalling();
LOperand* value = UseRegister(instr->value());
LOperand* temp1 = TempRegister();
LOperand* temp2 = TempRegister();
@@ -1644,6 +1830,7 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
return AssignEnvironment(DefineAsRegister(res));
}
} else if (from.IsInteger32()) {
+ info()->MarkAsDeferredCalling();
if (to.IsTagged()) {
HValue* val = instr->value();
LOperand* value = UseRegisterAtStart(val);
@@ -1699,6 +1886,12 @@ LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
}
+LInstruction* LChunkBuilder::DoCheckSmiOrInt32(HCheckSmiOrInt32* instr) {
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return AssignEnvironment(new(zone()) LCheckSmi(value));
+}
+
+
LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
return AssignEnvironment(new(zone()) LCheckFunction(value));
@@ -1881,8 +2074,16 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
(instr->representation().IsDouble() &&
((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
(elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
-
- LOperand* external_pointer = UseRegister(instr->elements());
+ // float->double conversion on non-VFP2 requires an extra scratch
+ // register. For convenience, just mark the elements register as "UseTemp"
+ // so that it can be used as a temp during the float->double conversion
+  // once it is no longer needed after the float load.
+ bool needs_temp =
+ !CpuFeatures::IsSupported(VFP2) &&
+ (elements_kind == EXTERNAL_FLOAT_ELEMENTS);
+ LOperand* external_pointer = needs_temp
+ ? UseTempRegister(instr->elements())
+ : UseRegister(instr->elements());
result = new(zone()) LLoadKeyed(external_pointer, key);
}
@@ -1906,36 +2107,47 @@ LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
- LOperand* elements = UseRegisterAtStart(instr->elements());
- LOperand* key;
- LOperand* val;
- if (instr->NeedsWriteBarrier()) {
- key = UseTempRegister(instr->key());
- val = UseTempRegister(instr->value());
- } else {
- key = UseRegisterOrConstantAtStart(instr->key());
- val = UseRegisterAtStart(instr->value());
- }
+ ElementsKind elements_kind = instr->elements_kind();
-#ifdef DEBUG
if (!instr->is_external()) {
ASSERT(instr->elements()->representation().IsTagged());
- } else {
- ElementsKind elements_kind = instr->elements_kind();
- ASSERT(
- (instr->value()->representation().IsInteger32() &&
- (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
- (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
- (instr->value()->representation().IsDouble() &&
- ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
- (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
- ASSERT(instr->elements()->representation().IsExternal());
+ bool needs_write_barrier = instr->NeedsWriteBarrier();
+ LOperand* object = NULL;
+ LOperand* key = NULL;
+ LOperand* val = NULL;
+
+ if (instr->value()->representation().IsDouble()) {
+ object = UseRegisterAtStart(instr->elements());
+ val = UseTempRegister(instr->value());
+ key = UseRegisterOrConstantAtStart(instr->key());
+ } else {
+ ASSERT(instr->value()->representation().IsTagged());
+ object = UseTempRegister(instr->elements());
+ val = needs_write_barrier ? UseTempRegister(instr->value())
+ : UseRegisterAtStart(instr->value());
+ key = needs_write_barrier ? UseTempRegister(instr->key())
+ : UseRegisterOrConstantAtStart(instr->key());
+ }
+
+ return new(zone()) LStoreKeyed(object, key, val);
}
-#endif
- LStoreKeyed* result = new(zone()) LStoreKeyed(elements, key, val);
- ASSERT(result != NULL);
- return result;
+ ASSERT(
+ (instr->value()->representation().IsInteger32() &&
+ (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
+ (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
+ (instr->value()->representation().IsDouble() &&
+ ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
+ (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
+ ASSERT(instr->elements()->representation().IsExternal());
+ bool val_is_temp_register =
+ elements_kind == EXTERNAL_PIXEL_ELEMENTS ||
+ elements_kind == EXTERNAL_FLOAT_ELEMENTS;
+ LOperand* val = val_is_temp_register ? UseTempRegister(instr->value())
+ : UseRegister(instr->value());
+ LOperand* key = UseRegisterOrConstantAtStart(instr->key());
+ LOperand* external_pointer = UseRegister(instr->elements());
+ return new(zone()) LStoreKeyed(external_pointer, key, val);
}
@@ -1954,14 +2166,16 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
LInstruction* LChunkBuilder::DoTransitionElementsKind(
HTransitionElementsKind* instr) {
- ElementsKind from_kind = instr->original_map()->elements_kind();
- ElementsKind to_kind = instr->transitioned_map()->elements_kind();
- if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
- LOperand* object = UseRegister(instr->object());
+ LOperand* object = UseRegister(instr->object());
+ if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) {
LOperand* new_map_reg = TempRegister();
LTransitionElementsKind* result =
new(zone()) LTransitionElementsKind(object, new_map_reg, NULL);
return DefineSameAsFirst(result);
+ } else if (FLAG_compiled_transitions) {
+ LTransitionElementsKind* result =
+ new(zone()) LTransitionElementsKind(object, NULL, NULL);
+ return AssignPointerMap(result);
} else {
LOperand* object = UseFixed(instr->object(), r0);
LOperand* fixed_object_reg = FixedTemp(r2);
@@ -1970,11 +2184,21 @@ LInstruction* LChunkBuilder::DoTransitionElementsKind(
new(zone()) LTransitionElementsKind(object,
new_map_reg,
fixed_object_reg);
- return MarkAsCall(DefineFixed(result, r0), instr);
+ return MarkAsCall(result, instr);
}
}
+LInstruction* LChunkBuilder::DoTrapAllocationMemento(
+ HTrapAllocationMemento* instr) {
+ LOperand* object = UseRegister(instr->object());
+ LOperand* temp = TempRegister();
+ LTrapAllocationMemento* result =
+ new(zone()) LTrapAllocationMemento(object, temp);
+ return AssignEnvironment(result);
+}
+
+
LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
bool needs_write_barrier = instr->NeedsWriteBarrier();
bool needs_write_barrier_for_map = !instr->transition().is_null() &&
@@ -2041,12 +2265,23 @@ LInstruction* LChunkBuilder::DoStringLength(HStringLength* instr) {
LInstruction* LChunkBuilder::DoAllocateObject(HAllocateObject* instr) {
+ info()->MarkAsDeferredCalling();
LAllocateObject* result =
new(zone()) LAllocateObject(TempRegister(), TempRegister());
return AssignPointerMap(DefineAsRegister(result));
}
+LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
+ info()->MarkAsDeferredCalling();
+ LOperand* size = UseTempRegister(instr->size());
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempRegister();
+ LAllocate* result = new(zone()) LAllocate(size, temp1, temp2);
+ return AssignPointerMap(DefineAsRegister(result));
+}
+
+
LInstruction* LChunkBuilder::DoFastLiteral(HFastLiteral* instr) {
return MarkAsCall(DefineFixed(new(zone()) LFastLiteral, r0), instr);
}
@@ -2089,8 +2324,17 @@ LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
- int spill_index = chunk()->GetParameterStackSlot(instr->index());
- return DefineAsSpilled(new(zone()) LParameter, spill_index);
+ LParameter* result = new(zone()) LParameter;
+ if (instr->kind() == HParameter::STACK_PARAMETER) {
+ int spill_index = chunk()->GetParameterStackSlot(instr->index());
+ return DefineAsSpilled(result, spill_index);
+ } else {
+ ASSERT(info()->IsStub());
+ CodeStubInterfaceDescriptor* descriptor =
+ info()->code_stub()->GetInterfaceDescriptor(info()->isolate());
+ Register reg = descriptor->register_params_[instr->index()];
+ return DefineFixed(result, reg);
+ }
}
@@ -2158,7 +2402,7 @@ LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
env->set_ast_id(instr->ast_id());
env->Drop(instr->pop_count());
- for (int i = 0; i < instr->values()->length(); ++i) {
+ for (int i = instr->values()->length() - 1; i >= 0; --i) {
HValue* value = instr->values()->at(i);
if (instr->HasAssignedIndexAt(i)) {
env->Bind(instr->GetAssignedIndexAt(i), value);
@@ -2202,8 +2446,8 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
instr->arguments_count(),
instr->function(),
undefined,
- instr->call_kind(),
- instr->inlining_kind());
+ instr->inlining_kind(),
+ instr->undefined_receiver());
if (instr->arguments_var() != NULL) {
inner->Bind(instr->arguments_var(), graph()->GetArgumentsObject());
}
diff --git a/src/3rdparty/v8/src/arm/lithium-arm.h b/src/3rdparty/v8/src/arm/lithium-arm.h
index 2c289dd..c654400 100644
--- a/src/3rdparty/v8/src/arm/lithium-arm.h
+++ b/src/3rdparty/v8/src/arm/lithium-arm.h
@@ -50,6 +50,7 @@ class LCodeGen;
V(AccessArgumentsAt) \
V(AddI) \
V(AllocateObject) \
+ V(Allocate) \
V(ApplyArguments) \
V(ArgumentsElements) \
V(ArgumentsLength) \
@@ -67,6 +68,7 @@ class LCodeGen;
V(CallKnownGlobal) \
V(CallNamed) \
V(CallNew) \
+ V(CallNewArray) \
V(CallRuntime) \
V(CallStub) \
V(CheckFunction) \
@@ -93,6 +95,7 @@ class LCodeGen;
V(Deoptimize) \
V(DivI) \
V(DoubleToI) \
+ V(DummyUse) \
V(ElementsKind) \
V(FastLiteral) \
V(FixedArrayBaseLength) \
@@ -106,6 +109,7 @@ class LCodeGen;
V(In) \
V(InstanceOf) \
V(InstanceOfKnownGlobal) \
+ V(InstanceSize) \
V(InstructionGap) \
V(Integer32ToDouble) \
V(Uint32ToDouble) \
@@ -131,10 +135,13 @@ class LCodeGen;
V(LoadNamedFieldPolymorphic) \
V(LoadNamedGeneric) \
V(MapEnumLength) \
+ V(MathExp) \
V(MathFloorOfDiv) \
V(MathMinMax) \
V(ModI) \
V(MulI) \
+ V(MultiplyAddD) \
+ V(MultiplySubD) \
V(NumberTagD) \
V(NumberTagI) \
V(NumberTagU) \
@@ -148,6 +155,7 @@ class LCodeGen;
V(Random) \
V(RegExpLiteral) \
V(Return) \
+ V(SeqStringSetChar) \
V(ShiftI) \
V(SmiTag) \
V(SmiUntag) \
@@ -165,11 +173,13 @@ class LCodeGen;
V(StringCompareAndBranch) \
V(StringLength) \
V(SubI) \
+ V(RSubI) \
V(TaggedToI) \
V(ThisFunction) \
V(Throw) \
V(ToFastProperties) \
V(TransitionElementsKind) \
+ V(TrapAllocationMemento) \
V(Typeof) \
V(TypeofIsAndBranch) \
V(UnaryMathOperation) \
@@ -252,6 +262,11 @@ class LInstruction: public ZoneObject {
void MarkAsCall() { is_call_ = true; }
// Interface to the register allocator and iterators.
+ bool ClobbersTemps() const { return is_call_; }
+ bool ClobbersRegisters() const { return is_call_; }
+ bool ClobbersDoubleRegisters() const { return is_call_; }
+
+ // Interface to the register allocator and iterators.
bool IsMarkedAsCall() const { return is_call_; }
virtual bool HasResult() const = 0;
@@ -394,6 +409,15 @@ class LLazyBailout: public LTemplateInstruction<0, 0, 0> {
};
+class LDummyUse: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LDummyUse(LOperand* value) {
+ inputs_[0] = value;
+ }
+ DECLARE_CONCRETE_INSTRUCTION(DummyUse, "dummy-use")
+};
+
+
class LDeoptimize: public LTemplateInstruction<0, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
@@ -621,6 +645,42 @@ class LMulI: public LTemplateInstruction<1, 2, 1> {
};
+// Instruction for computing multiplier * multiplicand + addend.
+class LMultiplyAddD: public LTemplateInstruction<1, 3, 0> {
+ public:
+ LMultiplyAddD(LOperand* addend, LOperand* multiplier,
+ LOperand* multiplicand) {
+ inputs_[0] = addend;
+ inputs_[1] = multiplier;
+ inputs_[2] = multiplicand;
+ }
+
+ LOperand* addend() { return inputs_[0]; }
+ LOperand* multiplier() { return inputs_[1]; }
+ LOperand* multiplicand() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MultiplyAddD, "multiply-add-d")
+};
+
+
+// Instruction for computing minuend - multiplier * multiplicand.
+class LMultiplySubD: public LTemplateInstruction<1, 3, 0> {
+ public:
+ LMultiplySubD(LOperand* minuend, LOperand* multiplier,
+ LOperand* multiplicand) {
+ inputs_[0] = minuend;
+ inputs_[1] = multiplier;
+ inputs_[2] = multiplicand;
+ }
+
+ LOperand* minuend() { return inputs_[0]; }
+ LOperand* multiplier() { return inputs_[1]; }
+ LOperand* multiplicand() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MultiplySubD, "multiply-sub-d")
+};
+
+
class LCmpIDAndBranch: public LControlInstruction<2, 0> {
public:
LCmpIDAndBranch(LOperand* left, LOperand* right) {
@@ -636,7 +696,7 @@ class LCmpIDAndBranch: public LControlInstruction<2, 0> {
Token::Value op() const { return hydrogen()->token(); }
bool is_double() const {
- return hydrogen()->GetInputRepresentation().IsDouble();
+ return hydrogen()->representation().IsDouble();
}
virtual void PrintDataTo(StringStream* stream);
@@ -661,6 +721,30 @@ class LUnaryMathOperation: public LTemplateInstruction<1, 1, 1> {
};
+class LMathExp: public LTemplateInstruction<1, 1, 3> {
+ public:
+ LMathExp(LOperand* value,
+ LOperand* double_temp,
+ LOperand* temp1,
+ LOperand* temp2) {
+ inputs_[0] = value;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ temps_[2] = double_temp;
+ ExternalReference::InitializeMathExpData();
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+ LOperand* double_temp() { return temps_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathExp, "math-exp")
+
+ virtual void PrintDataTo(StringStream* stream);
+};
+
+
class LCmpObjectEqAndBranch: public LControlInstruction<2, 0> {
public:
LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
@@ -917,6 +1001,19 @@ class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 1, 1> {
};
+class LInstanceSize: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LInstanceSize(LOperand* object) {
+ inputs_[0] = object;
+ }
+
+ LOperand* object() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(InstanceSize, "instance-size")
+ DECLARE_HYDROGEN_ACCESSOR(InstanceSize)
+};
+
+
class LBoundsCheck: public LTemplateInstruction<0, 2, 0> {
public:
LBoundsCheck(LOperand* index, LOperand* length) {
@@ -985,6 +1082,21 @@ class LSubI: public LTemplateInstruction<1, 2, 0> {
};
+class LRSubI: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LRSubI(LOperand* left, LOperand* right) {
+ inputs_[0] = left;
+ inputs_[1] = right;
+ }
+
+ LOperand* left() { return inputs_[0]; }
+ LOperand* right() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(RSubI, "rsub-i")
+ DECLARE_HYDROGEN_ACCESSOR(Sub)
+};
+
+
class LConstantI: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i")
@@ -1138,6 +1250,30 @@ class LDateField: public LTemplateInstruction<1, 1, 1> {
};
+class LSeqStringSetChar: public LTemplateInstruction<1, 3, 0> {
+ public:
+ LSeqStringSetChar(String::Encoding encoding,
+ LOperand* string,
+ LOperand* index,
+ LOperand* value) : encoding_(encoding) {
+ inputs_[0] = string;
+ inputs_[1] = index;
+ inputs_[2] = value;
+ }
+
+ String::Encoding encoding() { return encoding_; }
+ LOperand* string() { return inputs_[0]; }
+ LOperand* index() { return inputs_[1]; }
+ LOperand* value() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(SeqStringSetChar, "seq-string-set-char")
+ DECLARE_HYDROGEN_ACCESSOR(SeqStringSetChar)
+
+ private:
+ String::Encoding encoding_;
+};
+
+
class LThrow: public LTemplateInstruction<0, 1, 0> {
public:
explicit LThrow(LOperand* value) {
@@ -1372,6 +1508,7 @@ class LLoadKeyed: public LTemplateInstruction<1, 2, 0> {
DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed")
DECLARE_HYDROGEN_ACCESSOR(LoadKeyed)
+ virtual void PrintDataTo(StringStream* stream);
uint32_t additional_index() const { return hydrogen()->index_offset(); }
};
@@ -1518,6 +1655,7 @@ class LThisFunction: public LTemplateInstruction<1, 0, 0> {
class LContext: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(Context, "context")
+ DECLARE_HYDROGEN_ACCESSOR(Context)
};
@@ -1691,6 +1829,23 @@ class LCallNew: public LTemplateInstruction<1, 1, 0> {
};
+class LCallNewArray: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LCallNewArray(LOperand* constructor) {
+ inputs_[0] = constructor;
+ }
+
+ LOperand* constructor() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array")
+ DECLARE_HYDROGEN_ACCESSOR(CallNewArray)
+
+ virtual void PrintDataTo(StringStream* stream);
+
+ int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
class LCallRuntime: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
@@ -1762,6 +1917,7 @@ class LNumberTagD: public LTemplateInstruction<1, 1, 2> {
LOperand* temp2() { return temps_[1]; }
DECLARE_CONCRETE_INSTRUCTION(NumberTagD, "number-tag-d")
+ DECLARE_HYDROGEN_ACCESSOR(Change)
};
@@ -1946,10 +2102,10 @@ class LTransitionElementsKind: public LTemplateInstruction<1, 1, 2> {
public:
LTransitionElementsKind(LOperand* object,
LOperand* new_map_temp,
- LOperand* temp) {
+ LOperand* fixed_object_temp) {
inputs_[0] = object;
temps_[0] = new_map_temp;
- temps_[1] = temp;
+ temps_[1] = fixed_object_temp;
}
LOperand* object() { return inputs_[0]; }
@@ -1964,6 +2120,24 @@ class LTransitionElementsKind: public LTemplateInstruction<1, 1, 2> {
Handle<Map> original_map() { return hydrogen()->original_map(); }
Handle<Map> transitioned_map() { return hydrogen()->transitioned_map(); }
+ ElementsKind from_kind() { return hydrogen()->from_kind(); }
+ ElementsKind to_kind() { return hydrogen()->to_kind(); }
+};
+
+
+class LTrapAllocationMemento : public LTemplateInstruction<0, 1, 1> {
+ public:
+ LTrapAllocationMemento(LOperand* object,
+ LOperand* temp) {
+ inputs_[0] = object;
+ temps_[0] = temp;
+ }
+
+ LOperand* object() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(TrapAllocationMemento,
+ "trap-allocation-memento")
};
@@ -2076,8 +2250,10 @@ class LCheckPrototypeMaps: public LTemplateInstruction<1, 0, 2> {
DECLARE_CONCRETE_INSTRUCTION(CheckPrototypeMaps, "check-prototype-maps")
DECLARE_HYDROGEN_ACCESSOR(CheckPrototypeMaps)
- Handle<JSObject> prototype() const { return hydrogen()->prototype(); }
- Handle<JSObject> holder() const { return hydrogen()->holder(); }
+ ZoneList<Handle<JSObject> >* prototypes() const {
+ return hydrogen()->prototypes();
+ }
+ ZoneList<Handle<Map> >* maps() const { return hydrogen()->maps(); }
};
@@ -2145,7 +2321,7 @@ class LClampTToUint8: public LTemplateInstruction<1, 1, 1> {
};
-class LAllocateObject: public LTemplateInstruction<1, 0, 2> {
+class LAllocateObject: public LTemplateInstruction<1, 1, 2> {
public:
LAllocateObject(LOperand* temp, LOperand* temp2) {
temps_[0] = temp;
@@ -2160,6 +2336,23 @@ class LAllocateObject: public LTemplateInstruction<1, 0, 2> {
};
+class LAllocate: public LTemplateInstruction<1, 2, 2> {
+ public:
+ LAllocate(LOperand* size, LOperand* temp1, LOperand* temp2) {
+ inputs_[1] = size;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ }
+
+ LOperand* size() { return inputs_[1]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Allocate, "allocate")
+ DECLARE_HYDROGEN_ACCESSOR(Allocate)
+};
+
+
class LFastLiteral: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(FastLiteral, "fast-literal")
@@ -2284,8 +2477,9 @@ class LOsrEntry: public LTemplateInstruction<0, 0, 0> {
// slot, i.e., that must also be restored to the spill slot on OSR entry.
// NULL if the register has no assigned spill slot. Indexed by allocation
// index.
- LOperand* register_spills_[Register::kNumAllocatableRegisters];
- LOperand* double_register_spills_[DoubleRegister::kNumAllocatableRegisters];
+ LOperand* register_spills_[Register::kMaxNumAllocatableRegisters];
+ LOperand* double_register_spills_[
+ DoubleRegister::kMaxNumAllocatableRegisters];
};
@@ -2407,6 +2601,10 @@ class LChunkBuilder BASE_EMBEDDED {
HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
#undef DECLARE_DO
+ LInstruction* DoMultiplyAdd(HMul* mul, HValue* addend);
+ LInstruction* DoMultiplySub(HValue* minuend, HMul* mul);
+ LInstruction* DoRSub(HSub* instr);
+
static bool HasMagicNumberForDivisor(int32_t divisor);
static HValue* SimplifiedDividendForMathFloorOfDiv(HValue* val);
static HValue* SimplifiedDivisorForMathFloorOfDiv(HValue* val);
diff --git a/src/3rdparty/v8/src/arm/lithium-codegen-arm.cc b/src/3rdparty/v8/src/arm/lithium-codegen-arm.cc
index 67773ee..f0b0e96 100644
--- a/src/3rdparty/v8/src/arm/lithium-codegen-arm.cc
+++ b/src/3rdparty/v8/src/arm/lithium-codegen-arm.cc
@@ -65,10 +65,6 @@ bool LCodeGen::GenerateCode() {
HPhase phase("Z_Code generation", chunk());
ASSERT(is_unused());
status_ = GENERATING;
- CpuFeatures::Scope scope1(VFP3);
- CpuFeatures::Scope scope2(ARMv7);
-
- CodeStub::GenerateFPStubs();
// Open a frame scope to indicate that there is a frame on the stack. The
// NONE indicates that the scope shouldn't actually generate code to set up
@@ -87,7 +83,14 @@ void LCodeGen::FinishCode(Handle<Code> code) {
ASSERT(is_done());
code->set_stack_slots(GetStackSlotCount());
code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
+ if (FLAG_weak_embedded_maps_in_optimized_code) {
+ RegisterDependentCodeForEmbeddedMaps(code);
+ }
PopulateDeoptimizationData(code);
+  for (int i = 0; i < prototype_maps_.length(); i++) {
+ prototype_maps_.at(i)->AddDependentCode(
+ DependentCode::kPrototypeCheckGroup, code);
+ }
}
@@ -118,65 +121,98 @@ void LCodeGen::Comment(const char* format, ...) {
bool LCodeGen::GeneratePrologue() {
ASSERT(is_generating());
- ProfileEntryHookStub::MaybeCallEntryHook(masm_);
+ if (info()->IsOptimizing()) {
+ ProfileEntryHookStub::MaybeCallEntryHook(masm_);
#ifdef DEBUG
- if (strlen(FLAG_stop_at) > 0 &&
- info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
- __ stop("stop_at");
- }
+ if (strlen(FLAG_stop_at) > 0 &&
+ info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
+ __ stop("stop_at");
+ }
#endif
- // r1: Callee's JS function.
- // cp: Callee's context.
- // fp: Caller's frame pointer.
- // lr: Caller's pc.
-
- // Strict mode functions and builtins need to replace the receiver
- // with undefined when called as functions (without an explicit
- // receiver object). r5 is zero for method calls and non-zero for
- // function calls.
- if (!info_->is_classic_mode() || info_->is_native()) {
- Label ok;
- Label begin;
- __ bind(&begin);
- __ cmp(r5, Operand(0));
- __ b(eq, &ok);
- int receiver_offset = scope()->num_parameters() * kPointerSize;
- __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
- __ str(r2, MemOperand(sp, receiver_offset));
- __ bind(&ok);
- //ASSERT_EQ(kSizeOfOptimizedStrictModePrologue, ok.pos() - begin.pos());
- }
-
- // The following three instructions must remain together and unmodified for
- // code aging to work properly.
- __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
- // Add unused load of ip to ensure prologue sequence is identical for
- // full-codegen and lithium-codegen.
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ add(fp, sp, Operand(2 * kPointerSize)); // Adjust FP to point to saved FP.
+ // r1: Callee's JS function.
+ // cp: Callee's context.
+ // fp: Caller's frame pointer.
+ // lr: Caller's pc.
+
+ // Strict mode functions and builtins need to replace the receiver
+ // with undefined when called as functions (without an explicit
+ // receiver object). r5 is zero for method calls and non-zero for
+ // function calls.
+ if (!info_->is_classic_mode() || info_->is_native()) {
+ Label ok;
+ __ cmp(r5, Operand::Zero());
+ __ b(eq, &ok);
+ int receiver_offset = scope()->num_parameters() * kPointerSize;
+ __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
+ __ str(r2, MemOperand(sp, receiver_offset));
+ __ bind(&ok);
+ }
+ }
+
+ info()->set_prologue_offset(masm_->pc_offset());
+ if (NeedsEagerFrame()) {
+ if (info()->IsStub()) {
+ __ stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
+ __ Push(Smi::FromInt(StackFrame::STUB));
+ // Adjust FP to point to saved FP.
+ __ add(fp, sp, Operand(2 * kPointerSize));
+ } else {
+      PredictableCodeSizeScope predictable_code_size_scope(
+ masm_, kNoCodeAgeSequenceLength * Assembler::kInstrSize);
+ // The following three instructions must remain together and unmodified
+ // for code aging to work properly.
+ __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
+ // Load undefined value here, so the value is ready for the loop
+ // below.
+ __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+ // Adjust FP to point to saved FP.
+ __ add(fp, sp, Operand(2 * kPointerSize));
+ }
+ frame_is_built_ = true;
+ }
// Reserve space for the stack slots needed by the code.
int slots = GetStackSlotCount();
if (slots > 0) {
if (FLAG_debug_code) {
- __ mov(r0, Operand(slots));
- __ mov(r2, Operand(kSlotsZapValue));
+ __ sub(sp, sp, Operand(slots * kPointerSize));
+ __ push(r0);
+ __ push(r1);
+ __ add(r0, sp, Operand(slots * kPointerSize));
+ __ mov(r1, Operand(kSlotsZapValue));
Label loop;
__ bind(&loop);
- __ push(r2);
- __ sub(r0, r0, Operand(1), SetCC);
+ __ sub(r0, r0, Operand(kPointerSize));
+ __ str(r1, MemOperand(r0, 2 * kPointerSize));
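+      // The 2 * kPointerSize offset in the store above skips the saved
+      // r0/r1, so the loop zaps exactly the freshly reserved stack slots.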
+ __ cmp(r0, sp);
__ b(ne, &loop);
+ __ pop(r1);
+ __ pop(r0);
} else {
__ sub(sp, sp, Operand(slots * kPointerSize));
}
}
+ if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope(VFP2);
+ Comment(";;; Save clobbered callee double registers");
+ int count = 0;
+ BitVector* doubles = chunk()->allocated_double_registers();
+ BitVector::Iterator save_iterator(doubles);
+ while (!save_iterator.Done()) {
+ __ vstr(DwVfpRegister::FromAllocationIndex(save_iterator.Current()),
+ MemOperand(sp, count * kDoubleSize));
+ save_iterator.Advance();
+ count++;
+ }
+ }
+
// Possibly allocate a local context.
- int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+ int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
if (heap_slots > 0 ||
- (scope()->is_qml_mode() && scope()->is_global_scope())) {
+ (scope() != NULL && scope()->is_qml_mode() && scope()->is_global_scope())) {
Comment(";;; Allocate local context");
// Argument to NewContext is the function, which is in r1.
__ push(r1);
@@ -211,7 +247,7 @@ bool LCodeGen::GeneratePrologue() {
}
// Trace the call.
- if (FLAG_trace) {
+ if (FLAG_trace && info()->IsOptimizing()) {
__ CallRuntime(Runtime::kTraceEnter, 0);
}
return !is_aborted();
@@ -231,7 +267,30 @@ bool LCodeGen::GenerateBody() {
}
if (emit_instructions) {
- Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic());
+ if (FLAG_code_comments) {
+ HValue* hydrogen = instr->hydrogen_value();
+ if (hydrogen != NULL) {
+ if (hydrogen->IsChange()) {
+ HValue* changed_value = HChange::cast(hydrogen)->value();
+ int use_id = 0;
+ const char* use_mnemo = "dead";
+ if (hydrogen->UseCount() >= 1) {
+ HValue* use_value = hydrogen->uses().value();
+ use_id = use_value->id();
+ use_mnemo = use_value->Mnemonic();
+ }
+ Comment(";;; @%d: %s. <of #%d %s for #%d %s>",
+ current_instruction_, instr->Mnemonic(),
+ changed_value->id(), changed_value->Mnemonic(),
+ use_id, use_mnemo);
+ } else {
+ Comment(";;; @%d: %s. <#%d>", current_instruction_,
+ instr->Mnemonic(), hydrogen->id());
+ }
+ } else {
+ Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic());
+ }
+ }
instr->CompileToNative(this);
}
}
@@ -246,10 +305,31 @@ bool LCodeGen::GenerateDeferredCode() {
for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
LDeferredCode* code = deferred_[i];
__ bind(code->entry());
+ if (NeedsDeferredFrame()) {
+ Comment(";;; Deferred build frame",
+ code->instruction_index(),
+ code->instr()->Mnemonic());
+ ASSERT(!frame_is_built_);
+ ASSERT(info()->IsStub());
+ frame_is_built_ = true;
+ __ stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
+ __ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
+ __ push(scratch0());
+ __ add(fp, sp, Operand(2 * kPointerSize));
+ }
Comment(";;; Deferred code @%d: %s.",
code->instruction_index(),
code->instr()->Mnemonic());
code->Generate();
+ if (NeedsDeferredFrame()) {
+ Comment(";;; Deferred destroy frame",
+ code->instruction_index(),
+ code->instr()->Mnemonic());
+ ASSERT(frame_is_built_);
+ __ pop(ip);
+ __ ldm(ia_w, sp, cp.bit() | fp.bit() | lr.bit());
+ frame_is_built_ = false;
+ }
__ jmp(code->exit());
}
}
@@ -271,24 +351,77 @@ bool LCodeGen::GenerateDeoptJumpTable() {
   // Each entry in the jump table generates up to seven instructions,
   // depending on whether it needs a frame and whether the deopt is lazy.
if (!is_int24((masm()->pc_offset() / Assembler::kInstrSize) +
- deopt_jump_table_.length() * 2)) {
+ deopt_jump_table_.length() * 7)) {
Abort("Generated code is too large");
}
- // Block the constant pool emission during the jump table emission.
- __ BlockConstPoolFor(deopt_jump_table_.length());
__ RecordComment("[ Deoptimisation jump table");
Label table_start;
__ bind(&table_start);
+ Label needs_frame_not_call;
+ Label needs_frame_is_call;
for (int i = 0; i < deopt_jump_table_.length(); i++) {
__ bind(&deopt_jump_table_[i].label);
- __ ldr(pc, MemOperand(pc, Assembler::kInstrSize - Assembler::kPcLoadDelta));
- __ dd(reinterpret_cast<uint32_t>(deopt_jump_table_[i].address));
+ Address entry = deopt_jump_table_[i].address;
+ bool is_lazy_deopt = deopt_jump_table_[i].is_lazy_deopt;
+ Deoptimizer::BailoutType type =
+ is_lazy_deopt ? Deoptimizer::LAZY : Deoptimizer::EAGER;
+ int id = Deoptimizer::GetDeoptimizationId(entry, type);
+ if (id == Deoptimizer::kNotDeoptimizationEntry) {
+ Comment(";;; jump table entry %d.", i);
+ } else {
+ Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
+ }
+ if (deopt_jump_table_[i].needs_frame) {
+ __ mov(ip, Operand(ExternalReference::ForDeoptEntry(entry)));
+ if (is_lazy_deopt) {
+ if (needs_frame_is_call.is_bound()) {
+ __ b(&needs_frame_is_call);
+ } else {
+ __ bind(&needs_frame_is_call);
+ __ stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
+ // This variant of deopt can only be used with stubs. Since we don't
+ // have a function pointer to install in the stack frame that we're
+ // building, install a special marker there instead.
+ ASSERT(info()->IsStub());
+ __ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
+ __ push(scratch0());
+ __ add(fp, sp, Operand(2 * kPointerSize));
+ __ mov(lr, Operand(pc), LeaveCC, al);
+ __ mov(pc, ip);
+ }
+ } else {
+ if (needs_frame_not_call.is_bound()) {
+ __ b(&needs_frame_not_call);
+ } else {
+ __ bind(&needs_frame_not_call);
+ __ stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
+ // This variant of deopt can only be used with stubs. Since we don't
+ // have a function pointer to install in the stack frame that we're
+ // building, install a special marker there instead.
+ ASSERT(info()->IsStub());
+ __ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
+ __ push(scratch0());
+ __ add(fp, sp, Operand(2 * kPointerSize));
+ __ mov(pc, ip);
+ }
+ }
+ } else {
+ if (is_lazy_deopt) {
+ __ mov(lr, Operand(pc), LeaveCC, al);
+ __ mov(pc, Operand(ExternalReference::ForDeoptEntry(entry)));
+ } else {
+ __ mov(pc, Operand(ExternalReference::ForDeoptEntry(entry)));
+ }
+ }
+ masm()->CheckConstPool(false, false);
}
- ASSERT(masm()->InstructionsGeneratedSince(&table_start) ==
- deopt_jump_table_.length() * 2);
__ RecordComment("]");
+ // Force constant pool emission at the end of the deopt jump table to make
+ // sure that no constant pools are emitted after.
+ masm()->CheckConstPool(true, false);
+
// The deoptimization jump table is the last part of the instruction
// sequence. Mark the generated code as done unless we bailed out.
if (!is_aborted()) status_ = DONE;
@@ -308,8 +441,8 @@ Register LCodeGen::ToRegister(int index) const {
}
-DoubleRegister LCodeGen::ToDoubleRegister(int index) const {
- return DoubleRegister::FromAllocationIndex(index);
+DwVfpRegister LCodeGen::ToDoubleRegister(int index) const {
+ return DwVfpRegister::FromAllocationIndex(index);
}
@@ -350,15 +483,15 @@ Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
}
-DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
+DwVfpRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
ASSERT(op->IsDoubleRegister());
return ToDoubleRegister(op->index());
}
-DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
- SwVfpRegister flt_scratch,
- DoubleRegister dbl_scratch) {
+DwVfpRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
+ SwVfpRegister flt_scratch,
+ DwVfpRegister dbl_scratch) {
if (op->IsDoubleRegister()) {
return ToDoubleRegister(op->index());
} else if (op->IsConstantOperand()) {
@@ -403,8 +536,6 @@ bool LCodeGen::IsInteger32(LConstantOperand* op) const {
int LCodeGen::ToInteger32(LConstantOperand* op) const {
HConstant* constant = chunk_->LookupConstant(op);
- ASSERT(chunk_->LookupLiteralRepresentation(op).IsInteger32());
- ASSERT(constant->HasInteger32Value());
return constant->Integer32Value();
}
@@ -433,11 +564,11 @@ Operand LCodeGen::ToOperand(LOperand* op) {
return Operand(ToRegister(op));
} else if (op->IsDoubleRegister()) {
Abort("ToOperand IsDoubleRegister unimplemented");
- return Operand(0);
+ return Operand::Zero();
}
// Stack slots not implemented, use ToMemOperand instead.
UNREACHABLE();
- return Operand(0);
+ return Operand::Zero();
}
@@ -445,37 +576,20 @@ MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
ASSERT(!op->IsRegister());
ASSERT(!op->IsDoubleRegister());
ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
- int index = op->index();
- if (index >= 0) {
- // Local or spill slot. Skip the frame pointer, function, and
- // context in the fixed part of the frame.
- return MemOperand(fp, -(index + 3) * kPointerSize);
- } else {
- // Incoming parameter. Skip the return address.
- return MemOperand(fp, -(index - 1) * kPointerSize);
- }
+ return MemOperand(fp, StackSlotOffset(op->index()));
}
MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
ASSERT(op->IsDoubleStackSlot());
- int index = op->index();
- if (index >= 0) {
- // Local or spill slot. Skip the frame pointer, function, context,
- // and the first word of the double in the fixed part of the frame.
- return MemOperand(fp, -(index + 3) * kPointerSize + kPointerSize);
- } else {
- // Incoming parameter. Skip the return address and the first word of
- // the double.
- return MemOperand(fp, -(index - 1) * kPointerSize + kPointerSize);
- }
+ return MemOperand(fp, StackSlotOffset(op->index()) + kPointerSize);
}
void LCodeGen::WriteTranslation(LEnvironment* environment,
Translation* translation,
- int* arguments_index,
- int* arguments_count) {
+ int* pushed_arguments_index,
+ int* pushed_arguments_count) {
if (environment == NULL) return;
// The translation includes one command per value in the environment.
@@ -487,14 +601,16 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
// arguments index points to the first element of a sequence of tagged
// values on the stack that represent the arguments. This needs to be
// kept in sync with the LArgumentsElements implementation.
- *arguments_index = -environment->parameter_count();
- *arguments_count = environment->parameter_count();
+ *pushed_arguments_index = -environment->parameter_count();
+ *pushed_arguments_count = environment->parameter_count();
WriteTranslation(environment->outer(),
translation,
- arguments_index,
- arguments_count);
- int closure_id = *info()->closure() != *environment->closure()
+ pushed_arguments_index,
+ pushed_arguments_count);
+ bool has_closure_id = !info()->closure().is_null() &&
+ *info()->closure() != *environment->closure();
+ int closure_id = has_closure_id
? DefineDeoptimizationLiteral(environment->closure())
: Translation::kSelfLiteralId;
@@ -515,19 +631,29 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
ASSERT(height == 0);
translation->BeginSetterStubFrame(closure_id);
break;
+ case STUB:
+ translation->BeginCompiledStubFrame();
+ break;
case ARGUMENTS_ADAPTOR:
translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
break;
}
// Inlined frames which push their arguments cause the index to be
- // bumped and a new stack area to be used for materialization.
- if (environment->entry() != NULL &&
- environment->entry()->arguments_pushed()) {
- *arguments_index = *arguments_index < 0
- ? GetStackSlotCount()
- : *arguments_index + *arguments_count;
- *arguments_count = environment->entry()->arguments_count() + 1;
+  // bumped and another stack area to be used for materialization;
+  // otherwise the actual argument values are unknown for inlined frames.
+ bool arguments_known = true;
+ int arguments_index = *pushed_arguments_index;
+ int arguments_count = *pushed_arguments_count;
+ if (environment->entry() != NULL) {
+ arguments_known = environment->entry()->arguments_pushed();
+ arguments_index = arguments_index < 0
+ ? GetStackSlotCount() : arguments_index + arguments_count;
+ arguments_count = environment->entry()->arguments_count() + 1;
+ if (environment->entry()->arguments_pushed()) {
+ *pushed_arguments_index = arguments_index;
+ *pushed_arguments_count = arguments_count;
+ }
}
for (int i = 0; i < translation_size; ++i) {
@@ -542,8 +668,9 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
environment->spilled_registers()[value->index()],
environment->HasTaggedValueAt(i),
environment->HasUint32ValueAt(i),
- *arguments_index,
- *arguments_count);
+ arguments_known,
+ arguments_index,
+ arguments_count);
} else if (
value->IsDoubleRegister() &&
environment->spilled_double_registers()[value->index()] != NULL) {
@@ -553,8 +680,9 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
environment->spilled_double_registers()[value->index()],
false,
false,
- *arguments_index,
- *arguments_count);
+ arguments_known,
+ arguments_index,
+ arguments_count);
}
}
@@ -562,8 +690,9 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
value,
environment->HasTaggedValueAt(i),
environment->HasUint32ValueAt(i),
- *arguments_index,
- *arguments_count);
+ arguments_known,
+ arguments_index,
+ arguments_count);
}
}
@@ -572,13 +701,15 @@ void LCodeGen::AddToTranslation(Translation* translation,
LOperand* op,
bool is_tagged,
bool is_uint32,
+ bool arguments_known,
int arguments_index,
int arguments_count) {
if (op == NULL) {
// TODO(twuerthinger): Introduce marker operands to indicate that this value
// is not present and must be reconstructed from the deoptimizer. Currently
// this is only used for the arguments object.
- translation->StoreArgumentsObject(arguments_index, arguments_count);
+ translation->StoreArgumentsObject(
+ arguments_known, arguments_index, arguments_count);
} else if (op->IsStackSlot()) {
if (is_tagged) {
translation->StoreStackSlot(op->index());
@@ -710,36 +841,75 @@ void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
ASSERT(environment->HasBeenRegistered());
int id = environment->deoptimization_index();
- Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER);
+ ASSERT(info()->IsOptimizing() || info()->IsStub());
+ Deoptimizer::BailoutType bailout_type = info()->IsStub()
+ ? Deoptimizer::LAZY
+ : Deoptimizer::EAGER;
+ Address entry =
+ Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
if (entry == NULL) {
Abort("bailout was not prepared");
return;
}
ASSERT(FLAG_deopt_every_n_times < 2); // Other values not supported on ARM.
-
- if (FLAG_deopt_every_n_times == 1 &&
- info_->shared_info()->opt_count() == id) {
+ if (FLAG_deopt_every_n_times == 1 && info_->opt_count() == id) {
__ Jump(entry, RelocInfo::RUNTIME_ENTRY);
return;
}
- if (FLAG_trap_on_deopt) __ stop("trap_on_deopt", cc);
+ if (FLAG_trap_on_deopt) {
+ __ stop("trap_on_deopt", cc);
+ }
- if (cc == al) {
- __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
+ ASSERT(info()->IsStub() || frame_is_built_);
+ bool needs_lazy_deopt = info()->IsStub();
+ if (cc == al && frame_is_built_) {
+ if (needs_lazy_deopt) {
+ __ Call(entry, RelocInfo::RUNTIME_ENTRY);
+ } else {
+ __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
+ }
} else {
// We often have several deopts to the same entry, reuse the last
// jump entry if this is the case.
if (deopt_jump_table_.is_empty() ||
- (deopt_jump_table_.last().address != entry)) {
- deopt_jump_table_.Add(JumpTableEntry(entry), zone());
+ (deopt_jump_table_.last().address != entry) ||
+ (deopt_jump_table_.last().is_lazy_deopt != needs_lazy_deopt) ||
+ (deopt_jump_table_.last().needs_frame != !frame_is_built_)) {
+ JumpTableEntry table_entry(entry, !frame_is_built_, needs_lazy_deopt);
+ deopt_jump_table_.Add(table_entry, zone());
}
__ b(cc, &deopt_jump_table_.last().label);
}
}
+void LCodeGen::RegisterDependentCodeForEmbeddedMaps(Handle<Code> code) {
+ ZoneList<Handle<Map> > maps(1, zone());
+ int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
+ for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
+ RelocInfo::Mode mode = it.rinfo()->rmode();
+ if (mode == RelocInfo::EMBEDDED_OBJECT &&
+ it.rinfo()->target_object()->IsMap()) {
+ Handle<Map> map(Map::cast(it.rinfo()->target_object()));
+ if (map->CanTransition()) {
+ maps.Add(map, zone());
+ }
+ }
+ }
+#ifdef VERIFY_HEAP
+ // This disables verification of weak embedded maps after full GC.
+ // AddDependentCode can cause a GC, which would observe the state where
+ // this code is not yet in the depended code lists of the embedded maps.
+ NoWeakEmbeddedMapsVerificationScope disable_verification_of_embedded_maps;
+#endif
+ for (int i = 0; i < maps.length(); i++) {
+ maps.at(i)->AddDependentCode(DependentCode::kWeaklyEmbeddedGroup, code);
+ }
+}
+
+
void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
int length = deoptimizations_.length();
if (length == 0) return;
@@ -914,39 +1084,39 @@ void LCodeGen::DoCallStub(LCallStub* instr) {
switch (instr->hydrogen()->major_key()) {
case CodeStub::RegExpConstructResult: {
RegExpConstructResultStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
break;
}
case CodeStub::RegExpExec: {
RegExpExecStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
break;
}
case CodeStub::SubString: {
SubStringStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
break;
}
case CodeStub::NumberToString: {
NumberToStringStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
break;
}
case CodeStub::StringAdd: {
StringAddStub stub(NO_STRING_ADD_FLAGS);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
break;
}
case CodeStub::StringCompare: {
StringCompareStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
break;
}
case CodeStub::TranscendentalCache: {
__ ldr(r0, MemOperand(sp, 0));
TranscendentalCacheStub stub(instr->transcendental_type(),
TranscendentalCacheStub::TAGGED);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
break;
}
default:
@@ -971,14 +1141,14 @@ void LCodeGen::DoModI(LModI* instr) {
if (divisor < 0) divisor = -divisor;
Label positive_dividend, done;
- __ cmp(dividend, Operand(0));
+ __ cmp(dividend, Operand::Zero());
__ b(pl, &positive_dividend);
- __ rsb(result, dividend, Operand(0));
+ __ rsb(result, dividend, Operand::Zero());
__ and_(result, result, Operand(divisor - 1), SetCC);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
DeoptimizeIf(eq, instr->environment());
}
- __ rsb(result, result, Operand(0));
+ __ rsb(result, result, Operand::Zero());
__ b(&done);
__ bind(&positive_dividend);
__ and_(result, dividend, Operand(divisor - 1));
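    // Reaching this code implies a power-of-two divisor, so the mask
    // (divisor - 1) computes dividend % divisor directly for non-negative
    // dividends; the path above negates, masks, and negates back instead.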
@@ -996,21 +1166,31 @@ void LCodeGen::DoModI(LModI* instr) {
CpuFeatures::Scope scope(SUDIV);
// Check for x % 0.
if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
- __ cmp(right, Operand(0));
+ __ cmp(right, Operand::Zero());
DeoptimizeIf(eq, instr->environment());
}
+ // Check for (kMinInt % -1).
+ if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+ Label left_not_min_int;
+ __ cmp(left, Operand(kMinInt));
+ __ b(ne, &left_not_min_int);
+ __ cmp(right, Operand(-1));
+ DeoptimizeIf(eq, instr->environment());
+ __ bind(&left_not_min_int);
+ }
+
     // For r3 = r1 % r2, we can use the following ARM code:
// sdiv r3, r1, r2
// mls r3, r3, r2, r1
__ sdiv(result, left, right);
__ mls(result, result, right, left);
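     // Worked example: left = 7, right = 3. sdiv yields 2 and
     // mls yields 7 - 2 * 3 = 1, which is the remainder.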
- __ cmp(result, Operand(0));
+ __ cmp(result, Operand::Zero());
__ b(ne, &done);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ cmp(left, Operand(0));
+ __ cmp(left, Operand::Zero());
DeoptimizeIf(lt, instr->environment());
}
} else {
@@ -1029,16 +1209,18 @@ void LCodeGen::DoModI(LModI* instr) {
Label vfp_modulo, both_positive, right_negative;
+ CpuFeatures::Scope scope(VFP2);
+
// Check for x % 0.
if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
- __ cmp(right, Operand(0));
+ __ cmp(right, Operand::Zero());
DeoptimizeIf(eq, instr->environment());
}
__ Move(result, left);
// (0 % x) must yield 0 (if x is finite, which is the case here).
- __ cmp(left, Operand(0));
+ __ cmp(left, Operand::Zero());
__ b(eq, &done);
// Preload right in a vfp register.
__ vmov(divisor.low(), right);
@@ -1058,7 +1240,7 @@ void LCodeGen::DoModI(LModI* instr) {
__ bind(&right_negative);
// Negate right. The sign of the divisor does not matter.
- __ rsb(right, right, Operand(0));
+ __ rsb(right, right, Operand::Zero());
__ bind(&both_positive);
const int kUnfolds = 3;
@@ -1109,7 +1291,7 @@ void LCodeGen::DoModI(LModI* instr) {
// Check for -0.
__ sub(scratch2, left, scratch, SetCC);
__ b(ne, &ok);
- __ cmp(left, Operand(0));
+ __ cmp(left, Operand::Zero());
DeoptimizeIf(mi, instr->environment());
__ bind(&ok);
// Load the result and we are done.
@@ -1144,11 +1326,11 @@ void LCodeGen::EmitSignedIntegerDivisionByConstant(
if (divisor > 0) {
__ Move(result, dividend);
} else {
- __ rsb(result, dividend, Operand(0), SetCC);
+ __ rsb(result, dividend, Operand::Zero(), SetCC);
DeoptimizeIf(vs, environment);
}
// Compute the remainder.
- __ mov(remainder, Operand(0));
+ __ mov(remainder, Operand::Zero());
return;
default:
@@ -1166,7 +1348,7 @@ void LCodeGen::EmitSignedIntegerDivisionByConstant(
// handled separately.
if (divisor < 0) {
ASSERT(divisor != -1);
- __ rsb(result, result, Operand(0));
+ __ rsb(result, result, Operand::Zero());
}
// Compute the remainder.
if (divisor > 0) {
@@ -1202,7 +1384,7 @@ void LCodeGen::EmitSignedIntegerDivisionByConstant(
__ mov(scratch, Operand(scratch, ASR, s));
}
__ add(result, scratch, Operand(dividend, LSR, 31));
- if (divisor < 0) __ rsb(result, result, Operand(0));
+ if (divisor < 0) __ rsb(result, result, Operand::Zero());
// Compute the remainder.
__ mov(ip, Operand(divisor));
// This sequence could be replaced with 'mls' when
@@ -1237,21 +1419,21 @@ void LCodeGen::DoDivI(LDivI* instr) {
// Check for x / 0.
if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
- __ cmp(right, Operand(0));
+ __ cmp(right, Operand::Zero());
DeoptimizeIf(eq, instr->environment());
}
// Check for (0 / -x) that will produce negative zero.
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label left_not_zero;
- __ cmp(left, Operand(0));
+ __ cmp(left, Operand::Zero());
__ b(ne, &left_not_zero);
- __ cmp(right, Operand(0));
+ __ cmp(right, Operand::Zero());
DeoptimizeIf(mi, instr->environment());
__ bind(&left_not_zero);
}
- // Check for (-kMinInt / -1).
+ // Check for (kMinInt / -1).
if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
Label left_not_min_int;
__ cmp(left, Operand(kMinInt));
@@ -1298,31 +1480,98 @@ void LCodeGen::DoDivI(LDivI* instr) {
}
+void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) {
+ DwVfpRegister addend = ToDoubleRegister(instr->addend());
+ DwVfpRegister multiplier = ToDoubleRegister(instr->multiplier());
+ DwVfpRegister multiplicand = ToDoubleRegister(instr->multiplicand());
+
+ // This is computed in-place.
+ ASSERT(addend.is(ToDoubleRegister(instr->result())));
+
+ __ vmla(addend, multiplier, multiplicand);
+}
+
+
+void LCodeGen::DoMultiplySubD(LMultiplySubD* instr) {
+ DwVfpRegister minuend = ToDoubleRegister(instr->minuend());
+ DwVfpRegister multiplier = ToDoubleRegister(instr->multiplier());
+ DwVfpRegister multiplicand = ToDoubleRegister(instr->multiplicand());
+
+ // This is computed in-place.
+ ASSERT(minuend.is(ToDoubleRegister(instr->result())));
+
+ __ vmls(minuend, multiplier, multiplicand);
+}
+
+
void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) {
const Register result = ToRegister(instr->result());
const Register left = ToRegister(instr->left());
const Register remainder = ToRegister(instr->temp());
const Register scratch = scratch0();
- // We only optimize this for division by constants, because the standard
- // integer division routine is usually slower than transitionning to VFP.
- // This could be optimized on processors with SDIV available.
- ASSERT(instr->right()->IsConstantOperand());
- int32_t divisor = ToInteger32(LConstantOperand::cast(instr->right()));
- if (divisor < 0) {
- __ cmp(left, Operand(0));
+ if (!CpuFeatures::IsSupported(SUDIV)) {
+    // If the CPU doesn't support the sdiv instruction, we only optimize
+    // division by constants that have a magic number; the standard integer
+    // division routine is usually slower than transitioning to VFP.
+ ASSERT(instr->right()->IsConstantOperand());
+ int32_t divisor = ToInteger32(LConstantOperand::cast(instr->right()));
+ ASSERT(LChunkBuilder::HasMagicNumberForDivisor(divisor));
+ if (divisor < 0) {
+ __ cmp(left, Operand::Zero());
+ DeoptimizeIf(eq, instr->environment());
+ }
+ EmitSignedIntegerDivisionByConstant(result,
+ left,
+ divisor,
+ remainder,
+ scratch,
+ instr->environment());
+ // We performed a truncating division. Correct the result if necessary.
+ __ cmp(remainder, Operand::Zero());
+ __ teq(remainder, Operand(divisor), ne);
+ __ sub(result, result, Operand(1), LeaveCC, mi);
+ } else {
+ CpuFeatures::Scope scope(SUDIV);
+ const Register right = ToRegister(instr->right());
+
+ // Check for x / 0.
+ __ cmp(right, Operand::Zero());
DeoptimizeIf(eq, instr->environment());
+
+ // Check for (kMinInt / -1).
+ if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+ Label left_not_min_int;
+ __ cmp(left, Operand(kMinInt));
+ __ b(ne, &left_not_min_int);
+ __ cmp(right, Operand(-1));
+ DeoptimizeIf(eq, instr->environment());
+ __ bind(&left_not_min_int);
+ }
+
+ // Check for (0 / -x) that will produce negative zero.
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ __ cmp(right, Operand::Zero());
+ __ cmp(left, Operand::Zero(), mi);
+ // "right" can't be null because the code would have already been
+ // deoptimized. The Z flag is set only if (right < 0) and (left == 0).
+ // In this case we need to deoptimize to produce a -0.
+ DeoptimizeIf(eq, instr->environment());
+ }
+
+ Label done;
+ __ sdiv(result, left, right);
+ // If both operands have the same sign then we are done.
+ __ eor(remainder, left, Operand(right), SetCC);
+ __ b(pl, &done);
+
+ // Check if the result needs to be corrected.
+ __ mls(remainder, result, right, left);
+ __ cmp(remainder, Operand::Zero());
+ __ sub(result, result, Operand(1), LeaveCC, ne);
+
+ __ bind(&done);
}
- EmitSignedIntegerDivisionByConstant(result,
- left,
- divisor,
- remainder,
- scratch,
- instr->environment());
- // We operated a truncating division. Correct the result if necessary.
- __ cmp(remainder, Operand(0));
- __ teq(remainder, Operand(divisor), ne);
- __ sub(result, result, Operand(1), LeaveCC, mi);
}
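
Both paths above end by rounding the truncated quotient toward negative
infinity. A compact sketch of that correction in plain C++ (illustrative and
not part of the patch; it ignores the kMinInt / -1 overflow case, which the
generated code deoptimizes on):

    int32_t floor_div(int32_t left, int32_t right) {
      int32_t q = left / right;                      // truncating, like sdiv
      if ((left ^ right) < 0 && q * right != left) {
        q -= 1;          // inexact result with differing signs: round down
      }
      return q;  // e.g. floor_div(-7, 2): trunc gives -3, corrected to -4
    }
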
@@ -1330,6 +1579,7 @@ void LCodeGen::DoDeferredBinaryOpStub(LPointerMap* pointer_map,
LOperand* left_argument,
LOperand* right_argument,
Token::Value op) {
+ CpuFeatures::Scope vfp_scope(VFP2);
Register left = ToRegister(left_argument);
Register right = ToRegister(right_argument);
@@ -1376,22 +1626,22 @@ void LCodeGen::DoMulI(LMulI* instr) {
if (bailout_on_minus_zero && (constant < 0)) {
       // The case of a zero constant will be handled separately.
       // If the constant is negative and left is zero, the result should
       // be -0.
- __ cmp(left, Operand(0));
+ __ cmp(left, Operand::Zero());
DeoptimizeIf(eq, instr->environment());
}
switch (constant) {
case -1:
- __ rsb(result, left, Operand(0));
+ __ rsb(result, left, Operand::Zero());
break;
case 0:
if (bailout_on_minus_zero) {
// If left is strictly negative and the constant is null, the
// result is -0. Deoptimize if required, otherwise return 0.
- __ cmp(left, Operand(0));
+ __ cmp(left, Operand::Zero());
DeoptimizeIf(mi, instr->environment());
}
- __ mov(result, Operand(0));
+ __ mov(result, Operand::Zero());
break;
case 1:
__ Move(result, left);
@@ -1418,7 +1668,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
}
     // Correct the sign of the result if the constant is negative.
- if (constant < 0) __ rsb(result, result, Operand(0));
+ if (constant < 0) __ rsb(result, result, Operand::Zero());
} else {
// Generate standard code.
@@ -1445,9 +1695,9 @@ void LCodeGen::DoMulI(LMulI* instr) {
if (bailout_on_minus_zero) {
// Bail out if the result is supposed to be negative zero.
Label done;
- __ cmp(result, Operand(0));
+ __ cmp(result, Operand::Zero());
__ b(ne, &done);
- __ cmp(ToRegister(instr->temp()), Operand(0));
+ __ cmp(ToRegister(instr->temp()), Operand::Zero());
DeoptimizeIf(mi, instr->environment());
__ bind(&done);
}
@@ -1585,6 +1835,27 @@ void LCodeGen::DoSubI(LSubI* instr) {
}
+void LCodeGen::DoRSubI(LRSubI* instr) {
+ LOperand* left = instr->left();
+ LOperand* right = instr->right();
+ LOperand* result = instr->result();
+ bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+ SBit set_cond = can_overflow ? SetCC : LeaveCC;
+
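+ // rsb computes (right - left): subtraction with the operands reversed.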
+ if (right->IsStackSlot() || right->IsArgument()) {
+ Register right_reg = EmitLoadRegister(right, ip);
+ __ rsb(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond);
+ } else {
+ ASSERT(right->IsRegister() || right->IsConstantOperand());
+ __ rsb(ToRegister(result), ToRegister(left), ToOperand(right), set_cond);
+ }
+
+ if (can_overflow) {
+ DeoptimizeIf(vs, instr->environment());
+ }
+}
+
+
void LCodeGen::DoConstantI(LConstantI* instr) {
ASSERT(instr->result()->IsRegister());
__ mov(ToRegister(instr->result()), Operand(instr->value()));
@@ -1594,6 +1865,7 @@ void LCodeGen::DoConstantI(LConstantI* instr) {
void LCodeGen::DoConstantD(LConstantD* instr) {
ASSERT(instr->result()->IsDoubleRegister());
DwVfpRegister result = ToDoubleRegister(instr->result());
+ CpuFeatures::Scope scope(VFP2);
double v = instr->value();
__ Vmov(result, v, scratch0());
}
@@ -1705,6 +1977,15 @@ void LCodeGen::DoDateField(LDateField* instr) {
}
+void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
+ SeqStringSetCharGenerator::Generate(masm(),
+ instr->encoding(),
+ ToRegister(instr->string()),
+ ToRegister(instr->index()),
+ ToRegister(instr->value()));
+}
+
+
void LCodeGen::DoBitNotI(LBitNotI* instr) {
Register input = ToRegister(instr->value());
Register result = ToRegister(instr->result());
@@ -1762,9 +2043,10 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
__ mov(result_reg, right_op, LeaveCC, NegateCondition(condition));
} else {
ASSERT(instr->hydrogen()->representation().IsDouble());
- DoubleRegister left_reg = ToDoubleRegister(left);
- DoubleRegister right_reg = ToDoubleRegister(right);
- DoubleRegister result_reg = ToDoubleRegister(instr->result());
+ CpuFeatures::Scope scope(VFP2);
+ DwVfpRegister left_reg = ToDoubleRegister(left);
+ DwVfpRegister right_reg = ToDoubleRegister(right);
+ DwVfpRegister result_reg = ToDoubleRegister(instr->result());
Label check_nan_left, check_zero, return_left, return_right, done;
__ VFPCompareAndSetFlags(left_reg, right_reg);
__ b(vs, &check_nan_left);
@@ -1807,9 +2089,10 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
- DoubleRegister left = ToDoubleRegister(instr->left());
- DoubleRegister right = ToDoubleRegister(instr->right());
- DoubleRegister result = ToDoubleRegister(instr->result());
+ CpuFeatures::Scope scope(VFP2);
+ DwVfpRegister left = ToDoubleRegister(instr->left());
+ DwVfpRegister right = ToDoubleRegister(instr->right());
+ DwVfpRegister result = ToDoubleRegister(instr->result());
switch (instr->op()) {
case Token::ADD:
__ vadd(result, left, right);
@@ -1855,7 +2138,7 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
// Block literal pool emission to ensure the nop indicating no inlined smi
// code is in the correct position.
Assembler::BlockConstPoolScope block_const_pool(masm());
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
__ nop(); // Signals no inlined code.
}
@@ -1894,10 +2177,11 @@ void LCodeGen::DoBranch(LBranch* instr) {
Representation r = instr->hydrogen()->value()->representation();
if (r.IsInteger32()) {
Register reg = ToRegister(instr->value());
- __ cmp(reg, Operand(0));
+ __ cmp(reg, Operand::Zero());
EmitBranch(true_block, false_block, ne);
} else if (r.IsDouble()) {
- DoubleRegister reg = ToDoubleRegister(instr->value());
+ CpuFeatures::Scope scope(VFP2);
+ DwVfpRegister reg = ToDoubleRegister(instr->value());
Register scratch = scratch0();
// Test the double value. Zero and NaN are false.
@@ -1912,7 +2196,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ CompareRoot(reg, Heap::kTrueValueRootIndex);
EmitBranch(true_block, false_block, eq);
} else if (type.IsSmi()) {
- __ cmp(reg, Operand(0));
+ __ cmp(reg, Operand::Zero());
EmitBranch(true_block, false_block, ne);
} else {
Label* true_label = chunk_->GetAssemblyLabel(true_block);
@@ -1942,7 +2226,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
if (expected.Contains(ToBooleanStub::SMI)) {
// Smis: 0 -> false, all other -> true.
- __ cmp(reg, Operand(0));
+ __ cmp(reg, Operand::Zero());
__ b(eq, false_label);
__ JumpIfSmi(reg, true_label);
} else if (expected.NeedsMap()) {
@@ -1975,15 +2259,16 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE);
__ b(ge, &not_string);
__ ldr(ip, FieldMemOperand(reg, String::kLengthOffset));
- __ cmp(ip, Operand(0));
+ __ cmp(ip, Operand::Zero());
__ b(ne, true_label);
__ b(false_label);
__ bind(&not_string);
}
if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
+ CpuFeatures::Scope scope(VFP2);
// heap number -> false iff +0, -0, or NaN.
- DoubleRegister dbl_scratch = double_scratch0();
+ DwVfpRegister dbl_scratch = double_scratch0();
Label not_heap_number;
__ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
__ b(ne, &not_heap_number);
@@ -2061,6 +2346,7 @@ void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
EmitGoto(next_block);
} else {
if (instr->is_double()) {
+ CpuFeatures::Scope scope(VFP2);
// Compare left and right operands as doubles and load the
// resulting flags into the normal status register.
__ VFPCompareAndSetFlags(ToDoubleRegister(left), ToDoubleRegister(right));
@@ -2266,9 +2552,10 @@ void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
- Handle<Code> ic = CompareIC::GetUninitialized(op);
+ Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
CallCode(ic, RelocInfo::CODE_TARGET, instr);
- __ cmp(r0, Operand(0)); // This instruction also signals no smi code inlined.
+ // This instruction also signals no smi code inlined.
+ __ cmp(r0, Operand::Zero());
Condition condition = ComputeCompareCondition(op);
@@ -2352,7 +2639,7 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
__ JumpIfSmi(input, is_false);
- if (class_name->IsEqualTo(CStrVector("Function"))) {
+ if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Function"))) {
// Assuming the following assertions, we can use the same compares to test
// for both being a function type and being in the object type range.
STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
@@ -2383,7 +2670,7 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
// Objects with a non-function constructor have class 'Object'.
__ CompareObjectType(temp, temp2, temp2, JS_FUNCTION_TYPE);
- if (class_name->IsEqualTo(CStrVector("Object"))) {
+ if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Object"))) {
__ b(ne, is_true);
} else {
__ b(ne, is_false);
@@ -2394,12 +2681,12 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
__ ldr(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset));
__ ldr(temp, FieldMemOperand(temp,
SharedFunctionInfo::kInstanceClassNameOffset));
- // The class name we are testing against is a symbol because it's a literal.
- // The name in the constructor is a symbol because of the way the context is
- // booted. This routine isn't expected to work for random API-created
+ // The class name we are testing against is internalized since it's a literal.
+ // The name in the constructor is internalized because of the way the context
+ // is booted. This routine isn't expected to work for random API-created
// classes and it doesn't have to because you can't access it with natives
- // syntax. Since both sides are symbols it is sufficient to use an identity
- // comparison.
+ // syntax. Since both sides are internalized it is sufficient to use an
+ // identity comparison.
__ cmp(temp, Operand(class_name));
// End with the answer in flags.
}
@@ -2440,9 +2727,9 @@ void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
ASSERT(ToRegister(instr->right()).is(r1)); // Function is in r1.
InstanceofStub stub(InstanceofStub::kArgsInRegisters);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
- __ cmp(r0, Operand(0));
+ __ cmp(r0, Operand::Zero());
__ mov(r0, Operand(factory()->false_value()), LeaveCC, ne);
__ mov(r0, Operand(factory()->true_value()), LeaveCC, eq);
}
@@ -2492,7 +2779,7 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
// We use Factory::the_hole_value() on purpose instead of loading from the
// root array to force relocation, so that we can later patch in
// the cached map.
- PredictableCodeSizeScope predictable(masm_);
+ PredictableCodeSizeScope predictable(masm_, 5 * Assembler::kInstrSize);
Handle<JSGlobalPropertyCell> cell =
factory()->NewJSGlobalPropertyCell(factory()->the_hole_value());
__ mov(ip, Operand(Handle<Object>(cell)));
@@ -2556,7 +2843,7 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
static const int kAdditionalDelta = 5;
// Make sure that code size is predictable, since we use specific constant
// offsets in the code to find embedded values.
- PredictableCodeSizeScope predictable(masm_);
+ PredictableCodeSizeScope predictable(masm_, 6 * Assembler::kInstrSize);
int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta;
Label before_push_delta;
__ bind(&before_push_delta);
@@ -2569,7 +2856,7 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
__ nop();
}
__ StoreToSafepointRegisterSlot(temp, temp);
- CallCodeGeneric(stub.GetCode(),
+ CallCodeGeneric(stub.GetCode(isolate()),
RelocInfo::CODE_TARGET,
instr,
RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
@@ -2581,12 +2868,21 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
}
+void LCodeGen::DoInstanceSize(LInstanceSize* instr) {
+ Register object = ToRegister(instr->object());
+ Register result = ToRegister(instr->result());
+ __ ldr(result, FieldMemOperand(object, HeapObject::kMapOffset));
+ __ ldrb(result, FieldMemOperand(result, Map::kInstanceSizeOffset));
+}
+
+
void LCodeGen::DoCmpT(LCmpT* instr) {
Token::Value op = instr->op();
- Handle<Code> ic = CompareIC::GetUninitialized(op);
+ Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
CallCode(ic, RelocInfo::CODE_TARGET, instr);
- __ cmp(r0, Operand(0)); // This instruction also signals no smi code inlined.
+ // This instruction also signals no smi code inlined.
+ __ cmp(r0, Operand::Zero());
Condition condition = ComputeCompareCondition(op);
__ LoadRoot(ToRegister(instr->result()),
@@ -2599,16 +2895,33 @@ void LCodeGen::DoCmpT(LCmpT* instr) {
void LCodeGen::DoReturn(LReturn* instr) {
- if (FLAG_trace) {
+ if (FLAG_trace && info()->IsOptimizing()) {
// Push the return value on the stack as the parameter.
// Runtime::TraceExit returns its parameter in r0.
__ push(r0);
__ CallRuntime(Runtime::kTraceExit, 1);
}
- int32_t sp_delta = (GetParameterCount() + 1) * kPointerSize;
- __ mov(sp, fp);
- __ ldm(ia_w, sp, fp.bit() | lr.bit());
- __ add(sp, sp, Operand(sp_delta));
+ if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope(VFP2);
+ ASSERT(NeedsEagerFrame());
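+ // Restore the double registers that were saved in this frame on entry.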
+ BitVector* doubles = chunk()->allocated_double_registers();
+ BitVector::Iterator save_iterator(doubles);
+ int count = 0;
+ while (!save_iterator.Done()) {
+ __ vldr(DwVfpRegister::FromAllocationIndex(save_iterator.Current()),
+ MemOperand(sp, count * kDoubleSize));
+ save_iterator.Advance();
+ count++;
+ }
+ }
+ if (NeedsEagerFrame()) {
+ int32_t sp_delta = (GetParameterCount() + 1) * kPointerSize;
+ __ mov(sp, fp);
+ __ ldm(ia_w, sp, fp.bit() | lr.bit());
+ if (!info()->IsStub()) {
+ __ add(sp, sp, Operand(sp_delta));
+ }
+ }
__ Jump(lr);
}
@@ -2958,17 +3271,63 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
- CpuFeatures::Scope scope(VFP3);
DwVfpRegister result = ToDoubleRegister(instr->result());
Operand operand = key_is_constant
? Operand(constant_key << element_size_shift)
: Operand(key, LSL, shift_size);
__ add(scratch0(), external_pointer, operand);
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
- __ vldr(result.low(), scratch0(), additional_offset);
- __ vcvt_f64_f32(result, result.low());
- } else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
- __ vldr(result, scratch0(), additional_offset);
+ if (CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope(VFP2);
+ if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
+ __ vldr(kScratchDoubleReg.low(), scratch0(), additional_offset);
+ __ vcvt_f64_f32(result, kScratchDoubleReg.low());
+ } else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
+ __ vldr(result, scratch0(), additional_offset);
+ }
+ } else {
+ if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
+ Register value = external_pointer;
+ __ ldr(value, MemOperand(scratch0(), additional_offset));
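+ // Widen binary32 to binary64 in software: split the value into sign,
+ // exponent and mantissa, rebias the exponent (0x00 and 0xff map to
+ // 0x000 and 0x7ff), and reassemble the result in sfpd_hi:sfpd_lo.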
+ __ and_(sfpd_lo, value, Operand(kBinary32MantissaMask));
+
+ __ mov(scratch0(), Operand(value, LSR, kBinary32MantissaBits));
+ __ and_(scratch0(), scratch0(),
+ Operand(kBinary32ExponentMask >> kBinary32MantissaBits));
+
+ Label exponent_rebiased;
+ __ teq(scratch0(), Operand(0x00));
+ __ b(eq, &exponent_rebiased);
+
+ __ teq(scratch0(), Operand(0xff));
+ __ mov(scratch0(), Operand(0x7ff), LeaveCC, eq);
+ __ b(eq, &exponent_rebiased);
+
+ // Rebias exponent.
+ __ add(scratch0(),
+ scratch0(),
+ Operand(-kBinary32ExponentBias + HeapNumber::kExponentBias));
+
+ __ bind(&exponent_rebiased);
+ __ and_(sfpd_hi, value, Operand(kBinary32SignMask));
+ __ orr(sfpd_hi, sfpd_hi,
+ Operand(scratch0(), LSL, HeapNumber::kMantissaBitsInTopWord));
+
+ // Shift mantissa.
+ static const int kMantissaShiftForHiWord =
+ kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;
+
+ static const int kMantissaShiftForLoWord =
+ kBitsPerInt - kMantissaShiftForHiWord;
+
+ __ orr(sfpd_hi, sfpd_hi,
+ Operand(sfpd_lo, LSR, kMantissaShiftForHiWord));
+ __ mov(sfpd_lo, Operand(sfpd_lo, LSL, kMantissaShiftForLoWord));
+
+ } else {
+ __ ldr(sfpd_lo, MemOperand(scratch0(), additional_offset));
+ __ ldr(sfpd_hi, MemOperand(scratch0(),
+ additional_offset + kPointerSize));
+ }
}
} else {
Register result = ToRegister(instr->result());
@@ -3037,23 +3396,28 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
key = ToRegister(instr->key());
}
- Operand operand = key_is_constant
- ? Operand(((constant_key + instr->additional_index()) <<
- element_size_shift) +
- FixedDoubleArray::kHeaderSize - kHeapObjectTag)
- : Operand(key, LSL, shift_size);
- __ add(elements, elements, operand);
+ int base_offset = (FixedDoubleArray::kHeaderSize - kHeapObjectTag) +
+ ((constant_key + instr->additional_index()) << element_size_shift);
if (!key_is_constant) {
- __ add(elements, elements,
- Operand((FixedDoubleArray::kHeaderSize - kHeapObjectTag) +
- (instr->additional_index() << element_size_shift)));
- }
-
- __ vldr(result, elements, 0);
- if (instr->hydrogen()->RequiresHoleCheck()) {
- __ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
- __ cmp(scratch, Operand(kHoleNanUpper32));
- DeoptimizeIf(eq, instr->environment());
+ __ add(elements, elements, Operand(key, LSL, shift_size));
+ }
+ if (CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope(VFP2);
+ __ add(elements, elements, Operand(base_offset));
+ __ vldr(result, elements, 0);
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ __ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
+ __ cmp(scratch, Operand(kHoleNanUpper32));
+ DeoptimizeIf(eq, instr->environment());
+ }
+ } else {
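+ // No VFP2: load the two raw words of the double into the sfpd register
+ // pair instead.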
+ __ ldr(sfpd_hi, MemOperand(elements, base_offset + kPointerSize));
+ __ ldr(sfpd_lo, MemOperand(elements, base_offset));
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ ASSERT(kPointerSize == sizeof(kHoleNanLower32));
+ __ cmp(sfpd_hi, Operand(kHoleNanUpper32));
+ DeoptimizeIf(eq, instr->environment());
+ }
}
}
@@ -3273,7 +3637,7 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
// stack.
Label invoke, loop;
// length is a small non-negative integer, due to the test above.
- __ cmp(length, Operand(0));
+ __ cmp(length, Operand::Zero());
__ b(eq, &invoke);
__ bind(&loop);
__ ldr(scratch, MemOperand(elements, length, LSL, 2));
@@ -3319,8 +3683,14 @@ void LCodeGen::DoThisFunction(LThisFunction* instr) {
void LCodeGen::DoContext(LContext* instr) {
+ // If there is a non-return use, the context must be moved to a register.
Register result = ToRegister(instr->result());
- __ mov(result, cp);
+ for (HUseIterator it(instr->hydrogen()->uses()); !it.Done(); it.Advance()) {
+ if (!it.value()->IsReturn()) {
+ __ mov(result, cp);
+ return;
+ }
+ }
}
@@ -3479,18 +3849,19 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) {
Register input = ToRegister(instr->value());
Register result = ToRegister(instr->result());
- __ cmp(input, Operand(0));
+ __ cmp(input, Operand::Zero());
__ Move(result, input, pl);
// We can make rsb conditional because the previous cmp instruction
// will clear the V (overflow) flag and rsb won't set this flag
// if input is positive.
- __ rsb(result, input, Operand(0), SetCC, mi);
+ __ rsb(result, input, Operand::Zero(), SetCC, mi);
// Deoptimize on overflow.
DeoptimizeIf(vs, instr->environment());
}
void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
+ CpuFeatures::Scope scope(VFP2);
// Class for deferred case.
class DeferredMathAbsTaggedHeapNumber: public LDeferredCode {
public:
@@ -3527,7 +3898,8 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
- DoubleRegister input = ToDoubleRegister(instr->value());
+ CpuFeatures::Scope scope(VFP2);
+ DwVfpRegister input = ToDoubleRegister(instr->value());
Register result = ToRegister(instr->result());
Register scratch = scratch0();
@@ -3541,7 +3913,7 @@ void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
// Test for -0.
Label done;
- __ cmp(result, Operand(0));
+ __ cmp(result, Operand::Zero());
__ b(ne, &done);
__ vmov(scratch, input.high());
__ tst(scratch, Operand(HeapNumber::kSignMask));
@@ -3552,7 +3924,8 @@ void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
- DoubleRegister input = ToDoubleRegister(instr->value());
+ CpuFeatures::Scope scope(VFP2);
+ DwVfpRegister input = ToDoubleRegister(instr->value());
Register result = ToRegister(instr->result());
DwVfpRegister double_scratch1 = ToDoubleRegister(instr->temp());
Register scratch = scratch0();
@@ -3567,7 +3940,7 @@ void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
// If the number is in ]-0.5, +0.5[, the result is +/- 0.
__ cmp(scratch, Operand(HeapNumber::kExponentBias - 2));
- __ mov(result, Operand(0), LeaveCC, le);
+ __ mov(result, Operand::Zero(), LeaveCC, le);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ b(le, &check_sign_on_zero);
} else {
@@ -3592,7 +3965,7 @@ void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
DeoptimizeIf(mi, instr->environment());
} else {
- __ mov(result, Operand(0), LeaveCC, mi);
+ __ mov(result, Operand::Zero(), LeaveCC, mi);
__ b(mi, &done);
}
@@ -3605,7 +3978,7 @@ void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
// Test for -0.
- __ cmp(result, Operand(0));
+ __ cmp(result, Operand::Zero());
__ b(ne, &done);
__ bind(&check_sign_on_zero);
__ vmov(scratch, input.high());
@@ -3617,16 +3990,18 @@ void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
- DoubleRegister input = ToDoubleRegister(instr->value());
- DoubleRegister result = ToDoubleRegister(instr->result());
+ CpuFeatures::Scope scope(VFP2);
+ DwVfpRegister input = ToDoubleRegister(instr->value());
+ DwVfpRegister result = ToDoubleRegister(instr->result());
__ vsqrt(result, input);
}
void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
- DoubleRegister input = ToDoubleRegister(instr->value());
- DoubleRegister result = ToDoubleRegister(instr->result());
- DoubleRegister temp = ToDoubleRegister(instr->temp());
+ CpuFeatures::Scope scope(VFP2);
+ DwVfpRegister input = ToDoubleRegister(instr->value());
+ DwVfpRegister result = ToDoubleRegister(instr->result());
+ DwVfpRegister temp = ToDoubleRegister(instr->temp());
// Note that according to ECMA-262 15.8.2.13:
// Math.pow(-Infinity, 0.5) == Infinity
@@ -3645,6 +4020,7 @@ void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
void LCodeGen::DoPower(LPower* instr) {
+ CpuFeatures::Scope scope(VFP2);
Representation exponent_type = instr->hydrogen()->right()->representation();
// Having marked this as a call, we can use any registers.
// Just make sure that the input/output registers are the expected ones.
@@ -3677,6 +4053,7 @@ void LCodeGen::DoPower(LPower* instr) {
void LCodeGen::DoRandom(LRandom* instr) {
+ CpuFeatures::Scope scope(VFP2);
class DeferredDoRandom: public LDeferredCode {
public:
DeferredDoRandom(LCodeGen* codegen, LRandom* instr)
@@ -3705,7 +4082,7 @@ void LCodeGen::DoRandom(LRandom* instr) {
// Load state[0].
__ ldr(r1, FieldMemOperand(r2, ByteArray::kHeaderSize));
- __ cmp(r1, Operand(0));
+ __ cmp(r1, Operand::Zero());
__ b(eq, deferred->entry());
// Load state[1].
__ ldr(r0, FieldMemOperand(r2, ByteArray::kHeaderSize + kSeedSize));
@@ -3740,7 +4117,7 @@ void LCodeGen::DoRandom(LRandom* instr) {
// Move 0x41300000xxxxxxxx (x = random bits) to VFP.
__ vmov(d7, r0, r1);
// Move 0x4130000000000000 to VFP.
- __ mov(r0, Operand(0, RelocInfo::NONE));
+ __ mov(r0, Operand::Zero());
__ vmov(d8, r0, r1);
// Subtract and store the result in the heap number.
__ vsub(d7, d7, d8);
@@ -3754,11 +4131,26 @@ void LCodeGen::DoDeferredRandom(LRandom* instr) {
}
+void LCodeGen::DoMathExp(LMathExp* instr) {
+ CpuFeatures::Scope scope(VFP2);
+ DwVfpRegister input = ToDoubleRegister(instr->value());
+ DwVfpRegister result = ToDoubleRegister(instr->result());
+ DwVfpRegister double_scratch1 = ToDoubleRegister(instr->double_temp());
+ DwVfpRegister double_scratch2 = double_scratch0();
+ Register temp1 = ToRegister(instr->temp1());
+ Register temp2 = ToRegister(instr->temp2());
+
+ MathExpGenerator::EmitMathExp(
+ masm(), input, result, double_scratch1, double_scratch2,
+ temp1, temp2, scratch0());
+}
+
+
void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(d2));
TranscendentalCacheStub stub(TranscendentalCache::LOG,
TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}
@@ -3766,7 +4158,7 @@ void LCodeGen::DoMathTan(LUnaryMathOperation* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(d2));
TranscendentalCacheStub stub(TranscendentalCache::TAN,
TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}
@@ -3774,7 +4166,7 @@ void LCodeGen::DoMathCos(LUnaryMathOperation* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(d2));
TranscendentalCacheStub stub(TranscendentalCache::COS,
TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}
@@ -3782,7 +4174,7 @@ void LCodeGen::DoMathSin(LUnaryMathOperation* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(d2));
TranscendentalCacheStub stub(TranscendentalCache::SIN,
TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}
@@ -3874,7 +4266,7 @@ void LCodeGen::DoCallFunction(LCallFunction* instr) {
int arity = instr->arity();
CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}
@@ -3906,9 +4298,29 @@ void LCodeGen::DoCallNew(LCallNew* instr) {
ASSERT(ToRegister(instr->constructor()).is(r1));
ASSERT(ToRegister(instr->result()).is(r0));
+ __ mov(r0, Operand(instr->arity()));
+ if (FLAG_optimize_constructed_arrays) {
+ // Optimized code has no cell in r2 for construct type feedback; pass
+ // undefined instead.
+ Handle<Object> undefined_value(isolate()->heap()->undefined_value(),
+ isolate());
+ __ mov(r2, Operand(undefined_value));
+ }
CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+}
+
+
+void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
+ ASSERT(ToRegister(instr->constructor()).is(r1));
+ ASSERT(ToRegister(instr->result()).is(r0));
+ ASSERT(FLAG_optimize_constructed_arrays);
+
__ mov(r0, Operand(instr->arity()));
- CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+ __ mov(r2, Operand(instr->hydrogen()->property_cell()));
+ Handle<Code> array_construct_code =
+ isolate()->builtins()->ArrayConstructCode();
+
+ CallCode(array_construct_code, RelocInfo::CONSTRUCT_CALL, instr);
}
@@ -3991,28 +4403,9 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
}
-void LCodeGen::DeoptIfTaggedButNotSmi(LEnvironment* environment,
- HValue* value,
- LOperand* operand) {
- if (value->representation().IsTagged() && !value->type().IsSmi()) {
- if (operand->IsRegister()) {
- __ tst(ToRegister(operand), Operand(kSmiTagMask));
- } else {
- __ mov(ip, ToOperand(operand));
- __ tst(ip, Operand(kSmiTagMask));
- }
- DeoptimizeIf(ne, environment);
- }
-}
-
-
void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
- DeoptIfTaggedButNotSmi(instr->environment(),
- instr->hydrogen()->length(),
- instr->length());
- DeoptIfTaggedButNotSmi(instr->environment(),
- instr->hydrogen()->index(),
- instr->index());
+ if (instr->hydrogen()->skip_check()) return;
+
if (instr->index()->IsConstantOperand()) {
int constant_index =
ToInteger32(LConstantOperand::cast(instr->index()));
@@ -4030,6 +4423,7 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
+ CpuFeatures::Scope scope(VFP2);
Register external_pointer = ToRegister(instr->elements());
Register key = no_reg;
ElementsKind elements_kind = instr->elements_kind();
@@ -4100,6 +4494,7 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
+ CpuFeatures::Scope scope(VFP2);
DwVfpRegister value = ToDoubleRegister(instr->value());
Register elements = ToRegister(instr->elements());
Register key = no_reg;
@@ -4133,10 +4528,14 @@ void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
if (instr->NeedsCanonicalization()) {
// Check for NaN. All NaNs must be canonicalized.
__ VFPCompareAndSetFlags(value, value);
+ Label after_canonicalization;
+
// Only load canonical NaN if the comparison above set the overflow.
+ __ b(vc, &after_canonicalization);
__ Vmov(value,
- FixedDoubleArray::canonical_not_the_hole_nan_as_double(),
- no_reg, vs);
+ FixedDoubleArray::canonical_not_the_hole_nan_as_double());
+
+ __ bind(&after_canonicalization);
}
__ vstr(value, scratch, instr->additional_index() << element_size_shift);
@@ -4217,30 +4616,40 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
Register object_reg = ToRegister(instr->object());
- Register new_map_reg = ToRegister(instr->new_map_temp());
Register scratch = scratch0();
Handle<Map> from_map = instr->original_map();
Handle<Map> to_map = instr->transitioned_map();
- ElementsKind from_kind = from_map->elements_kind();
- ElementsKind to_kind = to_map->elements_kind();
+ ElementsKind from_kind = instr->from_kind();
+ ElementsKind to_kind = instr->to_kind();
Label not_applicable;
__ ldr(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
__ cmp(scratch, Operand(from_map));
__ b(ne, &not_applicable);
- __ mov(new_map_reg, Operand(to_map));
if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
+ Register new_map_reg = ToRegister(instr->new_map_temp());
+ __ mov(new_map_reg, Operand(to_map));
__ str(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
// Write barrier.
__ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
scratch, kLRHasBeenSaved, kDontSaveFPRegs);
+ } else if (FLAG_compiled_transitions) {
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ __ Move(r0, object_reg);
+ __ Move(r1, to_map);
+ TransitionElementsKindStub stub(from_kind, to_kind);
+ __ CallStub(&stub);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
} else if (IsFastSmiElementsKind(from_kind) &&
IsFastDoubleElementsKind(to_kind)) {
Register fixed_object_reg = ToRegister(instr->temp());
ASSERT(fixed_object_reg.is(r2));
+ Register new_map_reg = ToRegister(instr->new_map_temp());
ASSERT(new_map_reg.is(r3));
+ __ mov(new_map_reg, Operand(to_map));
__ mov(fixed_object_reg, object_reg);
CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(),
RelocInfo::CODE_TARGET, instr);
@@ -4248,7 +4657,9 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
IsFastObjectElementsKind(to_kind)) {
Register fixed_object_reg = ToRegister(instr->temp());
ASSERT(fixed_object_reg.is(r2));
+ Register new_map_reg = ToRegister(instr->new_map_temp());
ASSERT(new_map_reg.is(r3));
+ __ mov(new_map_reg, Operand(to_map));
__ mov(fixed_object_reg, object_reg);
CallCode(isolate()->builtins()->TransitionElementsDoubleToObject(),
RelocInfo::CODE_TARGET, instr);
@@ -4259,11 +4670,19 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
}
+void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
+ Register object = ToRegister(instr->object());
+ Register temp = ToRegister(instr->temp());
+ __ TestJSArrayForAllocationSiteInfo(object, temp);
+ DeoptimizeIf(eq, instr->environment());
+}
+
+
void LCodeGen::DoStringAdd(LStringAdd* instr) {
__ push(ToRegister(instr->left()));
__ push(ToRegister(instr->right()));
StringAddStub stub(NO_STRING_CHECK_IN_STUB);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}
@@ -4298,7 +4717,7 @@ void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
// TODO(3095996): Get rid of this. For now, we need to make the
// result register contain a valid pointer because it is already
// contained in the register pointer map.
- __ mov(result, Operand(0));
+ __ mov(result, Operand::Zero());
PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
__ push(string);
@@ -4339,7 +4758,7 @@ void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
Register result = ToRegister(instr->result());
ASSERT(!char_code.is(result));
- __ cmp(char_code, Operand(String::kMaxAsciiCharCode));
+ __ cmp(char_code, Operand(String::kMaxOneByteCharCode));
__ b(hi, deferred->entry());
__ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
__ add(result, result, Operand(char_code, LSL, kPointerSizeLog2));
@@ -4358,7 +4777,7 @@ void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
// TODO(3095996): Get rid of this. For now, we need to make the
// result register contain a valid pointer because it is already
// contained in the register pointer map.
- __ mov(result, Operand(0));
+ __ mov(result, Operand::Zero());
PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
__ SmiTag(char_code);
@@ -4376,6 +4795,7 @@ void LCodeGen::DoStringLength(LStringLength* instr) {
void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
+ CpuFeatures::Scope scope(VFP2);
LOperand* input = instr->value();
ASSERT(input->IsRegister() || input->IsStackSlot());
LOperand* output = instr->result();
@@ -4393,6 +4813,7 @@ void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
+ CpuFeatures::Scope scope(VFP2);
LOperand* input = instr->value();
LOperand* output = instr->result();
@@ -4454,13 +4875,49 @@ void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
}
+// Converts an unsigned integer with the specified number of leading zeroes
+// in its binary representation to an IEEE 754 double.
+// The integer to convert is passed in register hiword.
+// The resulting double is returned in registers hiword:loword.
+// This function does not work correctly for 0.
+static void GenerateUInt2Double(MacroAssembler* masm,
+ Register hiword,
+ Register loword,
+ Register scratch,
+ int leading_zeroes) {
+ const int meaningful_bits = kBitsPerInt - leading_zeroes - 1;
+ const int biased_exponent = HeapNumber::kExponentBias + meaningful_bits;
+
+ const int mantissa_shift_for_hi_word =
+ meaningful_bits - HeapNumber::kMantissaBitsInTopWord;
+ const int mantissa_shift_for_lo_word =
+ kBitsPerInt - mantissa_shift_for_hi_word;
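+
+ // Place the biased exponent in scratch, then splice the integer's
+ // significant bits across the high and low words of the double.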
+ masm->mov(scratch, Operand(biased_exponent << HeapNumber::kExponentShift));
+ if (mantissa_shift_for_hi_word > 0) {
+ masm->mov(loword, Operand(hiword, LSL, mantissa_shift_for_lo_word));
+ masm->orr(hiword, scratch,
+ Operand(hiword, LSR, mantissa_shift_for_hi_word));
+ } else {
+ masm->mov(loword, Operand::Zero());
+ masm->orr(hiword, scratch,
+ Operand(hiword, LSL, -mantissa_shift_for_hi_word));
+ }
+
+ // If the least significant bit of the biased exponent was not 1, it was
+ // corrupted by the most significant bit of the mantissa, so fix that here.
+ if (!(biased_exponent & 1)) {
+ masm->bic(hiword, hiword, Operand(1 << HeapNumber::kExponentShift));
+ }
+}
+
+
void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
LOperand* value,
IntegerSignedness signedness) {
Label slow;
Register src = ToRegister(value);
Register dst = ToRegister(instr->result());
- DoubleRegister dbl_scratch = double_scratch0();
+ DwVfpRegister dbl_scratch = double_scratch0();
SwVfpRegister flt_scratch = dbl_scratch.low();
// Preserve the value of all registers.
@@ -4475,16 +4932,40 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
__ SmiUntag(src, dst);
__ eor(src, src, Operand(0x80000000));
}
- __ vmov(flt_scratch, src);
- __ vcvt_f64_s32(dbl_scratch, flt_scratch);
+ if (CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope(VFP2);
+ __ vmov(flt_scratch, src);
+ __ vcvt_f64_s32(dbl_scratch, flt_scratch);
+ } else {
+ FloatingPointHelper::Destination dest =
+ FloatingPointHelper::kCoreRegisters;
+ FloatingPointHelper::ConvertIntToDouble(masm(), src, dest, d0,
+ sfpd_lo, sfpd_hi,
+ scratch0(), s0);
+ }
} else {
- __ vmov(flt_scratch, src);
- __ vcvt_f64_u32(dbl_scratch, flt_scratch);
+ if (CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope(VFP2);
+ __ vmov(flt_scratch, src);
+ __ vcvt_f64_u32(dbl_scratch, flt_scratch);
+ } else {
+ Label no_leading_zero, done;
+ __ tst(src, Operand(0x80000000));
+ __ b(ne, &no_leading_zero);
+
+ // Integer has one leading zero.
+ GenerateUInt2Double(masm(), sfpd_hi, sfpd_lo, r9, 1);
+ __ b(&done);
+
+ __ bind(&no_leading_zero);
+ GenerateUInt2Double(masm(), sfpd_hi, sfpd_lo, r9, 0);
+ __ b(&done);
+ }
}
if (FLAG_inline_new) {
- __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r5, r3, r4, r6, &slow, DONT_TAG_RESULT);
+ __ LoadRoot(scratch0(), Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(r5, r3, r4, scratch0(), &slow, DONT_TAG_RESULT);
__ Move(dst, r5);
__ b(&done);
}
@@ -4495,7 +4976,7 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
// TODO(3095996): Put a valid pointer value in the stack slot where the result
// register is stored, as this register is in the pointer map, but contains an
// integer value.
- __ mov(ip, Operand(0));
+ __ mov(ip, Operand::Zero());
__ StoreToSafepointRegisterSlot(ip, dst);
CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
__ Move(dst, r0);
@@ -4504,7 +4985,13 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
// Done. Put the value in dbl_scratch into the value of the allocated heap
// number.
__ bind(&done);
- __ vstr(dbl_scratch, dst, HeapNumber::kValueOffset);
+ if (CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope(VFP2);
+ __ vstr(dbl_scratch, dst, HeapNumber::kValueOffset);
+ } else {
+ __ str(sfpd_lo, MemOperand(dst, HeapNumber::kMantissaOffset));
+ __ str(sfpd_hi, MemOperand(dst, HeapNumber::kExponentOffset));
+ }
__ add(dst, dst, Operand(kHeapObjectTag));
__ StoreToSafepointRegisterSlot(dst, dst);
}
@@ -4521,12 +5008,64 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
LNumberTagD* instr_;
};
- DoubleRegister input_reg = ToDoubleRegister(instr->value());
+ DwVfpRegister input_reg = ToDoubleRegister(instr->value());
Register scratch = scratch0();
Register reg = ToRegister(instr->result());
Register temp1 = ToRegister(instr->temp());
Register temp2 = ToRegister(instr->temp2());
+ bool convert_hole = false;
+ HValue* change_input = instr->hydrogen()->value();
+ if (change_input->IsLoadKeyed()) {
+ HLoadKeyed* load = HLoadKeyed::cast(change_input);
+ convert_hole = load->UsesMustHandleHole();
+ }
+
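+ // A keyed load that must handle holes can hand us the hole NaN; it has
+ // to be materialized as the_hole_value rather than boxed as a plain NaN.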
+ Label no_special_nan_handling;
+ Label done;
+ if (convert_hole) {
+ if (CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope(VFP2);
+ DwVfpRegister input_reg = ToDoubleRegister(instr->value());
+ __ VFPCompareAndSetFlags(input_reg, input_reg);
+ __ b(vc, &no_special_nan_handling);
+ __ vmov(reg, scratch0(), input_reg);
+ __ cmp(scratch0(), Operand(kHoleNanUpper32));
+ Label canonicalize;
+ __ b(ne, &canonicalize);
+ __ Move(reg, factory()->the_hole_value());
+ __ b(&done);
+ __ bind(&canonicalize);
+ __ Vmov(input_reg,
+ FixedDoubleArray::canonical_not_the_hole_nan_as_double(),
+ no_reg);
+ } else {
+ Label not_hole;
+ __ cmp(sfpd_hi, Operand(kHoleNanUpper32));
+ __ b(ne, &not_hole);
+ __ Move(reg, factory()->the_hole_value());
+ __ b(&done);
+ __ bind(&not_hole);
+ __ and_(scratch, sfpd_hi, Operand(0x7ff00000));
+ __ cmp(scratch, Operand(0x7ff00000));
+ __ b(ne, &no_special_nan_handling);
+ Label special_nan_handling;
+ __ tst(sfpd_hi, Operand(0x000FFFFF));
+ __ b(ne, &special_nan_handling);
+ __ cmp(sfpd_lo, Operand::Zero());
+ __ b(eq, &no_special_nan_handling);
+ __ bind(&special_nan_handling);
+ double canonical_nan =
+ FixedDoubleArray::canonical_not_the_hole_nan_as_double();
+ uint64_t casted_nan = BitCast<uint64_t>(canonical_nan);
+ __ mov(sfpd_lo,
+ Operand(static_cast<uint32_t>(casted_nan & 0xFFFFFFFF)));
+ __ mov(sfpd_hi,
+ Operand(static_cast<uint32_t>(casted_nan >> 32)));
+ }
+ }
+
+ __ bind(&no_special_nan_handling);
DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
if (FLAG_inline_new) {
__ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
@@ -4537,9 +5076,16 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
__ jmp(deferred->entry());
}
__ bind(deferred->exit());
- __ vstr(input_reg, reg, HeapNumber::kValueOffset);
+ if (CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope(VFP2);
+ __ vstr(input_reg, reg, HeapNumber::kValueOffset);
+ } else {
+ __ str(sfpd_lo, MemOperand(reg, HeapNumber::kValueOffset));
+ __ str(sfpd_hi, MemOperand(reg, HeapNumber::kValueOffset + kPointerSize));
+ }
// Now that we have finished with the object's real address, tag it.
__ add(reg, reg, Operand(kHeapObjectTag));
+ __ bind(&done);
}
@@ -4548,7 +5094,7 @@ void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
// result register contain a valid pointer because it is already
// contained in the register pointer map.
Register reg = ToRegister(instr->result());
- __ mov(reg, Operand(0));
+ __ mov(reg, Operand::Zero());
PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
@@ -4578,53 +5124,69 @@ void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
void LCodeGen::EmitNumberUntagD(Register input_reg,
- DoubleRegister result_reg,
+ DwVfpRegister result_reg,
bool deoptimize_on_undefined,
bool deoptimize_on_minus_zero,
- LEnvironment* env) {
+ LEnvironment* env,
+ NumberUntagDMode mode) {
Register scratch = scratch0();
SwVfpRegister flt_scratch = double_scratch0().low();
ASSERT(!result_reg.is(double_scratch0()));
+ CpuFeatures::Scope scope(VFP2);
Label load_smi, heap_number, done;
- // Smi check.
- __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
+ if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
+ // Smi check.
+ __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
- // Heap number map check.
- __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
- __ cmp(scratch, Operand(ip));
- if (deoptimize_on_undefined) {
- DeoptimizeIf(ne, env);
- } else {
- Label heap_number;
- __ b(eq, &heap_number);
+ // Heap number map check.
+ __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
+ __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
+ __ cmp(scratch, Operand(ip));
+ if (deoptimize_on_undefined) {
+ DeoptimizeIf(ne, env);
+ } else {
+ Label heap_number;
+ __ b(eq, &heap_number);
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(input_reg, Operand(ip));
- DeoptimizeIf(ne, env);
+ __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+ __ cmp(input_reg, Operand(ip));
+ DeoptimizeIf(ne, env);
- // Convert undefined to NaN.
- __ LoadRoot(ip, Heap::kNanValueRootIndex);
- __ sub(ip, ip, Operand(kHeapObjectTag));
+ // Convert undefined to NaN.
+ __ LoadRoot(ip, Heap::kNanValueRootIndex);
+ __ sub(ip, ip, Operand(kHeapObjectTag));
+ __ vldr(result_reg, ip, HeapNumber::kValueOffset);
+ __ jmp(&done);
+
+ __ bind(&heap_number);
+ }
+ // Heap number to double register conversion.
+ __ sub(ip, input_reg, Operand(kHeapObjectTag));
__ vldr(result_reg, ip, HeapNumber::kValueOffset);
+ if (deoptimize_on_minus_zero) {
+ __ vmov(ip, result_reg.low());
+ __ cmp(ip, Operand::Zero());
+ __ b(ne, &done);
+ __ vmov(ip, result_reg.high());
+ __ cmp(ip, Operand(HeapNumber::kSignMask));
+ DeoptimizeIf(eq, env);
+ }
__ jmp(&done);
-
- __ bind(&heap_number);
- }
- // Heap number to double register conversion.
- __ sub(ip, input_reg, Operand(kHeapObjectTag));
- __ vldr(result_reg, ip, HeapNumber::kValueOffset);
- if (deoptimize_on_minus_zero) {
- __ vmov(ip, result_reg.low());
- __ cmp(ip, Operand(0));
- __ b(ne, &done);
- __ vmov(ip, result_reg.high());
- __ cmp(ip, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(eq, env);
+ } else if (mode == NUMBER_CANDIDATE_IS_SMI_OR_HOLE) {
+ __ SmiUntag(scratch, input_reg, SetCC);
+ DeoptimizeIf(cs, env);
+ } else if (mode == NUMBER_CANDIDATE_IS_SMI_CONVERT_HOLE) {
+ __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
+ __ Vmov(result_reg,
+ FixedDoubleArray::hole_nan_as_double(),
+ no_reg);
+ __ b(&done);
+ } else {
+ __ SmiUntag(scratch, input_reg);
+ ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
}
- __ jmp(&done);
// Smi to double register conversion
__ bind(&load_smi);
@@ -4659,8 +5221,8 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
__ cmp(scratch1, Operand(ip));
if (instr->truncating()) {
+ CpuFeatures::Scope scope(VFP2);
Register scratch3 = ToRegister(instr->temp2());
- SwVfpRegister single_scratch = double_scratch.low();
ASSERT(!scratch3.is(input_reg) &&
!scratch3.is(scratch1) &&
!scratch3.is(scratch2));
@@ -4673,7 +5235,7 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
__ cmp(input_reg, Operand(ip));
DeoptimizeIf(ne, instr->environment());
- __ mov(input_reg, Operand(0));
+ __ mov(input_reg, Operand::Zero());
__ b(&done);
__ bind(&heap_number);
@@ -4682,7 +5244,7 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
__ EmitECMATruncate(input_reg,
double_scratch2,
- single_scratch,
+ double_scratch,
scratch1,
scratch2,
scratch3);
@@ -4703,7 +5265,7 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
DeoptimizeIf(ne, instr->environment());
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ cmp(input_reg, Operand(0));
+ __ cmp(input_reg, Operand::Zero());
__ b(ne, &done);
__ vmov(scratch1, double_scratch.high());
__ tst(scratch1, Operand(HeapNumber::kSignMask));
@@ -4750,12 +5312,30 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
ASSERT(result->IsDoubleRegister());
Register input_reg = ToRegister(input);
- DoubleRegister result_reg = ToDoubleRegister(result);
+ DwVfpRegister result_reg = ToDoubleRegister(result);
+
+ NumberUntagDMode mode = NUMBER_CANDIDATE_IS_ANY_TAGGED;
+ HValue* value = instr->hydrogen()->value();
+ if (value->type().IsSmi()) {
+ if (value->IsLoadKeyed()) {
+ HLoadKeyed* load = HLoadKeyed::cast(value);
+ if (load->UsesMustHandleHole()) {
+ if (load->hole_mode() == ALLOW_RETURN_HOLE) {
+ mode = NUMBER_CANDIDATE_IS_SMI_CONVERT_HOLE;
+ } else {
+ mode = NUMBER_CANDIDATE_IS_SMI_OR_HOLE;
+ }
+ } else {
+ mode = NUMBER_CANDIDATE_IS_SMI;
+ }
+ }
+ }
EmitNumberUntagD(input_reg, result_reg,
instr->hydrogen()->deoptimize_on_undefined(),
instr->hydrogen()->deoptimize_on_minus_zero(),
- instr->environment());
+ instr->environment(),
+ mode);
}
@@ -4764,20 +5344,19 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
Register scratch1 = scratch0();
Register scratch2 = ToRegister(instr->temp());
DwVfpRegister double_input = ToDoubleRegister(instr->value());
+ DwVfpRegister double_scratch = double_scratch0();
Label done;
if (instr->truncating()) {
Register scratch3 = ToRegister(instr->temp2());
- SwVfpRegister single_scratch = double_scratch0().low();
__ EmitECMATruncate(result_reg,
double_input,
- single_scratch,
+ double_scratch,
scratch1,
scratch2,
scratch3);
} else {
- DwVfpRegister double_scratch = double_scratch0();
__ EmitVFPTruncate(kRoundToMinusInf,
result_reg,
double_input,
@@ -4867,46 +5446,48 @@ void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
}
-void LCodeGen::DoCheckMapCommon(Register reg,
- Register scratch,
+void LCodeGen::DoCheckMapCommon(Register map_reg,
Handle<Map> map,
CompareMapMode mode,
LEnvironment* env) {
Label success;
- __ CompareMap(reg, scratch, map, &success, mode);
+ __ CompareMap(map_reg, map, &success, mode);
DeoptimizeIf(ne, env);
__ bind(&success);
}
void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
- Register scratch = scratch0();
+ Register map_reg = scratch0();
LOperand* input = instr->value();
ASSERT(input->IsRegister());
Register reg = ToRegister(input);
Label success;
SmallMapList* map_set = instr->hydrogen()->map_set();
+ __ ldr(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
for (int i = 0; i < map_set->length() - 1; i++) {
Handle<Map> map = map_set->at(i);
- __ CompareMap(reg, scratch, map, &success, REQUIRE_EXACT_MAP);
+ __ CompareMap(map_reg, map, &success, REQUIRE_EXACT_MAP);
__ b(eq, &success);
}
Handle<Map> map = map_set->last();
- DoCheckMapCommon(reg, scratch, map, REQUIRE_EXACT_MAP, instr->environment());
+ DoCheckMapCommon(map_reg, map, REQUIRE_EXACT_MAP, instr->environment());
__ bind(&success);
}
void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
- DoubleRegister value_reg = ToDoubleRegister(instr->unclamped());
+ CpuFeatures::Scope vfp_scope(VFP2);
+ DwVfpRegister value_reg = ToDoubleRegister(instr->unclamped());
Register result_reg = ToRegister(instr->result());
- DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
+ DwVfpRegister temp_reg = ToDoubleRegister(instr->temp());
__ ClampDoubleToUint8(result_reg, value_reg, temp_reg);
}
void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
+ CpuFeatures::Scope scope(VFP2);
Register unclamped_reg = ToRegister(instr->unclamped());
Register result_reg = ToRegister(instr->result());
__ ClampUint8(result_reg, unclamped_reg);
@@ -4914,10 +5495,11 @@ void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
+ CpuFeatures::Scope scope(VFP2);
Register scratch = scratch0();
Register input_reg = ToRegister(instr->unclamped());
Register result_reg = ToRegister(instr->result());
- DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
+ DwVfpRegister temp_reg = ToDoubleRegister(instr->temp());
Label is_smi, done, heap_number;
// Both smi and heap number cases are handled.
@@ -4932,7 +5514,7 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
// conversions.
__ cmp(input_reg, Operand(factory()->undefined_value()));
DeoptimizeIf(ne, instr->environment());
- __ mov(result_reg, Operand(0));
+ __ mov(result_reg, Operand::Zero());
__ jmp(&done);
// Heap number
@@ -4952,30 +5534,30 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
ASSERT(instr->temp()->Equals(instr->result()));
- Register temp1 = ToRegister(instr->temp());
- Register temp2 = ToRegister(instr->temp2());
+ Register prototype_reg = ToRegister(instr->temp());
+ Register map_reg = ToRegister(instr->temp2());
- Handle<JSObject> holder = instr->holder();
- Handle<JSObject> current_prototype = instr->prototype();
+ ZoneList<Handle<JSObject> >* prototypes = instr->prototypes();
+ ZoneList<Handle<Map> >* maps = instr->maps();
- // Load prototype object.
- __ LoadHeapObject(temp1, current_prototype);
+ ASSERT(prototypes->length() == maps->length());
- // Check prototype maps up to the holder.
- while (!current_prototype.is_identical_to(holder)) {
- DoCheckMapCommon(temp1, temp2,
- Handle<Map>(current_prototype->map()),
- ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
- current_prototype =
- Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
- // Load next prototype object.
- __ LoadHeapObject(temp1, current_prototype);
+ if (instr->hydrogen()->CanOmitPrototypeChecks()) {
+ for (int i = 0; i < maps->length(); i++) {
+ prototype_maps_.Add(maps->at(i), info()->zone());
+ }
+ __ LoadHeapObject(prototype_reg,
+ prototypes->at(prototypes->length() - 1));
+ } else {
+ for (int i = 0; i < prototypes->length(); i++) {
+ __ LoadHeapObject(prototype_reg, prototypes->at(i));
+ __ ldr(map_reg, FieldMemOperand(prototype_reg, HeapObject::kMapOffset));
+ DoCheckMapCommon(map_reg,
+ maps->at(i),
+ ALLOW_ELEMENT_TRANSITION_MAPS,
+ instr->environment());
+ }
}
-
- // Check the holder map.
- DoCheckMapCommon(temp1, temp2,
- Handle<Map>(current_prototype->map()),
- ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
}
@@ -5052,7 +5634,7 @@ void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) {
// TODO(3095996): Get rid of this. For now, we need to make the
// result register contain a valid pointer because it is already
// contained in the register pointer map.
- __ mov(result, Operand(0));
+ __ mov(result, Operand::Zero());
PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
__ mov(r0, Operand(Smi::FromInt(instance_size)));
@@ -5062,10 +5644,74 @@ void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) {
}
+void LCodeGen::DoAllocate(LAllocate* instr) {
+ class DeferredAllocate: public LDeferredCode {
+ public:
+ DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() { codegen()->DoDeferredAllocate(instr_); }
+ virtual LInstruction* instr() { return instr_; }
+ private:
+ LAllocate* instr_;
+ };
+
+ DeferredAllocate* deferred =
+ new(zone()) DeferredAllocate(this, instr);
+
+ Register result = ToRegister(instr->result());
+ Register scratch = ToRegister(instr->temp1());
+ Register scratch2 = ToRegister(instr->temp2());
+
+ // Allocate memory for the object.
+ AllocationFlags flags = TAG_OBJECT;
+ if (instr->hydrogen()->MustAllocateDoubleAligned()) {
+ flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
+ }
+ if (instr->size()->IsConstantOperand()) {
+ int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+ __ AllocateInNewSpace(size,
+ result,
+ scratch,
+ scratch2,
+ deferred->entry(),
+ flags);
+ } else {
+ Register size = ToRegister(instr->size());
+ __ AllocateInNewSpace(size,
+ result,
+ scratch,
+ scratch2,
+ deferred->entry(),
+ flags);
+ }
+
+ __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
+ Register size = ToRegister(instr->size());
+ Register result = ToRegister(instr->result());
+
+ // TODO(3095996): Get rid of this. For now, we need to make the
+ // result register contain a valid pointer because it is already
+ // contained in the register pointer map.
+ __ mov(result, Operand(Smi::FromInt(0)));
+
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ __ SmiTag(size, size);
+ __ push(size);
+ CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr);
+ __ StoreToSafepointRegisterSlot(r0, result);
+}
+
+
void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
Handle<FixedArray> literals(instr->environment()->closure()->literals());
ElementsKind boilerplate_elements_kind =
instr->hydrogen()->boilerplate_elements_kind();
+ AllocationSiteMode allocation_site_mode =
+ instr->hydrogen()->allocation_site_mode();
// Deopt if the array literal boilerplate ElementsKind is of a type different
// than the expected one. The check isn't necessary if the boilerplate has
@@ -5097,8 +5743,8 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
ASSERT(instr->hydrogen()->depth() == 1);
FastCloneShallowArrayStub::Mode mode =
FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS;
- FastCloneShallowArrayStub stub(mode, length);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ FastCloneShallowArrayStub stub(mode, DONT_TRACK_ALLOCATION_SITE, length);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
} else if (instr->hydrogen()->depth() > 1) {
CallRuntime(Runtime::kCreateArrayLiteral, 3, instr);
} else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
@@ -5106,10 +5752,10 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
} else {
FastCloneShallowArrayStub::Mode mode =
boilerplate_elements_kind == FAST_DOUBLE_ELEMENTS
- ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
- : FastCloneShallowArrayStub::CLONE_ELEMENTS;
- FastCloneShallowArrayStub stub(mode, length);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
+ : FastCloneShallowArrayStub::CLONE_ELEMENTS;
+ FastCloneShallowArrayStub stub(mode, allocation_site_mode, length);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}
}
@@ -5117,10 +5763,14 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
Register result,
Register source,
- int* offset) {
+ int* offset,
+ AllocationSiteMode mode) {
ASSERT(!source.is(r2));
ASSERT(!result.is(r2));
+ bool create_allocation_site_info = mode == TRACK_ALLOCATION_SITE &&
+ object->map()->CanTrackAllocationSite();
+
// Only elements backing stores for non-COW arrays need to be copied.
Handle<FixedArrayBase> elements(object->elements());
bool has_elements = elements->length() > 0 &&
@@ -5130,8 +5780,13 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
// this object and its backing store.
int object_offset = *offset;
int object_size = object->map()->instance_size();
- int elements_offset = *offset + object_size;
int elements_size = has_elements ? elements->Size() : 0;
+ int elements_offset = *offset + object_size;
+ if (create_allocation_site_info) {
+ elements_offset += AllocationSiteInfo::kSize;
+ *offset += AllocationSiteInfo::kSize;
+ }
+
*offset += object_size + elements_size;
// Copy object header.
@@ -5150,13 +5805,15 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
// Copy in-object properties.
for (int i = 0; i < inobject_properties; i++) {
int total_offset = object_offset + object->GetInObjectPropertyOffset(i);
- Handle<Object> value = Handle<Object>(object->InObjectPropertyAt(i));
+ Handle<Object> value = Handle<Object>(object->InObjectPropertyAt(i),
+ isolate());
if (value->IsJSObject()) {
Handle<JSObject> value_object = Handle<JSObject>::cast(value);
__ add(r2, result, Operand(*offset));
__ str(r2, FieldMemOperand(result, total_offset));
__ LoadHeapObject(source, value_object);
- EmitDeepCopy(value_object, result, source, offset);
+ EmitDeepCopy(value_object, result, source, offset,
+ DONT_TRACK_ALLOCATION_SITE);
} else if (value->IsHeapObject()) {
__ LoadHeapObject(r2, Handle<HeapObject>::cast(value));
__ str(r2, FieldMemOperand(result, total_offset));
@@ -5166,6 +5823,14 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
}
}
+ // Build AllocationSiteInfo if desired.
+ if (create_allocation_site_info) {
+ __ mov(r2, Operand(Handle<Map>(isolate()->heap()->
+ allocation_site_info_map())));
+ __ str(r2, FieldMemOperand(result, object_size));
+ __ str(source, FieldMemOperand(result, object_size + kPointerSize));
+ }
+
if (has_elements) {
// Copy elements backing store header.
__ LoadHeapObject(source, elements);
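
When the site is tracked, EmitDeepCopy now reserves AllocationSiteInfo::kSize extra bytes between the copied object and its elements store, and bumps both elements_offset and *offset to match; the two stores above then fill that slot with the allocation_site_info map and the boilerplate pointer. A stand-alone model (not part of the patch) of the layout, with kSize assumed to be two pointers:

struct Layout {
  int object_offset;     // copied JSObject header + in-object properties
  int site_info_offset;  // AllocationSiteInfo {map, payload}, or -1
  int elements_offset;   // elements backing store, if any
};

Layout ComputeDeepCopyLayout(int object_size, bool track_site) {
  const int kPointerSize = 4;                        // ARM32
  const int kAllocationSiteInfoSize = 2 * kPointerSize;  // assumed kSize
  Layout l;
  l.object_offset = 0;
  l.site_info_offset = track_site ? object_size : -1;
  l.elements_offset = object_size + (track_site ? kAllocationSiteInfoSize : 0);
  return l;
}
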
@@ -5195,13 +5860,14 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
Handle<FixedArray> fast_elements = Handle<FixedArray>::cast(elements);
for (int i = 0; i < elements_length; i++) {
int total_offset = elements_offset + FixedArray::OffsetOfElementAt(i);
- Handle<Object> value(fast_elements->get(i));
+ Handle<Object> value(fast_elements->get(i), isolate());
if (value->IsJSObject()) {
Handle<JSObject> value_object = Handle<JSObject>::cast(value);
__ add(r2, result, Operand(*offset));
__ str(r2, FieldMemOperand(result, total_offset));
__ LoadHeapObject(source, value_object);
- EmitDeepCopy(value_object, result, source, offset);
+ EmitDeepCopy(value_object, result, source, offset,
+ DONT_TRACK_ALLOCATION_SITE);
} else if (value->IsHeapObject()) {
__ LoadHeapObject(r2, Handle<HeapObject>::cast(value));
__ str(r2, FieldMemOperand(result, total_offset));
@@ -5252,7 +5918,8 @@ void LCodeGen::DoFastLiteral(LFastLiteral* instr) {
__ bind(&allocated);
int offset = 0;
__ LoadHeapObject(r1, instr->hydrogen()->boilerplate());
- EmitDeepCopy(instr->hydrogen()->boilerplate(), r0, r1, &offset);
+ EmitDeepCopy(instr->hydrogen()->boilerplate(), r0, r1, &offset,
+ instr->hydrogen()->allocation_site_mode());
ASSERT_EQ(size, offset);
}
@@ -5263,25 +5930,26 @@ void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
instr->hydrogen()->constant_properties();
// Set up the parameters to the stub/runtime call.
- __ LoadHeapObject(r4, literals);
- __ mov(r3, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
- __ mov(r2, Operand(constant_properties));
+ __ LoadHeapObject(r3, literals);
+ __ mov(r2, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
+ __ mov(r1, Operand(constant_properties));
int flags = instr->hydrogen()->fast_elements()
? ObjectLiteral::kFastElements
: ObjectLiteral::kNoFlags;
- __ mov(r1, Operand(Smi::FromInt(flags)));
- __ Push(r4, r3, r2, r1);
+ __ mov(r0, Operand(Smi::FromInt(flags)));
// Pick the right runtime function or stub to call.
int properties_count = constant_properties->length() / 2;
if (instr->hydrogen()->depth() > 1) {
+ __ Push(r3, r2, r1, r0);
CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
} else if (flags != ObjectLiteral::kFastElements ||
properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
+ __ Push(r3, r2, r1, r0);
CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr);
} else {
FastCloneShallowObjectStub stub(properties_count);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}
}
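
The hunk above shifts the literal parameters down one register (r4..r1 becomes r3..r0) and moves the Push into the two runtime branches only, so the FastCloneShallowObjectStub path now receives its arguments purely in registers. A stand-alone model (not part of the patch) of that control flow; the helper names and the kMaxStubProperties value are assumptions:

const int kMaxStubProperties = 6;  // stands in for kMaximumClonedProperties

void PushArgumentsOnStack() {}  // models __ Push(r3, r2, r1, r0)
void CallRuntimeHelper() {}
void CallCloneStub() {}

void EmitObjectLiteral(int depth, bool fast_elements, int properties_count) {
  if (depth > 1) {
    PushArgumentsOnStack();  // runtime calls read arguments from the stack
    CallRuntimeHelper();     // Runtime::kCreateObjectLiteral
  } else if (!fast_elements || properties_count > kMaxStubProperties) {
    PushArgumentsOnStack();
    CallRuntimeHelper();     // Runtime::kCreateObjectLiteralShallow
  } else {
    CallCloneStub();         // arguments stay in r3..r0; nothing is pushed
  }
}
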
@@ -5355,7 +6023,7 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
FastNewClosureStub stub(shared_info->language_mode());
__ mov(r1, Operand(shared_info));
__ push(r1);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
} else {
__ mov(r2, Operand(shared_info));
__ mov(r1, Operand(pretenure
@@ -5397,14 +6065,14 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
Handle<String> type_name) {
Condition final_branch_condition = kNoCondition;
Register scratch = scratch0();
- if (type_name->Equals(heap()->number_symbol())) {
+ if (type_name->Equals(heap()->number_string())) {
__ JumpIfSmi(input, true_label);
__ ldr(input, FieldMemOperand(input, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
__ cmp(input, Operand(ip));
final_branch_condition = eq;
- } else if (type_name->Equals(heap()->string_symbol())) {
+ } else if (type_name->Equals(heap()->string_string())) {
__ JumpIfSmi(input, false_label);
__ CompareObjectType(input, input, scratch, FIRST_NONSTRING_TYPE);
__ b(ge, false_label);
@@ -5412,17 +6080,17 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
__ tst(ip, Operand(1 << Map::kIsUndetectable));
final_branch_condition = eq;
- } else if (type_name->Equals(heap()->boolean_symbol())) {
+ } else if (type_name->Equals(heap()->boolean_string())) {
__ CompareRoot(input, Heap::kTrueValueRootIndex);
__ b(eq, true_label);
__ CompareRoot(input, Heap::kFalseValueRootIndex);
final_branch_condition = eq;
- } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_symbol())) {
+ } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_string())) {
__ CompareRoot(input, Heap::kNullValueRootIndex);
final_branch_condition = eq;
- } else if (type_name->Equals(heap()->undefined_symbol())) {
+ } else if (type_name->Equals(heap()->undefined_string())) {
__ CompareRoot(input, Heap::kUndefinedValueRootIndex);
__ b(eq, true_label);
__ JumpIfSmi(input, false_label);
@@ -5432,7 +6100,7 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
__ tst(ip, Operand(1 << Map::kIsUndetectable));
final_branch_condition = ne;
- } else if (type_name->Equals(heap()->function_symbol())) {
+ } else if (type_name->Equals(heap()->function_string())) {
STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
__ JumpIfSmi(input, false_label);
__ CompareObjectType(input, scratch, input, JS_FUNCTION_TYPE);
@@ -5440,14 +6108,21 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
__ cmp(input, Operand(JS_FUNCTION_PROXY_TYPE));
final_branch_condition = eq;
- } else if (type_name->Equals(heap()->object_symbol())) {
+ } else if (type_name->Equals(heap()->object_string())) {
__ JumpIfSmi(input, false_label);
if (!FLAG_harmony_typeof) {
__ CompareRoot(input, Heap::kNullValueRootIndex);
__ b(eq, true_label);
}
- __ CompareObjectType(input, input, scratch,
- FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
+ if (FLAG_harmony_symbols) {
+ __ CompareObjectType(input, input, scratch, SYMBOL_TYPE);
+ __ b(eq, true_label);
+ __ CompareInstanceType(input, scratch,
+ FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
+ } else {
+ __ CompareObjectType(input, input, scratch,
+ FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
+ }
__ b(lt, false_label);
__ CompareInstanceType(input, scratch, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
__ b(gt, false_label);
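
The branch sequence above implements the typeof x == "object" predicate, now with a symbol case under FLAG_harmony_symbols. A host-side sketch (not part of the patch); the numeric instance-type values are placeholders, only their relative order matters:

const int SYMBOL_TYPE = 128;                         // assumed placeholder
const int FIRST_NONCALLABLE_SPEC_OBJECT_TYPE = 129;  // assumed placeholder
const int LAST_NONCALLABLE_SPEC_OBJECT_TYPE = 134;   // assumed placeholder

bool TypeofIsObject(bool is_smi, bool is_null, int instance_type,
                    bool is_undetectable, bool harmony_typeof,
                    bool harmony_symbols) {
  if (is_smi) return false;
  if (is_null) return !harmony_typeof;  // classic mode: typeof null == "object"
  if (harmony_symbols && instance_type == SYMBOL_TYPE) return true;  // new case
  return instance_type >= FIRST_NONCALLABLE_SPEC_OBJECT_TYPE &&
         instance_type <= LAST_NONCALLABLE_SPEC_OBJECT_TYPE &&
         !is_undetectable;
}
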
@@ -5494,6 +6169,7 @@ void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
void LCodeGen::EnsureSpaceForLazyDeopt() {
+ if (info()->IsStub()) return;
// Ensure that we have enough space after the previous lazy-bailout
// instruction for patching the code here.
int current_pc = masm()->pc_offset();
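
Stubs are never patched for lazy deoptimization, hence the early return added above. The padding done by the unchanged remainder of the function (not shown in this hunk) has roughly the following contract, sketched here as stand-alone C++ (not part of the patch):

void EmitNop() {}

void PadForLazyDeopt(bool is_stub, int current_pc, int last_lazy_deopt_pc,
                     int patch_size, int instr_size) {
  if (is_stub) return;  // nothing will ever be patched in a stub
  // Emit nops until the previous lazy-deopt site has patch_size bytes of room.
  int padding = last_lazy_deopt_pc + patch_size - current_pc;
  while (padding > 0) {
    EmitNop();
    padding -= instr_size;
  }
}
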
@@ -5526,6 +6202,11 @@ void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
}
+void LCodeGen::DoDummyUse(LDummyUse* instr) {
+ // Nothing to see here, move on!
+}
+
+
void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
Register object = ToRegister(instr->object());
Register key = ToRegister(instr->key());
@@ -5586,8 +6267,8 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
__ cmp(sp, Operand(ip));
__ b(hs, &done);
StackCheckStub stub;
- PredictableCodeSizeScope predictable(masm_);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ PredictableCodeSizeScope predictable(masm_, 2 * Assembler::kInstrSize);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
EnsureSpaceForLazyDeopt();
__ bind(&done);
RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
@@ -5680,7 +6361,7 @@ void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
__ ldr(result,
FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
- __ cmp(result, Operand(0));
+ __ cmp(result, Operand::Zero());
DeoptimizeIf(eq, instr->environment());
__ bind(&done);
@@ -5703,7 +6384,7 @@ void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
Register scratch = scratch0();
Label out_of_object, done;
- __ cmp(index, Operand(0));
+ __ cmp(index, Operand::Zero());
__ b(lt, &out_of_object);
STATIC_ASSERT(kPointerSizeLog2 > kSmiTagSize);
diff --git a/src/3rdparty/v8/src/arm/lithium-codegen-arm.h b/src/3rdparty/v8/src/arm/lithium-codegen-arm.h
index 921285b..f1e3332 100644
--- a/src/3rdparty/v8/src/arm/lithium-codegen-arm.h
+++ b/src/3rdparty/v8/src/arm/lithium-codegen-arm.h
@@ -54,6 +54,7 @@ class LCodeGen BASE_EMBEDDED {
deoptimizations_(4, info->zone()),
deopt_jump_table_(4, info->zone()),
deoptimization_literals_(8, info->zone()),
+ prototype_maps_(0, info->zone()),
inlined_function_count_(0),
scope_(info->scope()),
status_(UNUSED),
@@ -61,6 +62,7 @@ class LCodeGen BASE_EMBEDDED {
deferred_(8, info->zone()),
osr_pc_offset_(-1),
last_lazy_deopt_pc_(0),
+ frame_is_built_(false),
safepoints_(info->zone()),
resolver_(this),
expected_safepoint_kind_(Safepoint::kSimple) {
@@ -76,6 +78,15 @@ class LCodeGen BASE_EMBEDDED {
Heap* heap() const { return isolate()->heap(); }
Zone* zone() const { return zone_; }
+ bool NeedsEagerFrame() const {
+ return GetStackSlotCount() > 0 ||
+ info()->is_non_deferred_calling() ||
+ !info()->IsStub();
+ }
+ bool NeedsDeferredFrame() const {
+ return !NeedsEagerFrame() && info()->is_deferred_calling();
+ }
+
// Support for converting LOperands to assembler types.
// LOperand must be a register.
Register ToRegister(LOperand* op) const;
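
The two predicates added above are mutually exclusive by construction: a deferred frame is only built when no eager frame exists. A stand-alone mirror (not part of the patch, field names assumed) that makes this explicit:

struct CompilationInfoModel {
  int stack_slots;
  bool is_non_deferred_calling;
  bool is_deferred_calling;
  bool is_stub;
};

bool NeedsEagerFrame(const CompilationInfoModel& info) {
  // Ordinary functions always build a frame; stubs only when forced to.
  return info.stack_slots > 0 || info.is_non_deferred_calling || !info.is_stub;
}

bool NeedsDeferredFrame(const CompilationInfoModel& info) {
  return !NeedsEagerFrame(info) && info.is_deferred_calling;  // never both true
}
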
@@ -84,12 +95,12 @@ class LCodeGen BASE_EMBEDDED {
Register EmitLoadRegister(LOperand* op, Register scratch);
// LOperand must be a double register.
- DoubleRegister ToDoubleRegister(LOperand* op) const;
+ DwVfpRegister ToDoubleRegister(LOperand* op) const;
// LOperand is loaded into dbl_scratch, unless already a double register.
- DoubleRegister EmitLoadDoubleRegister(LOperand* op,
- SwVfpRegister flt_scratch,
- DoubleRegister dbl_scratch);
+ DwVfpRegister EmitLoadDoubleRegister(LOperand* op,
+ SwVfpRegister flt_scratch,
+ DwVfpRegister dbl_scratch);
int ToInteger32(LConstantOperand* op) const;
double ToDouble(LConstantOperand* op) const;
Operand ToOperand(LOperand* op);
@@ -128,10 +139,11 @@ class LCodeGen BASE_EMBEDDED {
void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
void DoDeferredAllocateObject(LAllocateObject* instr);
+ void DoDeferredAllocate(LAllocate* instr);
void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
Label* map_check);
- void DoCheckMapCommon(Register reg, Register scratch, Handle<Map> map,
+ void DoCheckMapCommon(Register map_reg, Handle<Map> map,
CompareMapMode mode, LEnvironment* env);
// Parallel move support.
@@ -193,7 +205,7 @@ class LCodeGen BASE_EMBEDDED {
Register temporary2);
int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
- int GetParameterCount() const { return scope()->num_parameters(); }
+ int GetParameterCount() const { return info()->num_parameters(); }
void Abort(const char* reason);
void Comment(const char* format, ...);
@@ -267,15 +279,17 @@ class LCodeGen BASE_EMBEDDED {
LOperand* op,
bool is_tagged,
bool is_uint32,
+ bool arguments_known,
int arguments_index,
int arguments_count);
+ void RegisterDependentCodeForEmbeddedMaps(Handle<Code> code);
void PopulateDeoptimizationData(Handle<Code> code);
int DefineDeoptimizationLiteral(Handle<Object> literal);
void PopulateDeoptimizationLiteralsWithInlinedFunctions();
Register ToRegister(int index) const;
- DoubleRegister ToDoubleRegister(int index) const;
+ DwVfpRegister ToDoubleRegister(int index) const;
// Specific math operations - used from DoUnaryMathOperation.
void EmitIntegerMathAbs(LUnaryMathOperation* instr);
@@ -308,14 +322,11 @@ class LCodeGen BASE_EMBEDDED {
void EmitGoto(int block);
void EmitBranch(int left_block, int right_block, Condition cc);
void EmitNumberUntagD(Register input,
- DoubleRegister result,
+ DwVfpRegister result,
bool deoptimize_on_undefined,
bool deoptimize_on_minus_zero,
- LEnvironment* env);
-
- void DeoptIfTaggedButNotSmi(LEnvironment* environment,
- HValue* value,
- LOperand* operand);
+ LEnvironment* env,
+ NumberUntagDMode mode);
// Emits optimized code for typeof x == "y". Modifies input register.
// Returns the condition on which a final split to
@@ -355,7 +366,8 @@ class LCodeGen BASE_EMBEDDED {
void EmitDeepCopy(Handle<JSObject> object,
Register result,
Register source,
- int* offset);
+ int* offset,
+ AllocationSiteMode mode);
// Emit optimized code for integer division.
// Inputs are signed.
@@ -369,11 +381,15 @@ class LCodeGen BASE_EMBEDDED {
LEnvironment* environment);
struct JumpTableEntry {
- explicit inline JumpTableEntry(Address entry)
+ inline JumpTableEntry(Address entry, bool frame, bool is_lazy)
: label(),
- address(entry) { }
+ address(entry),
+ needs_frame(frame),
+ is_lazy_deopt(is_lazy) { }
Label label;
Address address;
+ bool needs_frame;
+ bool is_lazy_deopt;
};
void EnsureSpaceForLazyDeopt();
@@ -395,6 +411,7 @@ class LCodeGen BASE_EMBEDDED {
ZoneList<LEnvironment*> deoptimizations_;
ZoneList<JumpTableEntry> deopt_jump_table_;
ZoneList<Handle<Object> > deoptimization_literals_;
+ ZoneList<Handle<Map> > prototype_maps_;
int inlined_function_count_;
Scope* const scope_;
Status status_;
@@ -402,6 +419,7 @@ class LCodeGen BASE_EMBEDDED {
ZoneList<LDeferredCode*> deferred_;
int osr_pc_offset_;
int last_lazy_deopt_pc_;
+ bool frame_is_built_;
// Builder that keeps track of safepoints in the code. The table
// itself is emitted at the end of the generated code.
@@ -417,6 +435,7 @@ class LCodeGen BASE_EMBEDDED {
PushSafepointRegistersScope(LCodeGen* codegen,
Safepoint::Kind kind)
: codegen_(codegen) {
+ ASSERT(codegen_->info()->is_calling());
ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
codegen_->expected_safepoint_kind_ = kind;
diff --git a/src/3rdparty/v8/src/arm/lithium-gap-resolver-arm.cc b/src/3rdparty/v8/src/arm/lithium-gap-resolver-arm.cc
index c100720..4df1338 100644
--- a/src/3rdparty/v8/src/arm/lithium-gap-resolver-arm.cc
+++ b/src/3rdparty/v8/src/arm/lithium-gap-resolver-arm.cc
@@ -171,8 +171,10 @@ void LGapResolver::BreakCycle(int index) {
} else if (source->IsStackSlot()) {
__ ldr(kSavedValueRegister, cgen_->ToMemOperand(source));
} else if (source->IsDoubleRegister()) {
+ CpuFeatures::Scope scope(VFP2);
__ vmov(kScratchDoubleReg, cgen_->ToDoubleRegister(source));
} else if (source->IsDoubleStackSlot()) {
+ CpuFeatures::Scope scope(VFP2);
__ vldr(kScratchDoubleReg, cgen_->ToMemOperand(source));
} else {
UNREACHABLE();
@@ -192,8 +194,10 @@ void LGapResolver::RestoreValue() {
} else if (saved_destination_->IsStackSlot()) {
__ str(kSavedValueRegister, cgen_->ToMemOperand(saved_destination_));
} else if (saved_destination_->IsDoubleRegister()) {
+ CpuFeatures::Scope scope(VFP2);
__ vmov(cgen_->ToDoubleRegister(saved_destination_), kScratchDoubleReg);
} else if (saved_destination_->IsDoubleStackSlot()) {
+ CpuFeatures::Scope scope(VFP2);
__ vstr(kScratchDoubleReg, cgen_->ToMemOperand(saved_destination_));
} else {
UNREACHABLE();
@@ -229,7 +233,8 @@ void LGapResolver::EmitMove(int index) {
MemOperand destination_operand = cgen_->ToMemOperand(destination);
if (in_cycle_) {
if (!destination_operand.OffsetIsUint12Encodable()) {
- // ip is overwritten while saving the value to the destination.
+ CpuFeatures::Scope scope(VFP2);
+ // ip is overwritten while saving the value to the destination.
// Therefore we can't use ip. It is OK if the read from the source
// destroys ip, since that happens before the value is read.
__ vldr(kScratchDoubleReg.low(), source_operand);
@@ -267,7 +272,8 @@ void LGapResolver::EmitMove(int index) {
}
} else if (source->IsDoubleRegister()) {
- DoubleRegister source_register = cgen_->ToDoubleRegister(source);
+ CpuFeatures::Scope scope(VFP2);
+ DwVfpRegister source_register = cgen_->ToDoubleRegister(source);
if (destination->IsDoubleRegister()) {
__ vmov(cgen_->ToDoubleRegister(destination), source_register);
} else {
@@ -276,7 +282,8 @@ void LGapResolver::EmitMove(int index) {
}
} else if (source->IsDoubleStackSlot()) {
- MemOperand source_operand = cgen_->ToMemOperand(source);
+ CpuFeatures::Scope scope(VFP2);
+ MemOperand source_operand = cgen_->ToMemOperand(source);
if (destination->IsDoubleRegister()) {
__ vldr(cgen_->ToDoubleRegister(destination), source_operand);
} else {
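
The gap-resolver hunks above open a CpuFeatures::Scope around every branch that emits VFP loads, stores, or moves. The scope is an RAII guard under which the assembler may emit instructions for the named feature; in debug builds entering it asserts the feature is actually supported. A minimal stand-in for the idiom (not part of the patch):

#include <cassert>

class FeatureScope {  // stands in for CpuFeatures::Scope
 public:
  explicit FeatureScope(bool feature_supported) {
    assert(feature_supported);  // debug builds reject unsupported features
  }
};

void EmitDoubleMove(bool vfp2_supported) {
  FeatureScope scope(vfp2_supported);
  // ... vldr/vmov/vstr may be emitted only inside this scope ...
}
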
diff --git a/src/3rdparty/v8/src/arm/macro-assembler-arm.cc b/src/3rdparty/v8/src/arm/macro-assembler-arm.cc
index dcc7149..326f555 100644
--- a/src/3rdparty/v8/src/arm/macro-assembler-arm.cc
+++ b/src/3rdparty/v8/src/arm/macro-assembler-arm.cc
@@ -290,7 +290,7 @@ void MacroAssembler::Move(Register dst, Register src, Condition cond) {
}
-void MacroAssembler::Move(DoubleRegister dst, DoubleRegister src) {
+void MacroAssembler::Move(DwVfpRegister dst, DwVfpRegister src) {
ASSERT(CpuFeatures::IsSupported(VFP2));
CpuFeatures::Scope scope(VFP2);
if (!dst.is(src)) {
@@ -304,7 +304,7 @@ void MacroAssembler::And(Register dst, Register src1, const Operand& src2,
if (!src2.is_reg() &&
!src2.must_output_reloc_info(this) &&
src2.immediate() == 0) {
- mov(dst, Operand(0, RelocInfo::NONE), LeaveCC, cond);
+ mov(dst, Operand::Zero(), LeaveCC, cond);
} else if (!src2.is_single_instruction(this) &&
!src2.must_output_reloc_info(this) &&
CpuFeatures::IsSupported(ARMv7) &&
@@ -410,7 +410,7 @@ void MacroAssembler::Usat(Register dst, int satpos, const Operand& src,
}
tst(dst, Operand(~satval));
b(eq, &done);
- mov(dst, Operand(0, RelocInfo::NONE), LeaveCC, mi); // 0 if negative.
+ mov(dst, Operand::Zero(), LeaveCC, mi); // 0 if negative.
mov(dst, Operand(satval), LeaveCC, pl); // satval if positive.
bind(&done);
} else {
@@ -423,8 +423,10 @@ void MacroAssembler::LoadRoot(Register destination,
Heap::RootListIndex index,
Condition cond) {
if (CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS) &&
- !Heap::RootCanBeWrittenAfterInitialization(index)) {
- Handle<Object> root(isolate()->heap()->roots_array_start()[index]);
+ !Heap::RootCanBeWrittenAfterInitialization(index) &&
+ !predictable_code_size()) {
+ Handle<Object> root(isolate()->heap()->roots_array_start()[index],
+ isolate());
if (!isolate()->heap()->InNewSpace(*root)) {
// The CPU supports fast immediate values, and this root will never
// change. We will load it as a relocatable immediate value.
@@ -641,20 +643,24 @@ void MacroAssembler::PopSafepointRegisters() {
void MacroAssembler::PushSafepointRegistersAndDoubles() {
+ // Number of d-regs not known at snapshot time.
+ ASSERT(!Serializer::enabled());
PushSafepointRegisters();
- sub(sp, sp, Operand(DwVfpRegister::kNumAllocatableRegisters *
+ sub(sp, sp, Operand(DwVfpRegister::NumAllocatableRegisters() *
kDoubleSize));
- for (int i = 0; i < DwVfpRegister::kNumAllocatableRegisters; i++) {
+ for (int i = 0; i < DwVfpRegister::NumAllocatableRegisters(); i++) {
vstr(DwVfpRegister::FromAllocationIndex(i), sp, i * kDoubleSize);
}
}
void MacroAssembler::PopSafepointRegistersAndDoubles() {
- for (int i = 0; i < DwVfpRegister::kNumAllocatableRegisters; i++) {
+ // Number of d-regs not known at snapshot time.
+ ASSERT(!Serializer::enabled());
+ for (int i = 0; i < DwVfpRegister::NumAllocatableRegisters(); i++) {
vldr(DwVfpRegister::FromAllocationIndex(i), sp, i * kDoubleSize);
}
- add(sp, sp, Operand(DwVfpRegister::kNumAllocatableRegisters *
+ add(sp, sp, Operand(DwVfpRegister::NumAllocatableRegisters() *
kDoubleSize));
PopSafepointRegisters();
}
@@ -689,8 +695,10 @@ MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
+ // Number of d-regs not known at snapshot time.
+ ASSERT(!Serializer::enabled());
// General purpose registers are pushed last on the stack.
- int doubles_size = DwVfpRegister::kNumAllocatableRegisters * kDoubleSize;
+ int doubles_size = DwVfpRegister::NumAllocatableRegisters() * kDoubleSize;
int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
return MemOperand(sp, doubles_size + register_offset);
}
@@ -769,15 +777,6 @@ void MacroAssembler::Strd(Register src1, Register src2,
}
-void MacroAssembler::ClearFPSCRBits(const uint32_t bits_to_clear,
- const Register scratch,
- const Condition cond) {
- vmrs(scratch, cond);
- bic(scratch, scratch, Operand(bits_to_clear), LeaveCC, cond);
- vmsr(scratch, cond);
-}
-
-
void MacroAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1,
const DwVfpRegister src2,
const Condition cond) {
@@ -813,19 +812,18 @@ void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,
void MacroAssembler::Vmov(const DwVfpRegister dst,
const double imm,
- const Register scratch,
- const Condition cond) {
+ const Register scratch) {
ASSERT(CpuFeatures::IsEnabled(VFP2));
static const DoubleRepresentation minus_zero(-0.0);
static const DoubleRepresentation zero(0.0);
DoubleRepresentation value(imm);
// Handle special values first.
if (value.bits == zero.bits) {
- vmov(dst, kDoubleRegZero, cond);
+ vmov(dst, kDoubleRegZero);
} else if (value.bits == minus_zero.bits) {
- vneg(dst, kDoubleRegZero, cond);
+ vneg(dst, kDoubleRegZero);
} else {
- vmov(dst, imm, scratch, cond);
+ vmov(dst, imm, scratch);
}
}
@@ -863,7 +861,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
// Reserve room for saved entry sp and code object.
sub(sp, sp, Operand(2 * kPointerSize));
if (emit_debug_code()) {
- mov(ip, Operand(0));
+ mov(ip, Operand::Zero());
str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset));
}
mov(ip, Operand(CodeObject()));
@@ -877,12 +875,17 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
// Optionally save all double registers.
if (save_doubles) {
- DwVfpRegister first = d0;
- DwVfpRegister last =
- DwVfpRegister::from_code(DwVfpRegister::kNumRegisters - 1);
- vstm(db_w, sp, first, last);
+ CpuFeatures::Scope scope(VFP2);
+ // Check CPU flags for number of registers, setting the Z condition flag.
+ CheckFor32DRegs(ip);
+
+ // Push registers d0-d15, and possibly d16-d31, on the stack.
+ // If d16-d31 are not pushed, decrease the stack pointer instead.
+ vstm(db_w, sp, d16, d31, ne);
+ sub(sp, sp, Operand(16 * kDoubleSize), LeaveCC, eq);
+ vstm(db_w, sp, d0, d15);
// Note that d0 will be accessible at
- // fp - 2 * kPointerSize - DwVfpRegister::kNumRegisters * kDoubleSize,
+ // fp - 2 * kPointerSize - DwVfpRegister::kMaxNumRegisters * kDoubleSize,
// since the sp slot and code slot were pushed after the fp.
}
@@ -937,17 +940,24 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles,
Register argument_count) {
// Optionally restore all double registers.
if (save_doubles) {
+ CpuFeatures::Scope scope(VFP2);
// Calculate the stack location of the saved doubles and restore them.
const int offset = 2 * kPointerSize;
- sub(r3, fp, Operand(offset + DwVfpRegister::kNumRegisters * kDoubleSize));
- DwVfpRegister first = d0;
- DwVfpRegister last =
- DwVfpRegister::from_code(DwVfpRegister::kNumRegisters - 1);
- vldm(ia, r3, first, last);
+ sub(r3, fp,
+ Operand(offset + DwVfpRegister::kMaxNumRegisters * kDoubleSize));
+
+ // Check CPU flags for number of registers, setting the Z condition flag.
+ CheckFor32DRegs(ip);
+
+ // Pop registers d0-d15, and possibly d16-d31, from r3.
+ // If d16-d31 are not popped, increase r3 instead.
+ vldm(ia_w, r3, d0, d15);
+ vldm(ia_w, r3, d16, d31, ne);
+ add(r3, r3, Operand(16 * kDoubleSize), LeaveCC, eq);
}
// Clear top frame.
- mov(r3, Operand(0, RelocInfo::NONE));
+ mov(r3, Operand::Zero());
mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
str(r3, MemOperand(ip));
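
The exit frame now always reserves room for 32 d-registers, but the code only touches d16-d31 when CheckFor32DRegs leaves the Z flag clear. A host-side model (not part of the patch) of the restore half; the save half in EnterExitFrame mirrors it in reverse:

#include <cstring>

const int kDoubleSizeBytes = 8;

void RestoreDoubles(const unsigned char* saved_area, double regs[32],
                    bool has_32_dregs) {
  std::memcpy(regs, saved_area, 16 * kDoubleSizeBytes);  // vldm ia_w, d0-d15
  if (has_32_dregs) {
    std::memcpy(regs + 16, saved_area + 16 * kDoubleSizeBytes,
                16 * kDoubleSizeBytes);                  // vldm ia_w, d16-d31, ne
  }
  // Otherwise the d16-d31 slots are simply skipped (the add ..., eq path).
}
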
@@ -966,7 +976,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles,
}
}
-void MacroAssembler::GetCFunctionDoubleResult(const DoubleRegister dst) {
+void MacroAssembler::GetCFunctionDoubleResult(const DwVfpRegister dst) {
ASSERT(CpuFeatures::IsSupported(VFP2));
if (use_eabi_hardfloat()) {
Move(dst, d0);
@@ -1217,11 +1227,11 @@ void MacroAssembler::IsObjectJSStringType(Register object,
#ifdef ENABLE_DEBUGGER_SUPPORT
void MacroAssembler::DebugBreak() {
- mov(r0, Operand(0, RelocInfo::NONE));
+ mov(r0, Operand::Zero());
mov(r1, Operand(ExternalReference(Runtime::kDebugBreak, isolate())));
CEntryStub ces(1);
ASSERT(AllowThisStubCall(&ces));
- Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
+ Call(ces.GetCode(isolate()), RelocInfo::DEBUG_BREAK);
}
#endif
@@ -1248,7 +1258,7 @@ void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
// Push the frame pointer, context, state, and code object.
if (kind == StackHandler::JS_ENTRY) {
mov(r7, Operand(Smi::FromInt(0))); // Indicates no context.
- mov(ip, Operand(0, RelocInfo::NONE)); // NULL frame pointer.
+ mov(ip, Operand::Zero()); // NULL frame pointer.
stm(db_w, sp, r5.bit() | r6.bit() | r7.bit() | ip.bit());
} else {
stm(db_w, sp, r5.bit() | r6.bit() | cp.bit() | fp.bit());
@@ -1372,7 +1382,7 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
ldr(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
// In debug mode, make sure the lexical context is set.
#ifdef DEBUG
- cmp(scratch, Operand(0, RelocInfo::NONE));
+ cmp(scratch, Operand::Zero());
Check(ne, "we should not have an empty lexical context");
#endif
@@ -1616,6 +1626,18 @@ void MacroAssembler::AllocateInNewSpace(int object_size,
ldr(ip, MemOperand(topaddr, limit - top));
}
+ if ((flags & DOUBLE_ALIGNMENT) != 0) {
+ // Align the next allocation. Storing the filler map without checking top is
+ // always safe because the limit of the heap is always aligned.
+ ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
+ and_(scratch2, result, Operand(kDoubleAlignmentMask), SetCC);
+ Label aligned;
+ b(eq, &aligned);
+ mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
+ str(scratch2, MemOperand(result, kDoubleSize / 2, PostIndex));
+ bind(&aligned);
+ }
+
// Calculate new top and bail out if new space is exhausted. Use result
// to calculate the new top.
if (obj_size_operand.is_single_instruction(this)) {
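
The DOUBLE_ALIGNMENT block added above rounds an odd allocation top up by one pointer and plugs the gap with a one-pointer filler map so the heap stays parseable. A host-side model (not part of the patch), 32-bit sizes assumed:

#include <cstdint>

const uintptr_t kPtrSize = 4;
const uintptr_t kDblAlignmentMask = 8 - 1;  // kDoubleAlignment - 1

uintptr_t AlignAllocationForDouble(uintptr_t top,
                                   void (*write_filler_map)(uintptr_t)) {
  if ((top & kDblAlignmentMask) != 0) {  // and_(..., SetCC); b(eq, &aligned)
    write_filler_map(top);               // store one_pointer_filler_map
    top += kPtrSize;                     // str ..., PostIndex bumps the cursor
  }
  return top;
}
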
@@ -1701,6 +1723,18 @@ void MacroAssembler::AllocateInNewSpace(Register object_size,
ldr(ip, MemOperand(topaddr, limit - top));
}
+ if ((flags & DOUBLE_ALIGNMENT) != 0) {
+ // Align the next allocation. Storing the filler map without checking top is
+ // always safe because the limit of the heap is always aligned.
+ ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
+ and_(scratch2, result, Operand(kDoubleAlignmentMask), SetCC);
+ Label aligned;
+ b(eq, &aligned);
+ mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
+ str(scratch2, MemOperand(result, kDoubleSize / 2, PostIndex));
+ bind(&aligned);
+ }
+
// Calculate new top and bail out if new space is exhausted. Use result
// to calculate the new top. Object size may be in words so a shift is
// required to get the number of bytes.
@@ -1786,10 +1820,10 @@ void MacroAssembler::AllocateAsciiString(Register result,
Label* gc_required) {
// Calculate the number of bytes needed for the characters in the string while
// observing object alignment.
- ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
+ ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
ASSERT(kCharSize == 1);
add(scratch1, length,
- Operand(kObjectAlignmentMask + SeqAsciiString::kHeaderSize));
+ Operand(kObjectAlignmentMask + SeqOneByteString::kHeaderSize));
and_(scratch1, scratch1, Operand(~kObjectAlignmentMask));
// Allocate ASCII string in new space.
@@ -1955,13 +1989,13 @@ void MacroAssembler::CheckFastSmiElements(Register map,
void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
Register key_reg,
- Register receiver_reg,
Register elements_reg,
Register scratch1,
Register scratch2,
Register scratch3,
Register scratch4,
- Label* fail) {
+ Label* fail,
+ int elements_offset) {
Label smi_value, maybe_nan, have_double_value, is_nan, done;
Register mantissa_reg = scratch2;
Register exponent_reg = scratch3;
@@ -1988,8 +2022,10 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
bind(&have_double_value);
add(scratch1, elements_reg,
Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize));
- str(mantissa_reg, FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize));
- uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
+ str(mantissa_reg, FieldMemOperand(
+ scratch1, FixedDoubleArray::kHeaderSize - elements_offset));
+ uint32_t offset = FixedDoubleArray::kHeaderSize - elements_offset +
+ sizeof(kHoleNanLower32);
str(exponent_reg, FieldMemOperand(scratch1, offset));
jmp(&done);
@@ -1998,7 +2034,7 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
// it's an Infinity, and the non-NaN code path applies.
b(gt, &is_nan);
ldr(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
- cmp(mantissa_reg, Operand(0));
+ cmp(mantissa_reg, Operand::Zero());
b(eq, &have_double_value);
bind(&is_nan);
// Load canonical NaN for storing into the double array.
@@ -2010,7 +2046,8 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
bind(&smi_value);
add(scratch1, elements_reg,
- Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
+ Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag -
+ elements_offset));
add(scratch1, scratch1,
Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize));
// scratch1 is now effective address of the double element
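
With the new elements_offset parameter, a caller may hand in an elements pointer that is already advanced into the array, and the header/untagging adjustment shrinks to match. An address model (not part of the patch); key is an untagged index here, whereas the real code shifts a smi, and the header size below is an assumption:

#include <cstdint>

const int kHeapObjTag = 1;
const int kFixedDoubleArrayHeader = 8;  // assumed header size
const int kDblSizeLog2 = 3;

uintptr_t DoubleElementAddress(uintptr_t elements_ptr, uint32_t key,
                               int elements_offset) {
  uintptr_t base =
      elements_ptr + (kFixedDoubleArrayHeader - kHeapObjTag - elements_offset);
  return base + (static_cast<uintptr_t>(key) << kDblSizeLog2);
}
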
@@ -2182,15 +2219,17 @@ void MacroAssembler::TryGetFunctionPrototype(Register function,
}
-void MacroAssembler::CallStub(CodeStub* stub, Condition cond) {
+void MacroAssembler::CallStub(CodeStub* stub,
+ TypeFeedbackId ast_id,
+ Condition cond) {
ASSERT(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
- Call(stub->GetCode(), RelocInfo::CODE_TARGET, TypeFeedbackId::None(), cond);
+ Call(stub->GetCode(isolate()), RelocInfo::CODE_TARGET, ast_id, cond);
}
void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) {
ASSERT(allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe());
- Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
+ Jump(stub->GetCode(isolate()), RelocInfo::CODE_TARGET, cond);
}
@@ -2202,13 +2241,13 @@ static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function,
int stack_space) {
ExternalReference next_address =
- ExternalReference::handle_scope_next_address();
+ ExternalReference::handle_scope_next_address(isolate());
const int kNextOffset = 0;
const int kLimitOffset = AddressOffset(
- ExternalReference::handle_scope_limit_address(),
+ ExternalReference::handle_scope_limit_address(isolate()),
next_address);
const int kLevelOffset = AddressOffset(
- ExternalReference::handle_scope_level_address(),
+ ExternalReference::handle_scope_level_address(isolate()),
next_address);
// Allocate HandleScope in callee-save registers.
@@ -2219,19 +2258,35 @@ void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function,
add(r6, r6, Operand(1));
str(r6, MemOperand(r7, kLevelOffset));
+ if (FLAG_log_timer_events) {
+ FrameScope frame(this, StackFrame::MANUAL);
+ PushSafepointRegisters();
+ PrepareCallCFunction(0, r0);
+ CallCFunction(ExternalReference::log_enter_external_function(isolate()), 0);
+ PopSafepointRegisters();
+ }
+
// Native call returns to the DirectCEntry stub which redirects to the
// return address pushed on stack (could have moved after GC).
// DirectCEntry stub itself is generated early and never moves.
DirectCEntryStub stub;
stub.GenerateCall(this, function);
+ if (FLAG_log_timer_events) {
+ FrameScope frame(this, StackFrame::MANUAL);
+ PushSafepointRegisters();
+ PrepareCallCFunction(0, r0);
+ CallCFunction(ExternalReference::log_leave_external_function(isolate()), 0);
+ PopSafepointRegisters();
+ }
+
Label promote_scheduled_exception;
Label delete_allocated_handles;
Label leave_exit_frame;
// If result is non-zero, dereference to get the result value
// otherwise set it to undefined.
- cmp(r0, Operand(0));
+ cmp(r0, Operand::Zero());
LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
ldr(r0, MemOperand(r0), ne);
@@ -2410,7 +2465,7 @@ void MacroAssembler::ConvertToInt32(Register source,
HeapNumber::kExponentBits);
// Load dest with zero. We use this either for the final shift or
// for the answer.
- mov(dest, Operand(0, RelocInfo::NONE));
+ mov(dest, Operand::Zero());
// Check whether the exponent matches a 32 bit signed int that is not a Smi.
// A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased). This is
// the exponent that we are fastest at and also the highest exponent we can
@@ -2464,12 +2519,26 @@ void MacroAssembler::ConvertToInt32(Register source,
// Move down according to the exponent.
mov(dest, Operand(scratch, LSR, dest));
// Fix sign if sign bit was set.
- rsb(dest, dest, Operand(0, RelocInfo::NONE), LeaveCC, ne);
+ rsb(dest, dest, Operand::Zero(), LeaveCC, ne);
bind(&done);
}
}
+void MacroAssembler::TryFastDoubleToInt32(Register result,
+ DwVfpRegister double_input,
+ DwVfpRegister double_scratch,
+ Label* done) {
+ ASSERT(!double_input.is(double_scratch));
+
+ vcvt_s32_f64(double_scratch.low(), double_input);
+ vmov(result, double_scratch.low());
+ vcvt_f64_s32(double_scratch, double_scratch.low());
+ VFPCompareAndSetFlags(double_input, double_scratch);
+ b(eq, done);
+}
+
+
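
TryFastDoubleToInt32 performs a convert/convert-back round trip and compares the results through the FPU flags. A host-side model (not part of the patch); the raw casts stand in for vcvt, and unlike the VFP hardware, which saturates, they would be undefined behaviour for out-of-range inputs:

#include <cstdint>

bool FitsInt32Exactly(double value, int32_t* result) {
  int32_t truncated = static_cast<int32_t>(value);     // vcvt_s32_f64
  double round_trip = static_cast<double>(truncated);  // vcvt_f64_s32
  if (round_trip == value) {                           // VFPCompareAndSetFlags
    *result = truncated;                               // vmov result, s-reg
    return true;                                       // b(eq, done)
  }
  return false;  // falls through to the slow conversion path
}
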
void MacroAssembler::EmitVFPTruncate(VFPRoundingMode rounding_mode,
Register result,
DwVfpRegister double_input,
@@ -2485,11 +2554,7 @@ void MacroAssembler::EmitVFPTruncate(VFPRoundingMode rounding_mode,
Label done;
// Test for values that can be exactly represented as a signed 32-bit integer.
- vcvt_s32_f64(double_scratch.low(), double_input);
- vmov(result, double_scratch.low());
- vcvt_f64_s32(double_scratch, double_scratch.low());
- VFPCompareAndSetFlags(double_input, double_scratch);
- b(eq, &done);
+ TryFastDoubleToInt32(result, double_input, double_scratch, &done);
// Convert to integer, respecting rounding mode.
int32_t check_inexact_conversion =
@@ -2545,7 +2610,7 @@ void MacroAssembler::EmitOutOfInt32RangeTruncate(Register result,
// Check for Infinity and NaNs, which should return 0.
cmp(result, Operand(HeapNumber::kExponentMask));
- mov(result, Operand(0), LeaveCC, eq);
+ mov(result, Operand::Zero(), LeaveCC, eq);
b(eq, &done);
// Express exponent as delta to (number of mantissa bits + 31).
@@ -2557,7 +2622,7 @@ void MacroAssembler::EmitOutOfInt32RangeTruncate(Register result,
// If the delta is strictly positive, all bits would be shifted away,
// which means that we can return 0.
b(le, &normal_exponent);
- mov(result, Operand(0));
+ mov(result, Operand::Zero());
b(&done);
bind(&normal_exponent);
@@ -2585,7 +2650,7 @@ void MacroAssembler::EmitOutOfInt32RangeTruncate(Register result,
b(&pos_shift, ge);
// Negate scratch.
- rsb(scratch, scratch, Operand(0));
+ rsb(scratch, scratch, Operand::Zero());
mov(input_low, Operand(input_low, LSL, scratch));
b(&shift_done);
@@ -2595,10 +2660,10 @@ void MacroAssembler::EmitOutOfInt32RangeTruncate(Register result,
bind(&shift_done);
orr(input_high, input_high, Operand(input_low));
// Restore sign if necessary.
- cmp(sign, Operand(0));
+ cmp(sign, Operand::Zero());
result = sign;
sign = no_reg;
- rsb(result, input_high, Operand(0), LeaveCC, ne);
+ rsb(result, input_high, Operand::Zero(), LeaveCC, ne);
mov(result, input_high, LeaveCC, eq);
bind(&done);
}
@@ -2606,7 +2671,7 @@ void MacroAssembler::EmitOutOfInt32RangeTruncate(Register result,
void MacroAssembler::EmitECMATruncate(Register result,
DwVfpRegister double_input,
- SwVfpRegister single_scratch,
+ DwVfpRegister double_scratch,
Register scratch,
Register input_high,
Register input_low) {
@@ -2617,17 +2682,33 @@ void MacroAssembler::EmitECMATruncate(Register result,
ASSERT(!scratch.is(result) &&
!scratch.is(input_high) &&
!scratch.is(input_low));
- ASSERT(!single_scratch.is(double_input.low()) &&
- !single_scratch.is(double_input.high()));
+ ASSERT(!double_input.is(double_scratch));
Label done;
+ // Test if the value can be exactly represented as a signed integer.
+ vcvt_s32_f64(double_scratch.low(), double_input);
+ vmov(result, double_scratch.low());
+ vcvt_f64_s32(double_scratch, double_scratch.low());
+ // Note: this comparison is cheaper than reading the FPSCR exception bits.
+ VFPCompareAndSetFlags(double_input, double_scratch);
+ b(eq, &done);
+
+ // Check the exception flags. If they are not set, we are done.
+ // If they are set, it could be because of the conversion above, or because
+ // they were set before this code.
+ vmrs(scratch);
+ tst(scratch, Operand(kVFPOverflowExceptionBit |
+ kVFPUnderflowExceptionBit |
+ kVFPInvalidOpExceptionBit));
+ b(eq, &done);
+
// Clear cumulative exception flags.
- ClearFPSCRBits(kVFPExceptionMask, scratch);
+ bic(scratch, scratch, Operand(kVFPExceptionMask));
+ vmsr(scratch);
// Try a conversion to a signed integer.
- vcvt_s32_f64(single_scratch, double_input);
- vmov(result, single_scratch);
- // Retrieve he FPSCR.
+ vcvt_s32_f64(double_scratch.low(), double_input);
+ // Retrieve the FPSCR.
vmrs(scratch);
// Check for overflow and NaNs.
tst(scratch, Operand(kVFPOverflowExceptionBit |
@@ -2697,7 +2778,10 @@ void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
const Runtime::Function* function = Runtime::FunctionForId(id);
mov(r0, Operand(function->nargs));
mov(r1, Operand(ExternalReference(function, isolate())));
- CEntryStub stub(1, kSaveFPRegs);
+ SaveFPRegsMode mode = CpuFeatures::IsSupported(VFP2)
+ ? kSaveFPRegs
+ : kDontSaveFPRegs;
+ CEntryStub stub(1, mode);
CallStub(&stub);
}
@@ -2740,7 +2824,7 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
#endif
mov(r1, Operand(builtin));
CEntryStub stub(1);
- Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+ Jump(stub.GetCode(isolate()), RelocInfo::CODE_TARGET);
}
@@ -2992,6 +3076,19 @@ void MacroAssembler::LoadGlobalFunction(int index, Register function) {
}
+void MacroAssembler::LoadArrayFunction(Register function) {
+ // Load the global or builtins object from the current context.
+ ldr(function,
+ MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ // Load the global context from the global or builtins object.
+ ldr(function,
+ FieldMemOperand(function, GlobalObject::kGlobalContextOffset));
+ // Load the array function from the native context.
+ ldr(function,
+ MemOperand(function, Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
+}
+
+
void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
Register map,
Register scratch) {
@@ -3234,7 +3331,7 @@ void MacroAssembler::CopyBytes(Register src,
// Align src before copying in word size chunks.
bind(&align_loop);
- cmp(length, Operand(0));
+ cmp(length, Operand::Zero());
b(eq, &done);
bind(&align_loop_1);
tst(src, Operand(kPointerSize - 1));
@@ -3269,7 +3366,7 @@ void MacroAssembler::CopyBytes(Register src,
// Copy the last bytes if any left.
bind(&byte_loop);
- cmp(length, Operand(0));
+ cmp(length, Operand::Zero());
b(eq, &done);
bind(&byte_loop_1);
ldrb(scratch, MemOperand(src, 1, PostIndex));
@@ -3307,7 +3404,7 @@ void MacroAssembler::CountLeadingZeros(Register zeros, // Answer.
// Order of the next two lines is important: zeros register
// can be the same as source register.
Move(scratch, source);
- mov(zeros, Operand(0, RelocInfo::NONE));
+ mov(zeros, Operand::Zero());
// Top 16.
tst(scratch, Operand(0xffff0000));
add(zeros, zeros, Operand(16), LeaveCC, eq);
@@ -3331,6 +3428,13 @@ void MacroAssembler::CountLeadingZeros(Register zeros, // Answer.
}
+void MacroAssembler::CheckFor32DRegs(Register scratch) {
+ mov(scratch, Operand(ExternalReference::cpu_features()));
+ ldr(scratch, MemOperand(scratch));
+ tst(scratch, Operand(1u << VFP32DREGS));
+}
+
+
void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
Register first,
Register second,
@@ -3369,9 +3473,9 @@ int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
if (use_eabi_hardfloat()) {
// In the hard floating point calling convention, we can use
// all double registers to pass doubles.
- if (num_double_arguments > DoubleRegister::kNumRegisters) {
+ if (num_double_arguments > DoubleRegister::NumRegisters()) {
stack_passed_words +=
- 2 * (num_double_arguments - DoubleRegister::kNumRegisters);
+ 2 * (num_double_arguments - DoubleRegister::NumRegisters());
}
} else {
// In the soft floating point calling convention, every double
@@ -3412,7 +3516,7 @@ void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
}
-void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg) {
+void MacroAssembler::SetCallCDoubleArguments(DwVfpRegister dreg) {
ASSERT(CpuFeatures::IsSupported(VFP2));
if (use_eabi_hardfloat()) {
Move(d0, dreg);
@@ -3422,8 +3526,8 @@ void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg) {
}
-void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg1,
- DoubleRegister dreg2) {
+void MacroAssembler::SetCallCDoubleArguments(DwVfpRegister dreg1,
+ DwVfpRegister dreg2) {
ASSERT(CpuFeatures::IsSupported(VFP2));
if (use_eabi_hardfloat()) {
if (dreg2.is(d0)) {
@@ -3441,7 +3545,7 @@ void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg1,
}
-void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg,
+void MacroAssembler::SetCallCDoubleArguments(DwVfpRegister dreg,
Register reg) {
ASSERT(CpuFeatures::IsSupported(VFP2));
if (use_eabi_hardfloat()) {
@@ -3724,8 +3828,8 @@ void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
void MacroAssembler::ClampDoubleToUint8(Register result_reg,
- DoubleRegister input_reg,
- DoubleRegister temp_double_reg) {
+ DwVfpRegister input_reg,
+ DwVfpRegister temp_double_reg) {
Label above_zero;
Label done;
Label in_bounds;
@@ -3735,7 +3839,7 @@ void MacroAssembler::ClampDoubleToUint8(Register result_reg,
b(gt, &above_zero);
// Double value is less than zero, NaN or Inf, return 0.
- mov(result_reg, Operand(0));
+ mov(result_reg, Operand::Zero());
b(al, &done);
// Double value is >= 255, return 255.
@@ -3818,6 +3922,29 @@ void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
}
+void MacroAssembler::TestJSArrayForAllocationSiteInfo(
+ Register receiver_reg,
+ Register scratch_reg) {
+ Label no_info_available;
+ ExternalReference new_space_start =
+ ExternalReference::new_space_start(isolate());
+ ExternalReference new_space_allocation_top =
+ ExternalReference::new_space_allocation_top_address(isolate());
+ add(scratch_reg, receiver_reg,
+ Operand(JSArray::kSize + AllocationSiteInfo::kSize - kHeapObjectTag));
+ cmp(scratch_reg, Operand(new_space_start));
+ b(lt, &no_info_available);
+ mov(ip, Operand(new_space_allocation_top));
+ ldr(ip, MemOperand(ip));
+ cmp(scratch_reg, ip);
+ b(gt, &no_info_available);
+ ldr(scratch_reg, MemOperand(scratch_reg, -AllocationSiteInfo::kSize));
+ cmp(scratch_reg,
+ Operand(Handle<Map>(isolate()->heap()->allocation_site_info_map())));
+ bind(&no_info_available);
+}
+
+
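
The new test checks whether an AllocationSiteInfo record sits directly behind a JSArray in new space and, when the address is plausible, compares its map word; on a match the condition flags are left eq. A host-side model (not part of the patch) with assumed sizes:

#include <cstdint>

const uintptr_t kJSArraySizeB = 16;   // assumed JSArray::kSize
const uintptr_t kSiteInfoSizeB = 8;   // assumed AllocationSiteInfo::kSize

bool HasAllocationSiteInfo(uintptr_t array, uintptr_t new_space_start,
                           uintptr_t new_space_top, uintptr_t site_info_map,
                           uintptr_t (*load_word)(uintptr_t)) {
  uintptr_t candidate_end = array + kJSArraySizeB + kSiteInfoSizeB;
  if (candidate_end < new_space_start) return false;  // b(lt, &no_info_available)
  if (candidate_end > new_space_top) return false;    // b(gt, &no_info_available)
  // Map word of the record immediately preceding candidate_end.
  return load_word(candidate_end - kSiteInfoSizeB) == site_info_map;
}
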
#ifdef DEBUG
bool AreAliased(Register reg1,
Register reg2,
@@ -3844,7 +3971,6 @@ bool AreAliased(Register reg1,
CodePatcher::CodePatcher(byte* address, int instructions)
: address_(address),
- instructions_(instructions),
size_(instructions * Assembler::kInstrSize),
masm_(NULL, address, size_ + Assembler::kGap) {
// Create a new macro assembler pointing to the address of the code to patch.
diff --git a/src/3rdparty/v8/src/arm/macro-assembler-arm.h b/src/3rdparty/v8/src/arm/macro-assembler-arm.h
index 0ff8579..7b05a67 100644
--- a/src/3rdparty/v8/src/arm/macro-assembler-arm.h
+++ b/src/3rdparty/v8/src/arm/macro-assembler-arm.h
@@ -54,20 +54,6 @@ inline Operand SmiUntagOperand(Register object) {
const Register cp = { 8 }; // JavaScript context pointer
const Register kRootRegister = { 10 }; // Roots array pointer.
-// Flags used for the AllocateInNewSpace functions.
-enum AllocationFlags {
- // No special flags.
- NO_ALLOCATION_FLAGS = 0,
- // Return the pointer to the allocated already tagged as a heap object.
- TAG_OBJECT = 1 << 0,
- // The content of the result register already contains the allocation top in
- // new space.
- RESULT_CONTAINS_TOP = 1 << 1,
- // Specify that the requested size of the space to allocate is specified in
- // words instead of bytes.
- SIZE_IN_WORDS = 1 << 2
-};
-
// Flags used for AllocateHeapNumber
enum TaggingMode {
// Tag the result.
@@ -178,7 +164,7 @@ class MacroAssembler: public Assembler {
// Register move. May do nothing if the registers are identical.
void Move(Register dst, Handle<Object> value);
void Move(Register dst, Register src, Condition cond = al);
- void Move(DoubleRegister dst, DoubleRegister src);
+ void Move(DwVfpRegister dst, DwVfpRegister src);
// Load an object from the root table.
void LoadRoot(Register destination,
@@ -322,6 +308,7 @@ class MacroAssembler: public Assembler {
// Push a handle.
void Push(Handle<Object> handle);
+ void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); }
// Push two registers. Pushes leftmost register first (to highest address).
void Push(Register src1, Register src2, Condition cond = al) {
@@ -473,11 +460,6 @@ class MacroAssembler: public Assembler {
const MemOperand& dst,
Condition cond = al);
- // Clear specified FPSCR bits.
- void ClearFPSCRBits(const uint32_t bits_to_clear,
- const Register scratch,
- const Condition cond = al);
-
// Compare double values and move the result to the normal condition flags.
void VFPCompareAndSetFlags(const DwVfpRegister src1,
const DwVfpRegister src2,
@@ -498,8 +480,7 @@ class MacroAssembler: public Assembler {
void Vmov(const DwVfpRegister dst,
const double imm,
- const Register scratch = no_reg,
- const Condition cond = al);
+ const Register scratch = no_reg);
// Enter exit frame.
// stack_space - extra stack space, used for alignment before call to C.
@@ -533,6 +514,7 @@ class MacroAssembler: public Assembler {
bool can_have_holes);
void LoadGlobalFunction(int index, Register function);
+ void LoadArrayFunction(Register function);
// Load the initial map from the global function. The registers
// function and map can be the same, function is then overwritten.
@@ -831,14 +813,14 @@ class MacroAssembler: public Assembler {
// case scratch2, scratch3 and scratch4 are unmodified.
void StoreNumberToDoubleElements(Register value_reg,
Register key_reg,
- Register receiver_reg,
// All regs below here overwritten.
Register elements_reg,
Register scratch1,
Register scratch2,
Register scratch3,
Register scratch4,
- Label* fail);
+ Label* fail,
+ int elements_offset = 0);
// Compare an object's map with the specified map and its transitioned
// elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Condition flags are
@@ -958,6 +940,14 @@ class MacroAssembler: public Assembler {
DwVfpRegister double_scratch,
Label *not_int32);
+ // Try to convert a double to a signed 32-bit integer. If the double value
+ // can be exactly represented as an integer, the code jumps to 'done' and
+ // 'result' contains the integer value. Otherwise, the code falls through.
+ void TryFastDoubleToInt32(Register result,
+ DwVfpRegister double_input,
+ DwVfpRegister double_scratch,
+ Label* done);
+
// Truncates a double using a specific rounding mode, and writes the value
// to the result register.
// Clears the z flag (ne condition) if an overflow occurs.
@@ -988,7 +978,7 @@ class MacroAssembler: public Assembler {
// Exits with 'result' holding the answer and all other registers clobbered.
void EmitECMATruncate(Register result,
DwVfpRegister double_input,
- SwVfpRegister single_scratch,
+ DwVfpRegister double_scratch,
Register scratch,
Register scratch2,
Register scratch3);
@@ -1002,11 +992,18 @@ class MacroAssembler: public Assembler {
Register source,
Register scratch);
+ // Check whether d16-d31 are available on the CPU. The result is given by the
+ // Z condition flag: Z==0 if d16-d31 available, Z==1 otherwise.
+ void CheckFor32DRegs(Register scratch);
+
+
// ---------------------------------------------------------------------------
// Runtime calls
// Call a code stub.
- void CallStub(CodeStub* stub, Condition cond = al);
+ void CallStub(CodeStub* stub,
+ TypeFeedbackId ast_id = TypeFeedbackId::None(),
+ Condition cond = al);
// Call a code stub.
void TailCallStub(CodeStub* stub, Condition cond = al);
@@ -1057,9 +1054,9 @@ class MacroAssembler: public Assembler {
// whether soft or hard floating point ABI is used. These functions
// abstract parameter passing for the three different ways we call
// C functions from generated code.
- void SetCallCDoubleArguments(DoubleRegister dreg);
- void SetCallCDoubleArguments(DoubleRegister dreg1, DoubleRegister dreg2);
- void SetCallCDoubleArguments(DoubleRegister dreg, Register reg);
+ void SetCallCDoubleArguments(DwVfpRegister dreg);
+ void SetCallCDoubleArguments(DwVfpRegister dreg1, DwVfpRegister dreg2);
+ void SetCallCDoubleArguments(DwVfpRegister dreg, Register reg);
// Calls a C function and cleans up the space for arguments allocated
// by PrepareCallCFunction. The called function is not allowed to trigger a
@@ -1075,7 +1072,7 @@ class MacroAssembler: public Assembler {
int num_reg_arguments,
int num_double_arguments);
- void GetCFunctionDoubleResult(const DoubleRegister dst);
+ void GetCFunctionDoubleResult(const DwVfpRegister dst);
// Calls an API function. Allocates HandleScope, extracts returned value
// from handle and propagates exceptions. Restores context. stack_space
@@ -1288,8 +1285,8 @@ class MacroAssembler: public Assembler {
void ClampUint8(Register output_reg, Register input_reg);
void ClampDoubleToUint8(Register result_reg,
- DoubleRegister input_reg,
- DoubleRegister temp_double_reg);
+ DwVfpRegister input_reg,
+ DwVfpRegister temp_double_reg);
void LoadInstanceDescriptors(Register map, Register descriptors);
@@ -1312,6 +1309,15 @@ class MacroAssembler: public Assembler {
// in r0. Assumes that any other register can be used as a scratch.
void CheckEnumCache(Register null_value, Label* call_runtime);
+ // AllocationSiteInfo support. Arrays may have an associated
+ // AllocationSiteInfo object that can be checked for in order to pretransition
+ // to another type.
+ // On entry, receiver_reg should point to the array object.
+ // scratch_reg gets clobbered.
+ // If allocation info is present, condition flags are set to eq

+ void TestJSArrayForAllocationSiteInfo(Register receiver_reg,
+ Register scratch_reg);
+
private:
void CallCFunctionHelper(Register function,
int num_reg_arguments,
@@ -1364,9 +1370,9 @@ class MacroAssembler: public Assembler {
// This handle will be patched with the code object on installation.
Handle<Object> code_object_;
- // Needs access to SafepointRegisterStackIndex for optimized frame
+ // Needs access to SafepointRegisterStackIndex for compiled frame
// traversal.
- friend class OptimizedFrame;
+ friend class StandardFrame;
};
@@ -1395,7 +1401,6 @@ class CodePatcher {
private:
byte* address_; // The address of the code being patched.
- int instructions_; // Number of instructions of the expected patch size.
int size_; // Number of bytes of the expected patch size.
MacroAssembler masm_; // Macro assembler used to generate the code.
};
diff --git a/src/3rdparty/v8/src/arm/regexp-macro-assembler-arm.cc b/src/3rdparty/v8/src/arm/regexp-macro-assembler-arm.cc
index 17b8677..acb24ef 100644
--- a/src/3rdparty/v8/src/arm/regexp-macro-assembler-arm.cc
+++ b/src/3rdparty/v8/src/arm/regexp-macro-assembler-arm.cc
@@ -204,7 +204,7 @@ void RegExpMacroAssemblerARM::CheckAtStart(Label* on_at_start) {
Label not_at_start;
// Did we start the match at the start of the string at all?
__ ldr(r0, MemOperand(frame_pointer(), kStartIndex));
- __ cmp(r0, Operand(0, RelocInfo::NONE));
+ __ cmp(r0, Operand::Zero());
BranchOrBacktrack(ne, &not_at_start);
// If we did, are we still at the start of the input?
@@ -219,7 +219,7 @@ void RegExpMacroAssemblerARM::CheckAtStart(Label* on_at_start) {
void RegExpMacroAssemblerARM::CheckNotAtStart(Label* on_not_at_start) {
// Did we start the match at the start of the string at all?
__ ldr(r0, MemOperand(frame_pointer(), kStartIndex));
- __ cmp(r0, Operand(0, RelocInfo::NONE));
+ __ cmp(r0, Operand::Zero());
BranchOrBacktrack(ne, on_not_at_start);
// If we did, are we still at the start of the input?
__ ldr(r1, MemOperand(frame_pointer(), kInputStart));
@@ -261,7 +261,7 @@ void RegExpMacroAssemblerARM::CheckCharacters(Vector<const uc16> str,
for (int i = 0; i < str.length(); i++) {
if (mode_ == ASCII) {
__ ldrb(r1, MemOperand(r0, char_size(), PostIndex));
- ASSERT(str[i] <= String::kMaxAsciiCharCode);
+ ASSERT(str[i] <= String::kMaxOneByteCharCode);
__ cmp(r1, Operand(str[i]));
} else {
__ ldrh(r1, MemOperand(r0, char_size(), PostIndex));
@@ -337,8 +337,17 @@ void RegExpMacroAssemblerARM::CheckNotBackReferenceIgnoreCase(
__ b(ne, &fail);
__ sub(r3, r3, Operand('a'));
__ cmp(r3, Operand('z' - 'a')); // Is r3 a lowercase letter?
+#ifndef ENABLE_LATIN_1
__ b(hi, &fail);
-
+#else
+ __ b(ls, &loop_check); // In range 'a'-'z'.
+ // Latin-1: Check for values in range [224,254] but not 247.
+ __ sub(r3, r3, Operand(224 - 'a'));
+ __ cmp(r3, Operand(254 - 224));
+ __ b(hi, &fail); // Weren't Latin-1 letters.
+ __ cmp(r3, Operand(247 - 224)); // Check for 247.
+ __ b(eq, &fail);
+#endif
__ bind(&loop_check);
__ cmp(r0, r1);
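
Under ENABLE_LATIN_1 the case-insensitive matcher, after OR-ing in the 0x20 case bit, accepts the ASCII range 'a'-'z' plus the Latin-1 letters 224-254 with 247 (the division sign) carved out, exactly as the three new compares above do. A host-side model (not part of the patch):

#include <cstdint>

bool IsLowercaseLetterAfterCaseFold(uint8_t c) {
  if (c >= 'a' && c <= 'z') return true;  // b(ls, &loop_check)
  if (c < 224 || c > 254) return false;   // cmp(254 - 224); b(hi, &fail)
  return c != 247;                        // 247 == 0xF7, U+00F7 DIVISION SIGN
}
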
@@ -385,7 +394,7 @@ void RegExpMacroAssemblerARM::CheckNotBackReferenceIgnoreCase(
}
// Check if function returned non-zero for success or zero for failure.
- __ cmp(r0, Operand(0, RelocInfo::NONE));
+ __ cmp(r0, Operand::Zero());
BranchOrBacktrack(eq, on_no_match);
// On success, increment position by length of capture.
__ add(current_input_offset(), current_input_offset(), Operand(r4));
@@ -508,7 +517,7 @@ void RegExpMacroAssemblerARM::CheckBitInTable(
Handle<ByteArray> table,
Label* on_bit_set) {
__ mov(r0, Operand(table));
- if (mode_ != ASCII || kTableMask != String::kMaxAsciiCharCode) {
+ if (mode_ != ASCII || kTableMask != String::kMaxOneByteCharCode) {
__ and_(r1, current_character(), Operand(kTableSize - 1));
__ add(r1, r1, Operand(ByteArray::kHeaderSize - kHeapObjectTag));
} else {
@@ -517,7 +526,7 @@ void RegExpMacroAssemblerARM::CheckBitInTable(
Operand(ByteArray::kHeaderSize - kHeapObjectTag));
}
__ ldrb(r0, MemOperand(r0, r1));
- __ cmp(r0, Operand(0));
+ __ cmp(r0, Operand::Zero());
BranchOrBacktrack(ne, on_bit_set);
}
@@ -613,7 +622,7 @@ bool RegExpMacroAssemblerARM::CheckSpecialCharacterClass(uc16 type,
ExternalReference map = ExternalReference::re_word_character_map();
__ mov(r0, Operand(map));
__ ldrb(r0, MemOperand(r0, current_character()));
- __ cmp(r0, Operand(0));
+ __ cmp(r0, Operand::Zero());
BranchOrBacktrack(eq, on_no_match);
return true;
}
@@ -627,7 +636,7 @@ bool RegExpMacroAssemblerARM::CheckSpecialCharacterClass(uc16 type,
ExternalReference map = ExternalReference::re_word_character_map();
__ mov(r0, Operand(map));
__ ldrb(r0, MemOperand(r0, current_character()));
- __ cmp(r0, Operand(0));
+ __ cmp(r0, Operand::Zero());
BranchOrBacktrack(ne, on_no_match);
if (mode_ != ASCII) {
__ bind(&done);
@@ -675,7 +684,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
// Set frame pointer in space for it if this is not a direct call
// from generated code.
__ add(frame_pointer(), sp, Operand(4 * kPointerSize));
- __ mov(r0, Operand(0, RelocInfo::NONE));
+ __ mov(r0, Operand::Zero());
__ push(r0); // Make room for success counter and initialize it to 0.
__ push(r0); // Make room for "position - 1" constant (value is irrelevant).
// Check if we have space on the stack for registers.
@@ -700,7 +709,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
__ bind(&stack_limit_hit);
CallCheckStackGuardState(r0);
- __ cmp(r0, Operand(0, RelocInfo::NONE));
+ __ cmp(r0, Operand::Zero());
// If returned value is non-zero, we exit with the returned value as result.
__ b(ne, &return_r0);
@@ -728,7 +737,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
Label load_char_start_regexp, start_regexp;
// Load newline if index is at start, previous character otherwise.
- __ cmp(r1, Operand(0, RelocInfo::NONE));
+ __ cmp(r1, Operand::Zero());
__ b(ne, &load_char_start_regexp);
__ mov(current_character(), Operand('\n'), LeaveCC, eq);
__ jmp(&start_regexp);
@@ -834,7 +843,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
// Not a zero-length match, restart.
__ b(ne, &load_char_start_regexp);
// Offset from the end is zero if we already reached the end.
- __ cmp(current_input_offset(), Operand(0));
+ __ cmp(current_input_offset(), Operand::Zero());
__ b(eq, &exit_label_);
// Advance current position after a zero-length match.
__ add(current_input_offset(),
@@ -873,7 +882,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
SafeCallTarget(&check_preempt_label_);
CallCheckStackGuardState(r0);
- __ cmp(r0, Operand(0, RelocInfo::NONE));
+ __ cmp(r0, Operand::Zero());
// If returning non-zero, we should end execution with the given
// result as return value.
__ b(ne, &return_r0);
@@ -900,7 +909,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
__ CallCFunction(grow_stack, num_arguments);
// If return NULL, we have failed to grow the stack, and
// must exit with a stack-overflow exception.
- __ cmp(r0, Operand(0, RelocInfo::NONE));
+ __ cmp(r0, Operand::Zero());
__ b(eq, &exit_with_exception);
// Otherwise use return value as new stack pointer.
__ mov(backtrack_stackpointer(), r0);
@@ -1150,7 +1159,7 @@ int RegExpMacroAssemblerARM::CheckStackGuardState(Address* return_address,
Handle<String> subject(frame_entry<String*>(re_frame, kInputString));
// Current string.
- bool is_ascii = subject->IsAsciiRepresentationUnderneath();
+ bool is_ascii = subject->IsOneByteRepresentationUnderneath();
ASSERT(re_code->instruction_start() <= *return_address);
ASSERT(*return_address <=
@@ -1181,7 +1190,7 @@ int RegExpMacroAssemblerARM::CheckStackGuardState(Address* return_address,
}
// String might have changed.
- if (subject_tmp->IsAsciiRepresentation() != is_ascii) {
+ if (subject_tmp->IsOneByteRepresentation() != is_ascii) {
// If we changed between an ASCII and a UC16 string, the specialized
// code cannot be used, and we need to restart regexp matching from
// scratch (including, potentially, compiling a new version of the code).
diff --git a/src/3rdparty/v8/src/arm/simulator-arm.cc b/src/3rdparty/v8/src/arm/simulator-arm.cc
index bd7f1bd..b7bc839 100644
--- a/src/3rdparty/v8/src/arm/simulator-arm.cc
+++ b/src/3rdparty/v8/src/arm/simulator-arm.cc
@@ -34,6 +34,7 @@
#include "disasm.h"
#include "assembler.h"
+#include "codegen.h"
#include "arm/constants-arm.h"
#include "arm/simulator-arm.h"
@@ -398,7 +399,7 @@ void ArmDebugger::Debug() {
int32_t words;
if (argc == next_arg) {
words = 10;
- } else if (argc == next_arg + 1) {
+ } else {
if (!GetValue(argv[next_arg], &words)) {
words = 10;
}
@@ -764,8 +765,8 @@ Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
// All registers are initialized to zero to start with
// even though s_registers_ & d_registers_ share the same
// physical registers in the target.
- for (int i = 0; i < num_s_registers; i++) {
- vfp_register[i] = 0;
+ for (int i = 0; i < num_d_registers * 2; i++) {
+ vfp_registers_[i] = 0;
}
n_flag_FPSCR_ = false;
z_flag_FPSCR_ = false;
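
The initialization fix above matters because the register file has grown: with 32 double registers (VFPv3-D32) backed by a single array of num_d_registers * 2 words, looping over only the 32 single-precision registers would leave d16-d31 uninitialized. A compilable sketch of the aliasing this diff assumes, where s0-s31 occupy the same words as d0-d15 and the upper doubles have no single-precision view:

    #include <cstdint>
    #include <cstring>
    #include <iostream>

    // Sketch of the aliased VFP bank implied by the diff: 32 doubles backed
    // by 64 32-bit words, with s0..s31 occupying the first 32 words (d0..d15).
    // d16..d31 have no single-precision alias, which is why code that poked
    // s-registers 2*reg and 2*reg+1 breaks for the upper bank.
    const int kNumDRegisters = 32;
    uint32_t vfp_registers[kNumDRegisters * 2];

    void set_d_register_from_double(int d, double value) {
      std::memcpy(&vfp_registers[d * 2], &value, sizeof(value));
    }

    uint32_t get_s_register(int s) { return vfp_registers[s]; }  // s < 32 only

    int main() {
      for (uint32_t& w : vfp_registers) w = 0;  // initialize all 64 words
      set_d_register_from_double(0, 1.0);
      // s0/s1 alias d0, so they now hold the two halves of the double 1.0.
      std::cout << std::hex << get_s_register(0) << " "
                << get_s_register(1) << "\n";
      set_d_register_from_double(31, 2.0);  // upper bank: no s-register view
    }
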
@@ -900,7 +901,7 @@ double Simulator::get_double_from_register_pair(int reg) {
double dm_val = 0.0;
// Read the bits from the unsigned integer registers_[] array
// into the double precision floating point value and return it.
- char buffer[2 * sizeof(vfp_register[0])];
+ char buffer[2 * sizeof(vfp_registers_[0])];
memcpy(buffer, &registers_[reg], 2 * sizeof(registers_[0]));
memcpy(&dm_val, buffer, 2 * sizeof(registers_[0]));
return(dm_val);
@@ -935,13 +936,13 @@ int32_t Simulator::get_pc() const {
// Getting from and setting into VFP registers.
void Simulator::set_s_register(int sreg, unsigned int value) {
ASSERT((sreg >= 0) && (sreg < num_s_registers));
- vfp_register[sreg] = value;
+ vfp_registers_[sreg] = value;
}
unsigned int Simulator::get_s_register(int sreg) const {
ASSERT((sreg >= 0) && (sreg < num_s_registers));
- return vfp_register[sreg];
+ return vfp_registers_[sreg];
}
@@ -949,12 +950,12 @@ template<class InputType, int register_size>
void Simulator::SetVFPRegister(int reg_index, const InputType& value) {
ASSERT(reg_index >= 0);
if (register_size == 1) ASSERT(reg_index < num_s_registers);
- if (register_size == 2) ASSERT(reg_index < num_d_registers);
+ if (register_size == 2) ASSERT(reg_index < DwVfpRegister::NumRegisters());
- char buffer[register_size * sizeof(vfp_register[0])];
- memcpy(buffer, &value, register_size * sizeof(vfp_register[0]));
- memcpy(&vfp_register[reg_index * register_size], buffer,
- register_size * sizeof(vfp_register[0]));
+ char buffer[register_size * sizeof(vfp_registers_[0])];
+ memcpy(buffer, &value, register_size * sizeof(vfp_registers_[0]));
+ memcpy(&vfp_registers_[reg_index * register_size], buffer,
+ register_size * sizeof(vfp_registers_[0]));
}
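
SetVFPRegister, like the getter below, moves values through a char buffer with memcpy instead of casting pointers. That is the standard strict-aliasing-safe way to reinterpret object bytes in C++: a direct reinterpret_cast between double* and the uint32_t register words would be undefined behavior, and as the comments elsewhere in this file note, the compiler could otherwise optimize the copy away. A self-contained illustration with a hypothetical bit_copy helper:

    #include <cstdint>
    #include <cstring>
    #include <iostream>

    // bit_copy: reinterpret the bytes of one trivially copyable type as
    // another, the memcpy idiom the simulator uses. A pointer cast such as
    // *reinterpret_cast<uint64_t*>(&d) would violate strict aliasing.
    template <typename To, typename From>
    To bit_copy(const From& from) {
      static_assert(sizeof(To) == sizeof(From), "size mismatch");
      To to;
      std::memcpy(&to, &from, sizeof(To));
      return to;
    }

    int main() {
      double d = 1.0;
      uint64_t bits = bit_copy<uint64_t>(d);
      std::cout << std::hex << bits << "\n";  // 3ff0000000000000
      std::cout << bit_copy<double>(bits) << "\n";
    }
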
@@ -962,13 +963,13 @@ template<class ReturnType, int register_size>
ReturnType Simulator::GetFromVFPRegister(int reg_index) {
ASSERT(reg_index >= 0);
if (register_size == 1) ASSERT(reg_index < num_s_registers);
- if (register_size == 2) ASSERT(reg_index < num_d_registers);
+ if (register_size == 2) ASSERT(reg_index < DwVfpRegister::NumRegisters());
ReturnType value = 0;
- char buffer[register_size * sizeof(vfp_register[0])];
- memcpy(buffer, &vfp_register[register_size * reg_index],
- register_size * sizeof(vfp_register[0]));
- memcpy(&value, buffer, register_size * sizeof(vfp_register[0]));
+ char buffer[register_size * sizeof(vfp_registers_[0])];
+ memcpy(buffer, &vfp_registers_[register_size * reg_index],
+ register_size * sizeof(vfp_registers_[0]));
+ memcpy(&value, buffer, register_size * sizeof(vfp_registers_[0]));
return value;
}
@@ -977,8 +978,8 @@ ReturnType Simulator::GetFromVFPRegister(int reg_index) {
// from r0-r3 or d0 and d1.
void Simulator::GetFpArgs(double* x, double* y) {
if (use_eabi_hardfloat()) {
- *x = vfp_register[0];
- *y = vfp_register[1];
+ *x = vfp_registers_[0];
+ *y = vfp_registers_[1];
} else {
// We use a char buffer to get around the strict-aliasing rules which
// otherwise allow the compiler to optimize away the copy.
@@ -996,7 +997,7 @@ void Simulator::GetFpArgs(double* x, double* y) {
// from r0 and r1 or d0.
void Simulator::GetFpArgs(double* x) {
if (use_eabi_hardfloat()) {
- *x = vfp_register[0];
+ *x = vfp_registers_[0];
} else {
// We use a char buffer to get around the strict-aliasing rules which
// otherwise allow the compiler to optimize away the copy.
@@ -1012,7 +1013,7 @@ void Simulator::GetFpArgs(double* x) {
// from r0 and r1 or d0 and one integer value.
void Simulator::GetFpArgs(double* x, int32_t* y) {
if (use_eabi_hardfloat()) {
- *x = vfp_register[0];
+ *x = vfp_registers_[0];
*y = registers_[1];
} else {
// We use a char buffer to get around the strict-aliasing rules which
@@ -1031,10 +1032,10 @@ void Simulator::GetFpArgs(double* x, int32_t* y) {
// The return value is either in r0/r1 or d0.
void Simulator::SetFpResult(const double& result) {
if (use_eabi_hardfloat()) {
- char buffer[2 * sizeof(vfp_register[0])];
+ char buffer[2 * sizeof(vfp_registers_[0])];
memcpy(buffer, &result, sizeof(buffer));
// Copy result to d0.
- memcpy(vfp_register, buffer, sizeof(buffer));
+ memcpy(vfp_registers_, buffer, sizeof(buffer));
} else {
char buffer[2 * sizeof(registers_[0])];
memcpy(buffer, &result, sizeof(buffer));
@@ -1613,15 +1614,19 @@ void Simulator::HandleVList(Instruction* instr) {
address += 1;
} else {
if (load) {
- set_s_register_from_sinteger(
- 2 * reg, ReadW(reinterpret_cast<int32_t>(address), instr));
- set_s_register_from_sinteger(
- 2 * reg + 1, ReadW(reinterpret_cast<int32_t>(address + 1), instr));
+ int32_t data[] = {
+ ReadW(reinterpret_cast<int32_t>(address), instr),
+ ReadW(reinterpret_cast<int32_t>(address + 1), instr)
+ };
+ double d;
+ memcpy(&d, data, 8);
+ set_d_register_from_double(reg, d);
} else {
- WriteW(reinterpret_cast<int32_t>(address),
- get_sinteger_from_s_register(2 * reg), instr);
- WriteW(reinterpret_cast<int32_t>(address + 1),
- get_sinteger_from_s_register(2 * reg + 1), instr);
+ int32_t data[2];
+ double d = get_double_from_d_register(reg);
+ memcpy(data, &d, 8);
+ WriteW(reinterpret_cast<int32_t>(address), data[0], instr);
+ WriteW(reinterpret_cast<int32_t>(address + 1), data[1], instr);
}
address += 2;
}
@@ -1687,18 +1692,18 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
switch (redirection->type()) {
case ExternalReference::BUILTIN_FP_FP_CALL:
case ExternalReference::BUILTIN_COMPARE_CALL:
- arg0 = vfp_register[0];
- arg1 = vfp_register[1];
- arg2 = vfp_register[2];
- arg3 = vfp_register[3];
+ arg0 = vfp_registers_[0];
+ arg1 = vfp_registers_[1];
+ arg2 = vfp_registers_[2];
+ arg3 = vfp_registers_[3];
break;
case ExternalReference::BUILTIN_FP_CALL:
- arg0 = vfp_register[0];
- arg1 = vfp_register[1];
+ arg0 = vfp_registers_[0];
+ arg1 = vfp_registers_[1];
break;
case ExternalReference::BUILTIN_FP_INT_CALL:
- arg0 = vfp_register[0];
- arg1 = vfp_register[1];
+ arg0 = vfp_registers_[0];
+ arg1 = vfp_registers_[1];
arg2 = get_register(0);
break;
default:
@@ -1776,7 +1781,7 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
if (::v8::internal::FLAG_trace_sim) {
PrintF("Returned %p\n", reinterpret_cast<void *>(*result));
}
- set_register(r0, (int32_t) *result);
+ set_register(r0, reinterpret_cast<int32_t>(*result));
} else if (redirection->type() == ExternalReference::DIRECT_GETTER_CALL) {
SimulatorRuntimeDirectGetterCall target =
reinterpret_cast<SimulatorRuntimeDirectGetterCall>(external);
@@ -1793,7 +1798,7 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
if (::v8::internal::FLAG_trace_sim) {
PrintF("Returned %p\n", reinterpret_cast<void *>(*result));
}
- set_register(r0, (int32_t) *result);
+ set_register(r0, reinterpret_cast<int32_t>(*result));
} else {
// builtin call.
ASSERT(redirection->type() == ExternalReference::BUILTIN_CALL);
@@ -2778,6 +2783,26 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
double dm_value = get_double_from_d_register(vm);
double dd_value = dn_value * dm_value;
set_d_register_from_double(vd, dd_value);
+ } else if ((instr->Opc1Value() == 0x0)) {
+ // vmla, vmls
+ const bool is_vmls = (instr->Opc3Value() & 0x1);
+
+ if (instr->SzValue() != 0x1) {
+ UNREACHABLE(); // Not used by V8.
+ }
+
+ const double dd_val = get_double_from_d_register(vd);
+ const double dn_val = get_double_from_d_register(vn);
+ const double dm_val = get_double_from_d_register(vm);
+
+ // Note: we do the mul and add/sub in separate steps to avoid getting a
+ // result with too high precision.
+ set_d_register_from_double(vd, dn_val * dm_val);
+ if (is_vmls) {
+ set_d_register_from_double(vd, dd_val - get_double_from_d_register(vd));
+ } else {
+ set_d_register_from_double(vd, dd_val + get_double_from_d_register(vd));
+ }
} else if ((instr->Opc1Value() == 0x4) && !(instr->Opc3Value() & 0x1)) {
// vdiv
if (instr->SzValue() != 0x1) {
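
The comment in the new vmla/vmls path is worth unpacking: ARM's chained (non-fused) multiply-accumulate rounds the product to double before the accumulate, so the simulator must materialize dn * dm in the register and read it back rather than evaluate dd + dn * dm in one C++ expression, which the compiler could contract into a fused multiply-add with a different result. A standalone illustration of the difference, with values chosen so the two roundings disagree:

    #include <cmath>
    #include <cstdio>

    int main() {
      // a and b are 1 + 2^-52; their exact product is 1 + 2^-51 + 2^-104.
      double a = 1.0 + std::ldexp(1.0, -52);
      double b = a;
      double c = -(1.0 + std::ldexp(1.0, -51));

      // Chained: the product rounds to double first (the 2^-104 term is
      // lost), so adding c gives exactly 0. Assumes the compiler does not
      // contract the expression into an fma (e.g. -ffp-contract=off).
      double chained = a * b + c;

      // Fused: the exact product feeds the add, so 2^-104 survives.
      double fused = std::fma(a, b, c);

      std::printf("chained = %g\nfused   = %g\n", chained, fused);
    }
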
@@ -2796,6 +2821,17 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
if ((instr->VCValue() == 0x0) &&
(instr->VAValue() == 0x0)) {
DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(instr);
+ } else if ((instr->VLValue() == 0x0) &&
+ (instr->VCValue() == 0x1) &&
+ (instr->Bit(23) == 0x0)) {
+ // vmov (ARM core register to scalar)
+ int vd = instr->Bits(19, 16) | (instr->Bit(7) << 4);
+ double dd_value = get_double_from_d_register(vd);
+ int32_t data[2];
+ memcpy(data, &dd_value, 8);
+ data[instr->Bit(21)] = get_register(instr->RtValue());
+ memcpy(&dd_value, data, 8);
+ set_d_register_from_double(vd, dd_value);
} else if ((instr->VLValue() == 0x1) &&
(instr->VCValue() == 0x0) &&
(instr->VAValue() == 0x7) &&
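
The new vmov (core register to scalar) path writes one 32-bit lane of a double register: the double is spilled to an int32_t pair, Bit(21) selects the low or high word to overwrite with the core register, and the pair is copied back. A standalone sketch of that lane insertion, using a hypothetical helper and the little-endian word order of the simulator's data[2] view:

    #include <cstdint>
    #include <cstring>
    #include <cstdio>

    // Overwrite one 32-bit lane of a double, as the simulator does for
    // "vmov d<n>[lane], rt". Lane 0 is the low word, lane 1 the high word.
    double insert_lane(double d, int lane, uint32_t value) {
      uint32_t words[2];
      std::memcpy(words, &d, sizeof(d));
      words[lane] = value;
      std::memcpy(&d, words, sizeof(d));
      return d;
    }

    int main() {
      double d = 0.0;
      d = insert_lane(d, 1, 0x3ff00000);  // write the high word of 1.0
      d = insert_lane(d, 0, 0x00000000);  // low word of 1.0 is zero
      std::printf("%g\n", d);             // prints 1
    }
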
@@ -3069,15 +3105,15 @@ void Simulator::DecodeVCVTBetweenFloatingPointAndInteger(Instruction* instr) {
if (src_precision == kDoublePrecision) {
if (unsigned_integer) {
- set_d_register_from_double(dst,
- static_cast<double>((uint32_t)val));
+ set_d_register_from_double(
+ dst, static_cast<double>(static_cast<uint32_t>(val)));
} else {
set_d_register_from_double(dst, static_cast<double>(val));
}
} else {
if (unsigned_integer) {
- set_s_register_from_float(dst,
- static_cast<float>((uint32_t)val));
+ set_s_register_from_float(
+ dst, static_cast<float>(static_cast<uint32_t>(val)));
} else {
set_s_register_from_float(dst, static_cast<float>(val));
}
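
The rewritten vcvt lines replace a C-style cast with explicit static_casts, and the order of the two casts is the substance: the 32-bit value must be reinterpreted as unsigned before widening to double, or a bit pattern like 0xFFFFFFFF converts to -1.0 rather than 4294967295.0. A two-line demonstration:

    #include <cstdint>
    #include <cstdio>

    int main() {
      int32_t val = -1;  // bit pattern 0xFFFFFFFF

      double as_signed = static_cast<double>(val);
      double as_unsigned = static_cast<double>(static_cast<uint32_t>(val));

      std::printf("%f\n", as_signed);    // -1.000000
      std::printf("%f\n", as_unsigned);  // 4294967295.000000
    }
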
@@ -3134,31 +3170,32 @@ void Simulator::DecodeType6CoprocessorIns(Instruction* instr) {
switch (instr->OpcodeValue()) {
case 0x2:
// Load and store double to two GP registers
- if (instr->Bits(7, 4) != 0x1) {
+ if (instr->Bits(7, 6) != 0 || instr->Bit(4) != 1) {
UNIMPLEMENTED(); // Not used by V8.
} else {
int rt = instr->RtValue();
int rn = instr->RnValue();
- int vm = instr->VmValue();
+ int vm = instr->VFPMRegValue(kDoublePrecision);
if (instr->HasL()) {
- int32_t rt_int_value = get_sinteger_from_s_register(2*vm);
- int32_t rn_int_value = get_sinteger_from_s_register(2*vm+1);
-
- set_register(rt, rt_int_value);
- set_register(rn, rn_int_value);
+ int32_t data[2];
+ double d = get_double_from_d_register(vm);
+ memcpy(data, &d, 8);
+ set_register(rt, data[0]);
+ set_register(rn, data[1]);
} else {
- int32_t rs_val = get_register(rt);
- int32_t rn_val = get_register(rn);
-
- set_s_register_from_sinteger(2*vm, rs_val);
- set_s_register_from_sinteger((2*vm+1), rn_val);
+ int32_t data[] = { get_register(rt), get_register(rn) };
+ double d;
+ memcpy(&d, data, 8);
+ set_d_register_from_double(vm, d);
}
}
break;
case 0x8:
- case 0xC: { // Load and store double to memory.
+ case 0xA:
+ case 0xC:
+ case 0xE: { // Load and store double to memory.
int rn = instr->RnValue();
- int vd = instr->VdValue();
+ int vd = instr->VFPDRegValue(kDoublePrecision);
int offset = instr->Immed8Value();
if (!instr->HasU()) {
offset = -offset;
@@ -3166,18 +3203,29 @@ void Simulator::DecodeType6CoprocessorIns(Instruction* instr) {
int32_t address = get_register(rn) + 4 * offset;
if (instr->HasL()) {
// Load double from memory: vldr.
- set_s_register_from_sinteger(2*vd, ReadW(address, instr));
- set_s_register_from_sinteger(2*vd + 1, ReadW(address + 4, instr));
+ int32_t data[] = {
+ ReadW(address, instr),
+ ReadW(address + 4, instr)
+ };
+ double val;
+ memcpy(&val, data, 8);
+ set_d_register_from_double(vd, val);
} else {
// Store double to memory: vstr.
- WriteW(address, get_sinteger_from_s_register(2*vd), instr);
- WriteW(address + 4, get_sinteger_from_s_register(2*vd + 1), instr);
+ int32_t data[2];
+ double val = get_double_from_d_register(vd);
+ memcpy(data, &val, 8);
+ WriteW(address, data[0], instr);
+ WriteW(address + 4, data[1], instr);
}
break;
}
case 0x4:
case 0x5:
+ case 0x6:
+ case 0x7:
case 0x9:
+ case 0xB:
// Load/store multiple double from memory: vldm/vstm.
HandleVList(instr);
break;
@@ -3287,33 +3335,7 @@ void Simulator::Execute() {
}
-int32_t Simulator::Call(byte* entry, int argument_count, ...) {
- va_list parameters;
- va_start(parameters, argument_count);
- // Set up arguments
-
- // First four arguments passed in registers.
- ASSERT(argument_count >= 4);
- set_register(r0, va_arg(parameters, int32_t));
- set_register(r1, va_arg(parameters, int32_t));
- set_register(r2, va_arg(parameters, int32_t));
- set_register(r3, va_arg(parameters, int32_t));
-
- // Remaining arguments passed on stack.
- int original_stack = get_register(sp);
- // Compute position of stack on entry to generated code.
- int entry_stack = (original_stack - (argument_count - 4) * sizeof(int32_t));
- if (OS::ActivationFrameAlignment() != 0) {
- entry_stack &= -OS::ActivationFrameAlignment();
- }
- // Store remaining arguments on stack, from low to high memory.
- intptr_t* stack_argument = reinterpret_cast<intptr_t*>(entry_stack);
- for (int i = 4; i < argument_count; i++) {
- stack_argument[i - 4] = va_arg(parameters, int32_t);
- }
- va_end(parameters);
- set_register(sp, entry_stack);
-
+void Simulator::CallInternal(byte* entry) {
// Prepare to execute the code at entry
set_register(pc, reinterpret_cast<int32_t>(entry));
// Put down marker for end of simulation. The simulator will stop simulation
@@ -3367,6 +3389,37 @@ int32_t Simulator::Call(byte* entry, int argument_count, ...) {
set_register(r9, r9_val);
set_register(r10, r10_val);
set_register(r11, r11_val);
+}
+
+
+int32_t Simulator::Call(byte* entry, int argument_count, ...) {
+ va_list parameters;
+ va_start(parameters, argument_count);
+ // Set up arguments
+
+ // First four arguments passed in registers.
+ ASSERT(argument_count >= 4);
+ set_register(r0, va_arg(parameters, int32_t));
+ set_register(r1, va_arg(parameters, int32_t));
+ set_register(r2, va_arg(parameters, int32_t));
+ set_register(r3, va_arg(parameters, int32_t));
+
+ // Remaining arguments passed on stack.
+ int original_stack = get_register(sp);
+ // Compute position of stack on entry to generated code.
+ int entry_stack = (original_stack - (argument_count - 4) * sizeof(int32_t));
+ if (OS::ActivationFrameAlignment() != 0) {
+ entry_stack &= -OS::ActivationFrameAlignment();
+ }
+ // Store remaining arguments on stack, from low to high memory.
+ intptr_t* stack_argument = reinterpret_cast<intptr_t*>(entry_stack);
+ for (int i = 4; i < argument_count; i++) {
+ stack_argument[i - 4] = va_arg(parameters, int32_t);
+ }
+ va_end(parameters);
+ set_register(sp, entry_stack);
+
+ CallInternal(entry);
// Pop stack passed arguments.
CHECK_EQ(entry_stack, get_register(sp));
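
For reference, the argument setup that stays in Call follows the ARM procedure-call standard: the first four 32-bit arguments go in r0-r3, the rest are stored below the current stack pointer, and the entry stack is aligned down with entry_stack &= -alignment, which works because negating a power of two produces a mask with the low bits clear. A small sketch of that rounding trick:

    #include <cstdint>
    #include <cstdio>

    // Align an address downward to a power-of-two boundary, the idiom behind
    // entry_stack &= -OS::ActivationFrameAlignment().
    uintptr_t AlignDown(uintptr_t address, uintptr_t alignment) {
      return address & -alignment;  // alignment must be a power of two
    }

    int main() {
      // With 6 arguments, 4 travel in r0-r3 and 2 spill to the stack, so
      // the simulator reserves 2 * sizeof(int32_t) below sp and aligns the
      // result down to the activation frame alignment (typically 8).
      uintptr_t sp = 0x7ffffff4;
      uintptr_t entry_stack = AlignDown(sp - 2 * sizeof(int32_t), 8);
      std::printf("0x%lx\n", static_cast<unsigned long>(entry_stack));
    }
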
@@ -3377,6 +3430,27 @@ int32_t Simulator::Call(byte* entry, int argument_count, ...) {
}
+double Simulator::CallFP(byte* entry, double d0, double d1) {
+ if (use_eabi_hardfloat()) {
+ set_d_register_from_double(0, d0);
+ set_d_register_from_double(1, d1);
+ } else {
+ int buffer[2];
+ ASSERT(sizeof(buffer[0]) * 2 == sizeof(d0));
+ memcpy(buffer, &d0, sizeof(d0));
+ set_dw_register(0, buffer);
+ memcpy(buffer, &d1, sizeof(d1));
+ set_dw_register(2, buffer);
+ }
+ CallInternal(entry);
+ if (use_eabi_hardfloat()) {
+ return get_double_from_d_register(0);
+ } else {
+ return get_double_from_register_pair(0);
+ }
+}
+
+
uintptr_t Simulator::PushAddress(uintptr_t address) {
int new_sp = get_register(sp) - sizeof(uintptr_t);
uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(new_sp);
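
The new CallFP must honor both ARM float ABIs: with hard-float EABI the double arguments travel in d0/d1 and the result comes back in d0, while with the soft-float ABI each double is split across a core-register pair (r0/r1 and r2/r3) and the result is reassembled from r0/r1, which is what set_dw_register and get_double_from_register_pair do above. A sketch of the soft-float split, with a plain array standing in for the core registers:

    #include <cstdint>
    #include <cstring>
    #include <cstdio>

    // Soft-float ABI: a double argument occupies a pair of core registers.
    // This mirrors CallFP's else-branch, which memcpy's each double into an
    // int buffer and loads it into r0/r1 (first arg) and r2/r3 (second).
    uint32_t core_regs[4];  // stand-in for r0..r3

    void PassDoubleSoftFloat(int first_reg, double value) {
      uint32_t words[2];
      std::memcpy(words, &value, sizeof(value));
      core_regs[first_reg] = words[0];      // low word
      core_regs[first_reg + 1] = words[1];  // high word
    }

    int main() {
      PassDoubleSoftFloat(0, 1.0);  // first argument -> r0/r1
      PassDoubleSoftFloat(2, 2.0);  // second argument -> r2/r3
      std::printf("r0=%08x r1=%08x r2=%08x r3=%08x\n",
                  core_regs[0], core_regs[1], core_regs[2], core_regs[3]);
    }
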
diff --git a/src/3rdparty/v8/src/arm/simulator-arm.h b/src/3rdparty/v8/src/arm/simulator-arm.h
index abc91bb..907a590 100644
--- a/src/3rdparty/v8/src/arm/simulator-arm.h
+++ b/src/3rdparty/v8/src/arm/simulator-arm.h
@@ -142,7 +142,9 @@ class Simulator {
num_s_registers = 32,
d0 = 0, d1, d2, d3, d4, d5, d6, d7,
d8, d9, d10, d11, d12, d13, d14, d15,
- num_d_registers = 16
+ d16, d17, d18, d19, d20, d21, d22, d23,
+ d24, d25, d26, d27, d28, d29, d30, d31,
+ num_d_registers = 32
};
explicit Simulator(Isolate* isolate);
@@ -205,6 +207,8 @@ class Simulator {
// generated RegExp code with 7 parameters. This is a convenience function,
// which sets up the simulator state and grabs the result on return.
int32_t Call(byte* entry, int argument_count, ...);
+ // Alternative: call a 2-argument double function.
+ double CallFP(byte* entry, double d0, double d1);
// Push an address onto the JS stack.
uintptr_t PushAddress(uintptr_t address);
@@ -356,6 +360,8 @@ class Simulator {
template<class InputType, int register_size>
void SetVFPRegister(int reg_index, const InputType& value);
+ void CallInternal(byte* entry);
+
// Architecture state.
// Saturating instructions require a Q flag to indicate saturation.
// There is currently no way to read the CPSR directly, and thus read the Q
@@ -367,7 +373,7 @@ class Simulator {
bool v_flag_;
// VFP architecture state.
- unsigned int vfp_register[num_s_registers];
+ unsigned int vfp_registers_[num_d_registers * 2];
bool n_flag_FPSCR_;
bool z_flag_FPSCR_;
bool c_flag_FPSCR_;
diff --git a/src/3rdparty/v8/src/arm/stub-cache-arm.cc b/src/3rdparty/v8/src/arm/stub-cache-arm.cc
index 9fc39d4..03aa359 100644
--- a/src/3rdparty/v8/src/arm/stub-cache-arm.cc
+++ b/src/3rdparty/v8/src/arm/stub-cache-arm.cc
@@ -130,14 +130,14 @@ static void ProbeTable(Isolate* isolate,
// the property. This function may return false negatives, so miss_label
// must always call a backup property check that is complete.
// This function is safe to call if the receiver has fast properties.
-// Name must be a symbol and receiver must be a heap object.
+// Name must be internalized and receiver must be a heap object.
static void GenerateDictionaryNegativeLookup(MacroAssembler* masm,
Label* miss_label,
Register receiver,
Handle<String> name,
Register scratch0,
Register scratch1) {
- ASSERT(name->IsSymbol());
+ ASSERT(name->IsInternalizedString());
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->negative_lookups(), 1, scratch0, scratch1);
__ IncrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
@@ -327,19 +327,25 @@ void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
Register dst,
Register src,
Handle<JSObject> holder,
- int index) {
- // Adjust for the number of properties stored in the holder.
- index -= holder->map()->inobject_properties();
- if (index < 0) {
- // Get the property straight out of the holder.
- int offset = holder->map()->instance_size() + (index * kPointerSize);
- __ ldr(dst, FieldMemOperand(src, offset));
- } else {
+ PropertyIndex index) {
+ DoGenerateFastPropertyLoad(
+ masm, dst, src, index.is_inobject(holder), index.translate(holder));
+}
+
+
+void StubCompiler::DoGenerateFastPropertyLoad(MacroAssembler* masm,
+ Register dst,
+ Register src,
+ bool inobject,
+ int index) {
+ int offset = index * kPointerSize;
+ if (!inobject) {
// Calculate the offset into the properties array.
- int offset = index * kPointerSize + FixedArray::kHeaderSize;
+ offset = offset + FixedArray::kHeaderSize;
__ ldr(dst, FieldMemOperand(src, JSObject::kPropertiesOffset));
- __ ldr(dst, FieldMemOperand(dst, offset));
+ src = dst;
}
+ __ ldr(dst, FieldMemOperand(src, offset));
}
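
The rewritten fast property load makes the two storage locations explicit: an in-object property sits at a fixed offset inside the object itself (one load), while an out-of-object property requires loading the separate properties array first and then indexing past its header (two loads). A toy model of the distinction; the types here are illustrative, not V8's real object layout:

    #include <cstdio>
    #include <vector>

    // Toy model of the two fast-property locations the stub distinguishes.
    struct ToyObject {
      static const int kInobjectSlots = 2;
      int inobject[kInobjectSlots];  // properties stored inside the object
      std::vector<int> properties;   // overflow backing store ("FixedArray")
    };

    int LoadFastProperty(const ToyObject& obj, bool inobject, int index) {
      if (inobject) {
        return obj.inobject[index];  // one load at a fixed offset
      }
      // Two loads: first the backing-store pointer, then the element.
      return obj.properties[index];
    }

    int main() {
      ToyObject o;
      o.inobject[0] = 10;
      o.properties = {20, 30};
      std::printf("%d %d\n", LoadFastProperty(o, true, 0),   // 10
                  LoadFastProperty(o, false, 1));            // 30
    }
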
@@ -587,6 +593,15 @@ void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) {
}
+void StubCompiler::GenerateStoreMiss(MacroAssembler* masm, Code::Kind kind) {
+ ASSERT(kind == Code::STORE_IC || kind == Code::KEYED_STORE_IC);
+ Handle<Code> code = (kind == Code::STORE_IC)
+ ? masm->isolate()->builtins()->StoreIC_Miss()
+ : masm->isolate()->builtins()->KeyedStoreIC_Miss();
+ __ Jump(code, RelocInfo::CODE_TARGET);
+}
+
+
static void GenerateCallFunction(MacroAssembler* masm,
Handle<Object> object,
const ParameterCount& arguments,
@@ -697,7 +712,7 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm,
// Pass the additional arguments.
Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
- Handle<Object> call_data(api_call_info->data());
+ Handle<Object> call_data(api_call_info->data(), masm->isolate());
if (masm->isolate()->heap()->InNewSpace(*call_data)) {
__ Move(r0, api_call_info);
__ ldr(r6, FieldMemOperand(r0, CallHandlerInfo::kDataOffset));
@@ -730,7 +745,7 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm,
__ mov(ip, Operand(argc));
__ str(ip, MemOperand(r0, 2 * kPointerSize));
// v8::Arguments::is_construct_call = 0
- __ mov(ip, Operand(0));
+ __ mov(ip, Operand::Zero());
__ str(ip, MemOperand(r0, 3 * kPointerSize));
const int kStackUnwindSpace = argc + kFastApiCallArguments + 1;
@@ -1003,7 +1018,7 @@ static void StoreIntAsFloat(MacroAssembler* masm,
__ and_(fval, ival, Operand(kBinary32SignMask), SetCC);
// Negate value if it is negative.
- __ rsb(ival, ival, Operand(0, RelocInfo::NONE), LeaveCC, ne);
+ __ rsb(ival, ival, Operand::Zero(), LeaveCC, ne);
// We have -1, 0 or 1, which we treat specially. Register ival contains
// absolute value: it is either equal to 1 (special case of -1 and 1),
@@ -1048,46 +1063,15 @@ static void StoreIntAsFloat(MacroAssembler* masm,
}
-// Convert unsigned integer with specified number of leading zeroes in binary
-// representation to IEEE 754 double.
-// Integer to convert is passed in register hiword.
-// Resulting double is returned in registers hiword:loword.
-// This function does not work correctly for 0.
-static void GenerateUInt2Double(MacroAssembler* masm,
- Register hiword,
- Register loword,
- Register scratch,
- int leading_zeroes) {
- const int meaningful_bits = kBitsPerInt - leading_zeroes - 1;
- const int biased_exponent = HeapNumber::kExponentBias + meaningful_bits;
-
- const int mantissa_shift_for_hi_word =
- meaningful_bits - HeapNumber::kMantissaBitsInTopWord;
-
- const int mantissa_shift_for_lo_word =
- kBitsPerInt - mantissa_shift_for_hi_word;
-
- __ mov(scratch, Operand(biased_exponent << HeapNumber::kExponentShift));
- if (mantissa_shift_for_hi_word > 0) {
- __ mov(loword, Operand(hiword, LSL, mantissa_shift_for_lo_word));
- __ orr(hiword, scratch, Operand(hiword, LSR, mantissa_shift_for_hi_word));
- } else {
- __ mov(loword, Operand(0, RelocInfo::NONE));
- __ orr(hiword, scratch, Operand(hiword, LSL, mantissa_shift_for_hi_word));
- }
-
- // If least significant bit of biased exponent was not 1 it was corrupted
- // by most significant bit of mantissa so we should fix that.
- if (!(biased_exponent & 1)) {
- __ bic(hiword, hiword, Operand(1 << HeapNumber::kExponentShift));
- }
-}
-
-
#undef __
#define __ ACCESS_MASM(masm())
+void StubCompiler::GenerateTailCall(Handle<Code> code) {
+ __ Jump(code, RelocInfo::CODE_TARGET);
+}
+
+
Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
Register object_reg,
Handle<JSObject> holder,
@@ -1096,7 +1080,9 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
Register scratch2,
Handle<String> name,
int save_at_depth,
- Label* miss) {
+ Label* miss,
+ PrototypeCheckType check) {
+ Handle<JSObject> first = object;
// Make sure there's no overlap between holder and object registers.
ASSERT(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
ASSERT(!scratch2.is(object_reg) && !scratch2.is(holder_reg)
@@ -1124,8 +1110,8 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
if (!current->HasFastProperties() &&
!current->IsJSGlobalObject() &&
!current->IsJSGlobalProxy()) {
- if (!name->IsSymbol()) {
- name = factory()->LookupSymbol(name);
+ if (!name->IsInternalizedString()) {
+ name = factory()->InternalizeString(name);
}
ASSERT(current->property_dictionary()->FindEntry(*name) ==
StringDictionary::kNotFound);
@@ -1137,9 +1123,15 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
reg = holder_reg; // From now on the object will be in holder_reg.
__ ldr(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
} else {
- Handle<Map> current_map(current->map());
- __ CheckMap(reg, scratch1, current_map, miss, DONT_DO_SMI_CHECK,
- ALLOW_ELEMENT_TRANSITION_MAPS);
+ Register map_reg = scratch1;
+ if (!current.is_identical_to(first) || check == CHECK_ALL_MAPS) {
+ Handle<Map> current_map(current->map());
+ // CheckMap implicitly loads the map of |reg| into |map_reg|.
+ __ CheckMap(reg, map_reg, current_map, miss, DONT_DO_SMI_CHECK,
+ ALLOW_ELEMENT_TRANSITION_MAPS);
+ } else {
+ __ ldr(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
+ }
// Check access rights to the global object. This has to happen after
// the map check so that we know that the object is actually a global
@@ -1152,7 +1144,7 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
if (heap()->InNewSpace(*prototype)) {
// The prototype is in new space; we cannot store a reference to it
// in the code. Load it from the map.
- __ ldr(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
+ __ ldr(reg, FieldMemOperand(map_reg, Map::kPrototypeOffset));
} else {
// The prototype is in old space; load it directly.
__ mov(reg, Operand(prototype));
@@ -1170,9 +1162,11 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
// Log the check depth.
LOG(masm()->isolate(), IntEvent("check-maps-depth", depth + 1));
- // Check the holder map.
- __ CheckMap(reg, scratch1, Handle<Map>(current->map()), miss,
- DONT_DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
+ if (!holder.is_identical_to(first) || check == CHECK_ALL_MAPS) {
+ // Check the holder map.
+ __ CheckMap(reg, scratch1, Handle<Map>(holder->map()), miss,
+ DONT_DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
+ }
// Perform security check for access to the global object.
ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
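
Taken together with the map_reg change above, the new PrototypeCheckType parameter lets a caller that has already verified the receiver's map skip re-checking it while still loading the map, since the map is also where the next prototype pointer is read from when the prototype lives in new space. A toy model of a chain walk with an optional first check (illustrative types only):

    #include <cstdio>
    #include <vector>

    // Toy prototype-chain walk with per-link map identity checks. When the
    // caller already verified the receiver's map, the first comparison can
    // be skipped, but the map must still be followed to reach the next link.
    struct ToyMap { int id; const ToyMap* prototype_map; };

    bool CheckChain(const ToyMap* receiver_map,
                    const std::vector<int>& expected_ids,
                    bool check_receiver_map) {
      const ToyMap* current = receiver_map;
      for (size_t i = 0; i < expected_ids.size(); ++i) {
        bool need_check = check_receiver_map || i > 0;
        if (need_check && current->id != expected_ids[i]) return false;
        current = current->prototype_map;  // load prototype from the map
      }
      return true;
    }

    int main() {
      ToyMap object_map = {1, nullptr};
      ToyMap receiver_map = {0, &object_map};
      std::printf("%d\n", CheckChain(&receiver_map, {0, 1}, false));  // 1
    }
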
@@ -1190,123 +1184,123 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
}
-void StubCompiler::GenerateLoadField(Handle<JSObject> object,
- Handle<JSObject> holder,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- int index,
- Handle<String> name,
- Label* miss) {
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, miss);
-
- // Check that the maps haven't changed.
- Register reg = CheckPrototypes(
- object, receiver, holder, scratch1, scratch2, scratch3, name, miss);
- GenerateFastPropertyLoad(masm(), r0, reg, holder, index);
- __ Ret();
+void BaseLoadStubCompiler::HandlerFrontendFooter(Label* success,
+ Label* miss) {
+ if (!miss->is_unused()) {
+ __ b(success);
+ __ bind(miss);
+ GenerateLoadMiss(masm(), kind());
+ }
}
-void StubCompiler::GenerateLoadConstant(Handle<JSObject> object,
- Handle<JSObject> holder,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Handle<JSFunction> value,
- Handle<String> name,
- Label* miss) {
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, miss);
+Register BaseLoadStubCompiler::CallbackHandlerFrontend(
+ Handle<JSObject> object,
+ Register object_reg,
+ Handle<JSObject> holder,
+ Handle<String> name,
+ Label* success,
+ Handle<ExecutableAccessorInfo> callback) {
+ Label miss;
- // Check that the maps haven't changed.
- CheckPrototypes(
- object, receiver, holder, scratch1, scratch2, scratch3, name, miss);
+ Register reg = HandlerFrontendHeader(object, object_reg, holder, name, &miss);
- // Return the constant value.
- __ LoadHeapObject(r0, value);
- __ Ret();
+ if (!holder->HasFastProperties() && !holder->IsJSGlobalObject()) {
+ ASSERT(!reg.is(scratch2()));
+ ASSERT(!reg.is(scratch3()));
+ ASSERT(!reg.is(scratch4()));
+
+ // Load the properties dictionary.
+ Register dictionary = scratch4();
+ __ ldr(dictionary, FieldMemOperand(reg, JSObject::kPropertiesOffset));
+
+ // Probe the dictionary.
+ Label probe_done;
+ StringDictionaryLookupStub::GeneratePositiveLookup(masm(),
+ &miss,
+ &probe_done,
+ dictionary,
+ this->name(),
+ scratch2(),
+ scratch3());
+ __ bind(&probe_done);
+
+ // If probing finds an entry in the dictionary, scratch3 contains the
+ // pointer into the dictionary. Check that the value is the callback.
+ Register pointer = scratch3();
+ const int kElementsStartOffset = StringDictionary::kHeaderSize +
+ StringDictionary::kElementsStartIndex * kPointerSize;
+ const int kValueOffset = kElementsStartOffset + kPointerSize;
+ __ ldr(scratch2(), FieldMemOperand(pointer, kValueOffset));
+ __ cmp(scratch2(), Operand(callback));
+ __ b(ne, &miss);
+ }
+
+ HandlerFrontendFooter(success, &miss);
+ return reg;
}
-void StubCompiler::GenerateDictionaryLoadCallback(Register receiver,
- Register name_reg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Handle<AccessorInfo> callback,
- Handle<String> name,
- Label* miss) {
- ASSERT(!receiver.is(scratch1));
- ASSERT(!receiver.is(scratch2));
- ASSERT(!receiver.is(scratch3));
-
- // Load the properties dictionary.
- Register dictionary = scratch1;
- __ ldr(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
-
- // Probe the dictionary.
- Label probe_done;
- StringDictionaryLookupStub::GeneratePositiveLookup(masm(),
- miss,
- &probe_done,
- dictionary,
- name_reg,
- scratch2,
- scratch3);
- __ bind(&probe_done);
-
- // If probing finds an entry in the dictionary, scratch3 contains the
- // pointer into the dictionary. Check that the value is the callback.
- Register pointer = scratch3;
- const int kElementsStartOffset = StringDictionary::kHeaderSize +
- StringDictionary::kElementsStartIndex * kPointerSize;
- const int kValueOffset = kElementsStartOffset + kPointerSize;
- __ ldr(scratch2, FieldMemOperand(pointer, kValueOffset));
- __ cmp(scratch2, Operand(callback));
- __ b(ne, miss);
+void BaseLoadStubCompiler::NonexistentHandlerFrontend(
+ Handle<JSObject> object,
+ Handle<JSObject> last,
+ Handle<String> name,
+ Label* success,
+ Handle<GlobalObject> global) {
+ Label miss;
+
+ Register reg = HandlerFrontendHeader(object, receiver(), last, name, &miss);
+
+ // If the last object in the prototype chain is a global object,
+ // check that the global property cell is empty.
+ if (!global.is_null()) {
+ GenerateCheckPropertyCell(masm(), global, name, scratch2(), &miss);
+ }
+
+ if (!last->HasFastProperties()) {
+ __ ldr(scratch2(), FieldMemOperand(reg, HeapObject::kMapOffset));
+ __ ldr(scratch2(), FieldMemOperand(scratch2(), Map::kPrototypeOffset));
+ __ cmp(scratch2(), Operand(isolate()->factory()->null_value()));
+ __ b(ne, &miss);
+ }
+
+ HandlerFrontendFooter(success, &miss);
}
-void StubCompiler::GenerateLoadCallback(Handle<JSObject> object,
- Handle<JSObject> holder,
- Register receiver,
- Register name_reg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Handle<AccessorInfo> callback,
- Handle<String> name,
- Label* miss) {
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, miss);
+void BaseLoadStubCompiler::GenerateLoadField(Register reg,
+ Handle<JSObject> holder,
+ PropertyIndex index) {
+ GenerateFastPropertyLoad(masm(), r0, reg, holder, index);
+ __ Ret();
+}
- // Check that the maps haven't changed.
- Register reg = CheckPrototypes(object, receiver, holder, scratch1,
- scratch2, scratch3, name, miss);
- if (!holder->HasFastProperties() && !holder->IsJSGlobalObject()) {
- GenerateDictionaryLoadCallback(
- reg, name_reg, scratch2, scratch3, scratch4, callback, name, miss);
- }
+void BaseLoadStubCompiler::GenerateLoadConstant(Handle<JSFunction> value) {
+ // Return the constant value.
+ __ LoadHeapObject(r0, value);
+ __ Ret();
+}
+
+void BaseLoadStubCompiler::GenerateLoadCallback(
+ Register reg,
+ Handle<ExecutableAccessorInfo> callback) {
// Build AccessorInfo::args_ list on the stack and push property name below
// the exit frame to make GC aware of them and store pointers to them.
- __ push(receiver);
- __ mov(scratch2, sp); // scratch2 = AccessorInfo::args_
+ __ push(receiver());
+ __ mov(scratch2(), sp); // scratch2 = AccessorInfo::args_
if (heap()->InNewSpace(callback->data())) {
- __ Move(scratch3, callback);
- __ ldr(scratch3, FieldMemOperand(scratch3, AccessorInfo::kDataOffset));
+ __ Move(scratch3(), callback);
+ __ ldr(scratch3(), FieldMemOperand(scratch3(),
+ ExecutableAccessorInfo::kDataOffset));
} else {
- __ Move(scratch3, Handle<Object>(callback->data()));
+ __ Move(scratch3(), Handle<Object>(callback->data(),
+ callback->GetIsolate()));
}
- __ Push(reg, scratch3);
- __ mov(scratch3, Operand(ExternalReference::isolate_address()));
- __ Push(scratch3, name_reg);
+ __ Push(reg, scratch3());
+ __ mov(scratch3(), Operand(ExternalReference::isolate_address()));
+ __ Push(scratch3(), name());
__ mov(r0, sp); // r0 = Handle<String>
const int kApiStackSpace = 1;
@@ -1315,7 +1309,7 @@ void StubCompiler::GenerateLoadCallback(Handle<JSObject> object,
// Create AccessorInfo instance on the stack above the exit frame with
// scratch2 (internal::Object** args_) as the data.
- __ str(scratch2, MemOperand(sp, 1 * kPointerSize));
+ __ str(scratch2(), MemOperand(sp, 1 * kPointerSize));
__ add(r1, sp, Operand(1 * kPointerSize)); // r1 = AccessorInfo&
const int kStackUnwindSpace = 5;
@@ -1329,22 +1323,15 @@ void StubCompiler::GenerateLoadCallback(Handle<JSObject> object,
}
-void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
- Handle<JSObject> interceptor_holder,
- LookupResult* lookup,
- Register receiver,
- Register name_reg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Handle<String> name,
- Label* miss) {
+void BaseLoadStubCompiler::GenerateLoadInterceptor(
+ Register holder_reg,
+ Handle<JSObject> object,
+ Handle<JSObject> interceptor_holder,
+ LookupResult* lookup,
+ Handle<String> name) {
ASSERT(interceptor_holder->HasNamedInterceptor());
ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined());
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, miss);
-
// So far the most popular follow-ups for interceptor loads are FIELD
// and CALLBACKS, so inline only them; other cases may be added
// later.
@@ -1353,8 +1340,9 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
if (lookup->IsField()) {
compile_followup_inline = true;
} else if (lookup->type() == CALLBACKS &&
- lookup->GetCallbackObject()->IsAccessorInfo()) {
- AccessorInfo* callback = AccessorInfo::cast(lookup->GetCallbackObject());
+ lookup->GetCallbackObject()->IsExecutableAccessorInfo()) {
+ ExecutableAccessorInfo* callback =
+ ExecutableAccessorInfo::cast(lookup->GetCallbackObject());
compile_followup_inline = callback->getter() != NULL &&
callback->IsCompatibleReceiver(*object);
}
@@ -1364,17 +1352,14 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
// Compile the interceptor call, followed by inline code to load the
// property from further up the prototype chain if the call fails.
// Check that the maps haven't changed.
- Register holder_reg = CheckPrototypes(object, receiver, interceptor_holder,
- scratch1, scratch2, scratch3,
- name, miss);
- ASSERT(holder_reg.is(receiver) || holder_reg.is(scratch1));
+ ASSERT(holder_reg.is(receiver()) || holder_reg.is(scratch1()));
// Preserve the receiver register explicitly whenever it is different from
// the holder and it is needed should the interceptor return without any
// result. The CALLBACKS case needs the receiver to be passed into C++ code,
// the FIELD case might cause a miss during the prototype check.
bool must_perfrom_prototype_check = *interceptor_holder != lookup->holder();
- bool must_preserve_receiver_reg = !receiver.is(holder_reg) &&
+ bool must_preserve_receiver_reg = !receiver().is(holder_reg) &&
(lookup->type() == CALLBACKS || must_perfrom_prototype_check);
// Save necessary data before invoking an interceptor.
@@ -1382,93 +1367,42 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
{
FrameScope frame_scope(masm(), StackFrame::INTERNAL);
if (must_preserve_receiver_reg) {
- __ Push(receiver, holder_reg, name_reg);
+ __ Push(receiver(), holder_reg, this->name());
} else {
- __ Push(holder_reg, name_reg);
+ __ Push(holder_reg, this->name());
}
// Invoke an interceptor. Note: map checks from receiver to
// interceptor's holder have been compiled before (see a caller
// of this method).
CompileCallLoadPropertyWithInterceptor(masm(),
- receiver,
+ receiver(),
holder_reg,
- name_reg,
+ this->name(),
interceptor_holder);
// Check if interceptor provided a value for property. If it's
// the case, return immediately.
Label interceptor_failed;
- __ LoadRoot(scratch1, Heap::kNoInterceptorResultSentinelRootIndex);
- __ cmp(r0, scratch1);
+ __ LoadRoot(scratch1(), Heap::kNoInterceptorResultSentinelRootIndex);
+ __ cmp(r0, scratch1());
__ b(eq, &interceptor_failed);
frame_scope.GenerateLeaveFrame();
__ Ret();
__ bind(&interceptor_failed);
- __ pop(name_reg);
+ __ pop(this->name());
__ pop(holder_reg);
if (must_preserve_receiver_reg) {
- __ pop(receiver);
+ __ pop(receiver());
}
// Leave the internal frame.
}
- // Check that the maps from interceptor's holder to lookup's holder
- // haven't changed. And load lookup's holder into |holder| register.
- if (must_perfrom_prototype_check) {
- holder_reg = CheckPrototypes(interceptor_holder,
- holder_reg,
- Handle<JSObject>(lookup->holder()),
- scratch1,
- scratch2,
- scratch3,
- name,
- miss);
- }
- if (lookup->IsField()) {
- // We found FIELD property in prototype chain of interceptor's holder.
- // Retrieve a field from field's holder.
- GenerateFastPropertyLoad(masm(), r0, holder_reg,
- Handle<JSObject>(lookup->holder()),
- lookup->GetFieldIndex());
- __ Ret();
- } else {
- // We found CALLBACKS property in prototype chain of interceptor's
- // holder.
- ASSERT(lookup->type() == CALLBACKS);
- Handle<AccessorInfo> callback(
- AccessorInfo::cast(lookup->GetCallbackObject()));
- ASSERT(callback->getter() != NULL);
-
- // Tail call to runtime.
- // Important invariant in CALLBACKS case: the code above must be
- // structured to never clobber |receiver| register.
- __ Move(scratch2, callback);
- // holder_reg is either receiver or scratch1.
- if (!receiver.is(holder_reg)) {
- ASSERT(scratch1.is(holder_reg));
- __ Push(receiver, holder_reg);
- } else {
- __ push(receiver);
- __ push(holder_reg);
- }
- __ ldr(scratch3,
- FieldMemOperand(scratch2, AccessorInfo::kDataOffset));
- __ mov(scratch1, Operand(ExternalReference::isolate_address()));
- __ Push(scratch3, scratch1, scratch2, name_reg);
-
- ExternalReference ref =
- ExternalReference(IC_Utility(IC::kLoadCallbackProperty),
- masm()->isolate());
- __ TailCallExternalReference(ref, 6, 1);
- }
+ GenerateLoadPostInterceptor(holder_reg, interceptor_holder, name, lookup);
} else { // !compile_followup_inline
// Call the runtime system to load the interceptor.
// Check that the maps haven't changed.
- Register holder_reg = CheckPrototypes(object, receiver, interceptor_holder,
- scratch1, scratch2, scratch3,
- name, miss);
- PushInterceptorArguments(masm(), receiver, holder_reg,
- name_reg, interceptor_holder);
+ PushInterceptorArguments(masm(), receiver(), holder_reg,
+ this->name(), interceptor_holder);
ExternalReference ref =
ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForLoad),
@@ -1545,7 +1479,7 @@ void CallStubCompiler::GenerateMissBranch() {
Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
Handle<JSObject> holder,
- int index,
+ PropertyIndex index,
Handle<String> name) {
// ----------- S t a t e -------------
// -- r2 : name
@@ -1618,7 +1552,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
Label call_builtin;
if (argc == 1) { // Otherwise fall through to call the builtin.
- Label attempt_to_grow_elements;
+ Label attempt_to_grow_elements, with_write_barrier, check_double;
Register elements = r6;
Register end_elements = r5;
@@ -1629,10 +1563,9 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
__ CheckMap(elements,
r0,
Heap::kFixedArrayMapRootIndex,
- &call_builtin,
+ &check_double,
DONT_DO_SMI_CHECK);
-
// Get the array's length into r0 and calculate new length.
__ ldr(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
STATIC_ASSERT(kSmiTagSize == 1);
@@ -1647,7 +1580,6 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
__ b(gt, &attempt_to_grow_elements);
// Check if value is a smi.
- Label with_write_barrier;
__ ldr(r4, MemOperand(sp, (argc - 1) * kPointerSize));
__ JumpIfNotSmi(r4, &with_write_barrier);
@@ -1667,6 +1599,40 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
__ Drop(argc + 1);
__ Ret();
+ __ bind(&check_double);
+
+ // Check that the elements are in fast mode and writable.
+ __ CheckMap(elements,
+ r0,
+ Heap::kFixedDoubleArrayMapRootIndex,
+ &call_builtin,
+ DONT_DO_SMI_CHECK);
+
+ // Get the array's length into r0 and calculate new length.
+ __ ldr(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ STATIC_ASSERT(kSmiTagSize == 1);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ add(r0, r0, Operand(Smi::FromInt(argc)));
+
+ // Get the elements' length.
+ __ ldr(r4, FieldMemOperand(elements, FixedArray::kLengthOffset));
+
+ // Check if we could survive without allocation.
+ __ cmp(r0, r4);
+ __ b(gt, &call_builtin);
+
+ __ ldr(r4, MemOperand(sp, (argc - 1) * kPointerSize));
+ __ StoreNumberToDoubleElements(
+ r4, r0, elements, r3, r5, r2, r9,
+ &call_builtin, argc * kDoubleSize);
+
+ // Save new length.
+ __ str(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+
+ // Return the new length.
+ __ Drop(argc + 1);
+ __ Ret();
+
__ bind(&with_write_barrier);
__ ldr(r3, FieldMemOperand(receiver, HeapObject::kMapOffset));
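
The &check_double block gives Array.prototype.push a fast path for arrays whose elements are unboxed doubles: bump the smi length, compare the new length against the backing store's capacity, and store the pushed value with StoreNumberToDoubleElements, bailing to the builtin when growth or a non-number value would be needed. A toy model of the capacity-guarded push:

    #include <cstdio>
    #include <vector>

    // Toy double-elements store: length may be less than capacity, and the
    // fast path only succeeds when no reallocation is needed (mirroring the
    // "check if we could survive without allocation" comparison above).
    struct DoubleArray {
      std::vector<double> elements;  // capacity == elements.size()
      int length = 0;
    };

    bool FastPushDouble(DoubleArray* a, double value) {
      int new_length = a->length + 1;
      if (new_length > static_cast<int>(a->elements.size())) {
        return false;  // would need to grow: bail out to the slow builtin
      }
      a->elements[a->length] = value;  // unboxed double store
      a->length = new_length;
      return true;
    }

    int main() {
      DoubleArray a;
      a.elements.resize(2);
      std::printf("%d %d %d\n", FastPushDouble(&a, 1.5),
                  FastPushDouble(&a, 2.5),
                  FastPushDouble(&a, 3.5));  // 1 1 0: third push bails out
    }
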
@@ -1678,6 +1644,11 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
// In case of fast smi-only, convert to fast object, otherwise bail out.
__ bind(&not_fast_object);
__ CheckFastSmiElements(r3, r7, &call_builtin);
+
+ __ ldr(r7, FieldMemOperand(r4, HeapObject::kMapOffset));
+ __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
+ __ cmp(r7, ip);
+ __ b(eq, &call_builtin);
// r1: receiver
// r3: map
Label try_holey_map;
@@ -1688,7 +1659,9 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
&try_holey_map);
__ mov(r2, receiver);
ElementsTransitionGenerator::
- GenerateMapChangeElementsTransition(masm());
+ GenerateMapChangeElementsTransition(masm(),
+ DONT_TRACK_ALLOCATION_SITE,
+ NULL);
__ jmp(&fast_object);
__ bind(&try_holey_map);
@@ -1699,7 +1672,9 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
&call_builtin);
__ mov(r2, receiver);
ElementsTransitionGenerator::
- GenerateMapChangeElementsTransition(masm());
+ GenerateMapChangeElementsTransition(masm(),
+ DONT_TRACK_ALLOCATION_SITE,
+ NULL);
__ bind(&fast_object);
} else {
__ CheckFastObjectElements(r3, r3, &call_builtin);
@@ -1922,8 +1897,9 @@ Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall(
r0,
&miss);
ASSERT(!object.is_identical_to(holder));
- CheckPrototypes(Handle<JSObject>(JSObject::cast(object->GetPrototype())),
- r0, holder, r1, r3, r4, name, &miss);
+ CheckPrototypes(
+ Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
+ r0, holder, r1, r3, r4, name, &miss);
Register receiver = r1;
Register index = r4;
@@ -2002,8 +1978,9 @@ Handle<Code> CallStubCompiler::CompileStringCharAtCall(
r0,
&miss);
ASSERT(!object.is_identical_to(holder));
- CheckPrototypes(Handle<JSObject>(JSObject::cast(object->GetPrototype())),
- r0, holder, r1, r3, r4, name, &miss);
+ CheckPrototypes(
+ Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
+ r0, holder, r1, r3, r4, name, &miss);
Register receiver = r0;
Register index = r4;
@@ -2033,7 +2010,7 @@ Handle<Code> CallStubCompiler::CompileStringCharAtCall(
if (index_out_of_range.is_linked()) {
__ bind(&index_out_of_range);
- __ LoadRoot(r0, Heap::kEmptyStringRootIndex);
+ __ LoadRoot(r0, Heap::kempty_stringRootIndex);
__ Drop(argc + 1);
__ Ret();
}
@@ -2235,7 +2212,7 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall(
__ mov(r0, Operand(r0, LSL, kSmiTagSize));
// Check for -0.
- __ cmp(r0, Operand(0, RelocInfo::NONE));
+ __ cmp(r0, Operand::Zero());
__ b(&restore_fpscr_and_return, ne);
// r5 already holds the HeapNumber exponent.
__ tst(r5, Operand(HeapNumber::kSignMask));
@@ -2418,23 +2395,15 @@ Handle<Code> CallStubCompiler::CompileFastApiCall(
}
-Handle<Code> CallStubCompiler::CompileCallConstant(Handle<Object> object,
- Handle<JSObject> holder,
- Handle<JSFunction> function,
- Handle<String> name,
- CheckType check) {
+void CallStubCompiler::CompileHandlerFrontend(Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<String> name,
+ CheckType check,
+ Label* success) {
// ----------- S t a t e -------------
// -- r2 : name
// -- lr : return address
// -----------------------------------
- if (HasCustomCallGenerator(function)) {
- Handle<Code> code = CompileCustomCall(object, holder,
- Handle<JSGlobalPropertyCell>::null(),
- function, name);
- // A null handle means bail out to the regular compiler code below.
- if (!code.is_null()) return code;
- }
-
Label miss;
GenerateNameCheck(name, &miss);
@@ -2468,78 +2437,95 @@ Handle<Code> CallStubCompiler::CompileCallConstant(Handle<Object> object,
break;
case STRING_CHECK:
- if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
- // Check that the object is a two-byte string or a symbol.
- __ CompareObjectType(r1, r3, r3, FIRST_NONSTRING_TYPE);
- __ b(ge, &miss);
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::STRING_FUNCTION_INDEX, r0, &miss);
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype())),
- r0, holder, r3, r1, r4, name, &miss);
- } else {
- // Calling non-strict non-builtins with a value as the receiver
- // requires boxing.
- __ jmp(&miss);
- }
+ // Check that the object is a string.
+ __ CompareObjectType(r1, r3, r3, FIRST_NONSTRING_TYPE);
+ __ b(ge, &miss);
+ // Check that the maps starting from the prototype haven't changed.
+ GenerateDirectLoadGlobalFunctionPrototype(
+ masm(), Context::STRING_FUNCTION_INDEX, r0, &miss);
+ CheckPrototypes(
+ Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
+ r0, holder, r3, r1, r4, name, &miss);
break;
- case NUMBER_CHECK:
- if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
- Label fast;
- // Check that the object is a smi or a heap number.
- __ JumpIfSmi(r1, &fast);
- __ CompareObjectType(r1, r0, r0, HEAP_NUMBER_TYPE);
- __ b(ne, &miss);
- __ bind(&fast);
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::NUMBER_FUNCTION_INDEX, r0, &miss);
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype())),
- r0, holder, r3, r1, r4, name, &miss);
- } else {
- // Calling non-strict non-builtins with a value as the receiver
- // requires boxing.
- __ jmp(&miss);
- }
+ case SYMBOL_CHECK:
+ // Check that the object is a symbol.
+ __ CompareObjectType(r1, r1, r3, SYMBOL_TYPE);
+ __ b(ne, &miss);
break;
- case BOOLEAN_CHECK:
- if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
- Label fast;
- // Check that the object is a boolean.
- __ LoadRoot(ip, Heap::kTrueValueRootIndex);
- __ cmp(r1, ip);
- __ b(eq, &fast);
- __ LoadRoot(ip, Heap::kFalseValueRootIndex);
- __ cmp(r1, ip);
- __ b(ne, &miss);
- __ bind(&fast);
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::BOOLEAN_FUNCTION_INDEX, r0, &miss);
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype())),
- r0, holder, r3, r1, r4, name, &miss);
- } else {
- // Calling non-strict non-builtins with a value as the receiver
- // requires boxing.
- __ jmp(&miss);
- }
+ case NUMBER_CHECK: {
+ Label fast;
+ // Check that the object is a smi or a heap number.
+ __ JumpIfSmi(r1, &fast);
+ __ CompareObjectType(r1, r0, r0, HEAP_NUMBER_TYPE);
+ __ b(ne, &miss);
+ __ bind(&fast);
+ // Check that the maps starting from the prototype haven't changed.
+ GenerateDirectLoadGlobalFunctionPrototype(
+ masm(), Context::NUMBER_FUNCTION_INDEX, r0, &miss);
+ CheckPrototypes(
+ Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
+ r0, holder, r3, r1, r4, name, &miss);
+ break;
+ }
+ case BOOLEAN_CHECK: {
+ Label fast;
+ // Check that the object is a boolean.
+ __ LoadRoot(ip, Heap::kTrueValueRootIndex);
+ __ cmp(r1, ip);
+ __ b(eq, &fast);
+ __ LoadRoot(ip, Heap::kFalseValueRootIndex);
+ __ cmp(r1, ip);
+ __ b(ne, &miss);
+ __ bind(&fast);
+ // Check that the maps starting from the prototype haven't changed.
+ GenerateDirectLoadGlobalFunctionPrototype(
+ masm(), Context::BOOLEAN_FUNCTION_INDEX, r0, &miss);
+ CheckPrototypes(
+ Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
+ r0, holder, r3, r1, r4, name, &miss);
break;
+ }
}
+ __ b(success);
+
+ // Handle call cache miss.
+ __ bind(&miss);
+ GenerateMissBranch();
+}
+
+
+void CallStubCompiler::CompileHandlerBackend(Handle<JSFunction> function) {
CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
? CALL_AS_FUNCTION
: CALL_AS_METHOD;
__ InvokeFunction(
function, arguments(), JUMP_FUNCTION, NullCallWrapper(), call_kind);
+}
+
+
+Handle<Code> CallStubCompiler::CompileCallConstant(
+ Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<String> name,
+ CheckType check,
+ Handle<JSFunction> function) {
+ if (HasCustomCallGenerator(function)) {
+ Handle<Code> code = CompileCustomCall(object, holder,
+ Handle<JSGlobalPropertyCell>::null(),
+ function, name);
+ // A null handle means bail out to the regular compiler code below.
+ if (!code.is_null()) return code;
+ }
+
+ Label success;
+
+ CompileHandlerFrontend(object, holder, name, check, &success);
+ __ bind(&success);
+ CompileHandlerBackend(function);
- // Handle call cache miss.
- __ bind(&miss);
- GenerateMissBranch();
// Return the generated code.
return GetCode(function);
@@ -2676,7 +2662,7 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
Handle<String> name,
Handle<JSObject> receiver,
Handle<JSObject> holder,
- Handle<AccessorInfo> callback) {
+ Handle<ExecutableAccessorInfo> callback) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r1 : receiver
@@ -2875,79 +2861,45 @@ Handle<Code> StoreStubCompiler::CompileStoreGlobal(
}
-Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<String> name,
- Handle<JSObject> object,
- Handle<JSObject> last) {
- // ----------- S t a t e -------------
- // -- r0 : receiver
- // -- lr : return address
- // -----------------------------------
- Label miss;
-
- // Check that receiver is not a smi.
- __ JumpIfSmi(r0, &miss);
+Handle<Code> LoadStubCompiler::CompileLoadNonexistent(
+ Handle<JSObject> object,
+ Handle<JSObject> last,
+ Handle<String> name,
+ Handle<GlobalObject> global) {
+ Label success;
- // Check the maps of the full prototype chain.
- CheckPrototypes(object, r0, last, r3, r1, r4, name, &miss);
-
- // If the last object in the prototype chain is a global object,
- // check that the global property cell is empty.
- if (last->IsGlobalObject()) {
- GenerateCheckPropertyCell(
- masm(), Handle<GlobalObject>::cast(last), name, r1, &miss);
- }
+ NonexistentHandlerFrontend(object, last, name, &success, global);
+ __ bind(&success);
// Return undefined if maps of the full prototype chain are still the
// same and no global property with this name contains a value.
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
__ Ret();
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::LOAD_IC);
-
// Return the generated code.
- return GetCode(Code::NONEXISTENT, factory()->empty_string());
+ return GetCode(Code::HANDLER_FRAGMENT, Code::NONEXISTENT, name);
}
-Handle<Code> LoadStubCompiler::CompileLoadField(Handle<JSObject> object,
- Handle<JSObject> holder,
- int index,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- r0 : receiver
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
- Label miss;
+Register* LoadStubCompiler::registers() {
+ // receiver, name, scratch1, scratch2, scratch3, scratch4.
+ static Register registers[] = { r0, r2, r3, r1, r4, r5 };
+ return registers;
+}
- GenerateLoadField(object, holder, r0, r3, r1, r4, index, name, &miss);
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::LOAD_IC);
- // Return the generated code.
- return GetCode(Code::FIELD, name);
+Register* KeyedLoadStubCompiler::registers() {
+ // receiver, name, scratch1, scratch2, scratch3, scratch4.
+ static Register registers[] = { r1, r0, r2, r3, r4, r5 };
+ return registers;
}
-Handle<Code> LoadStubCompiler::CompileLoadCallback(
- Handle<String> name,
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<AccessorInfo> callback) {
- // ----------- S t a t e -------------
- // -- r0 : receiver
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
- Label miss;
- GenerateLoadCallback(object, holder, r0, r2, r3, r1, r4, r5, callback, name,
- &miss);
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::LOAD_IC);
-
- // Return the generated code.
- return GetCode(Code::CALLBACKS, name);
+void KeyedLoadStubCompiler::GenerateNameCheck(Handle<String> name,
+ Register name_reg,
+ Label* miss) {
+ __ cmp(name_reg, Operand(name));
+ __ b(ne, miss);
}
@@ -2988,90 +2940,18 @@ void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
#define __ ACCESS_MASM(masm())
-Handle<Code> LoadStubCompiler::CompileLoadViaGetter(
- Handle<String> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- Handle<JSFunction> getter) {
- // ----------- S t a t e -------------
- // -- r0 : receiver
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
- Label miss;
-
- // Check that the maps haven't changed.
- __ JumpIfSmi(r0, &miss);
- CheckPrototypes(receiver, r0, holder, r3, r4, r1, name, &miss);
-
- GenerateLoadViaGetter(masm(), getter);
-
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::LOAD_IC);
-
- // Return the generated code.
- return GetCode(Code::CALLBACKS, name);
-}
-
-
-Handle<Code> LoadStubCompiler::CompileLoadConstant(Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<JSFunction> value,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- r0 : receiver
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
- Label miss;
-
- GenerateLoadConstant(object, holder, r0, r3, r1, r4, value, name, &miss);
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::LOAD_IC);
-
- // Return the generated code.
- return GetCode(Code::CONSTANT_FUNCTION, name);
-}
-
-
-Handle<Code> LoadStubCompiler::CompileLoadInterceptor(Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- r0 : receiver
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
- Label miss;
-
- LookupResult lookup(isolate());
- LookupPostInterceptor(holder, name, &lookup);
- GenerateLoadInterceptor(object, holder, &lookup, r0, r2, r3, r1, r4, name,
- &miss);
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::LOAD_IC);
-
- // Return the generated code.
- return GetCode(Code::INTERCEPTOR, name);
-}
-
-
Handle<Code> LoadStubCompiler::CompileLoadGlobal(
Handle<JSObject> object,
- Handle<GlobalObject> holder,
+ Handle<GlobalObject> global,
Handle<JSGlobalPropertyCell> cell,
Handle<String> name,
bool is_dont_delete) {
- // ----------- S t a t e -------------
- // -- r0 : receiver
- // -- r2 : name
- // -- lr : return address
- // -----------------------------------
- Label miss;
+ Label success, miss;
- // Check that the map of the global has not changed.
- __ JumpIfSmi(r0, &miss);
- CheckPrototypes(object, r0, holder, r3, r4, r1, name, &miss);
+ __ CheckMap(
+ receiver(), scratch1(), Handle<Map>(object->map()), &miss, DO_SMI_CHECK);
+ HandlerFrontendHeader(
+ object, receiver(), Handle<JSObject>::cast(global), name, &miss);
// Get the value from the cell.
__ mov(r3, Operand(cell));
@@ -3084,188 +2964,16 @@ Handle<Code> LoadStubCompiler::CompileLoadGlobal(
__ b(eq, &miss);
}
- __ mov(r0, r4);
+ HandlerFrontendFooter(&success, &miss);
+ __ bind(&success);
+
Counters* counters = masm()->isolate()->counters();
__ IncrementCounter(counters->named_load_global_stub(), 1, r1, r3);
+ __ mov(r0, r4);
__ Ret();
- __ bind(&miss);
- __ IncrementCounter(counters->named_load_global_stub_miss(), 1, r1, r3);
- GenerateLoadMiss(masm(), Code::LOAD_IC);
-
// Return the generated code.
- return GetCode(Code::NORMAL, name);
-}
-
-
-Handle<Code> KeyedLoadStubCompiler::CompileLoadField(Handle<String> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- int index) {
- // ----------- S t a t e -------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- // -----------------------------------
- Label miss;
-
- // Check the key is the cached one.
- __ cmp(r0, Operand(name));
- __ b(ne, &miss);
-
- GenerateLoadField(receiver, holder, r1, r2, r3, r4, index, name, &miss);
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
-
- return GetCode(Code::FIELD, name);
-}
-
-
-Handle<Code> KeyedLoadStubCompiler::CompileLoadCallback(
- Handle<String> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- Handle<AccessorInfo> callback) {
- // ----------- S t a t e -------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- // -----------------------------------
- Label miss;
-
- // Check the key is the cached one.
- __ cmp(r0, Operand(name));
- __ b(ne, &miss);
-
- GenerateLoadCallback(receiver, holder, r1, r0, r2, r3, r4, r5, callback, name,
- &miss);
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
-
- return GetCode(Code::CALLBACKS, name);
-}
-
-
-Handle<Code> KeyedLoadStubCompiler::CompileLoadConstant(
- Handle<String> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- Handle<JSFunction> value) {
- // ----------- S t a t e -------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- // -----------------------------------
- Label miss;
-
- // Check the key is the cached one.
- __ cmp(r0, Operand(name));
- __ b(ne, &miss);
-
- GenerateLoadConstant(receiver, holder, r1, r2, r3, r4, value, name, &miss);
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
-
- // Return the generated code.
- return GetCode(Code::CONSTANT_FUNCTION, name);
-}
-
-
-Handle<Code> KeyedLoadStubCompiler::CompileLoadInterceptor(
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- // -----------------------------------
- Label miss;
-
- // Check the key is the cached one.
- __ cmp(r0, Operand(name));
- __ b(ne, &miss);
-
- LookupResult lookup(isolate());
- LookupPostInterceptor(holder, name, &lookup);
- GenerateLoadInterceptor(receiver, holder, &lookup, r1, r0, r2, r3, r4, name,
- &miss);
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
-
- return GetCode(Code::INTERCEPTOR, name);
-}
-
-
-Handle<Code> KeyedLoadStubCompiler::CompileLoadArrayLength(
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- // -----------------------------------
- Label miss;
-
- // Check the key is the cached one.
- __ cmp(r0, Operand(name));
- __ b(ne, &miss);
-
- GenerateLoadArrayLength(masm(), r1, r2, &miss);
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
-
- return GetCode(Code::CALLBACKS, name);
-}
-
-
-Handle<Code> KeyedLoadStubCompiler::CompileLoadStringLength(
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- // -----------------------------------
- Label miss;
-
- Counters* counters = masm()->isolate()->counters();
- __ IncrementCounter(counters->keyed_load_string_length(), 1, r2, r3);
-
- // Check the key is the cached one.
- __ cmp(r0, Operand(name));
- __ b(ne, &miss);
-
- GenerateLoadStringLength(masm(), r1, r2, r3, &miss, true);
- __ bind(&miss);
- __ DecrementCounter(counters->keyed_load_string_length(), 1, r2, r3);
-
- GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
-
- return GetCode(Code::CALLBACKS, name);
-}
-
-
-Handle<Code> KeyedLoadStubCompiler::CompileLoadFunctionPrototype(
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- // -----------------------------------
- Label miss;
-
- Counters* counters = masm()->isolate()->counters();
- __ IncrementCounter(counters->keyed_load_function_prototype(), 1, r2, r3);
-
- // Check the name hasn't changed.
- __ cmp(r0, Operand(name));
- __ b(ne, &miss);
-
- GenerateLoadFunctionPrototype(masm(), r1, r2, r3, &miss);
- __ bind(&miss);
- __ DecrementCounter(counters->keyed_load_function_prototype(), 1, r2, r3);
- GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
-
- return GetCode(Code::CALLBACKS, name);
+ return GetCode(Code::IC_FRAGMENT, Code::NORMAL, name);
}
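
The rewritten CompileLoadGlobal routes map and prototype checks through the shared handler frontend and leaves only the cell load plus an optional hole check in the body. A rough C++ rendering of that body, with stand-in types (this is a sketch of the control flow, not the emitted ARM):

    struct Object;
    struct PropertyCell { Object* value; };  // stand-in for JSGlobalPropertyCell

    Object* LoadGlobal(PropertyCell* cell, bool is_dont_delete,
                       Object* the_hole, Object* (*miss)()) {
      Object* value = cell->value;           // __ ldr(r4, FieldMemOperand(r3, ...))
      if (!is_dont_delete && value == the_hole) {
        return miss();                       // deleted global: take the miss path
      }
      return value;                          // __ mov(r0, r4); __ Ret()
    }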
@@ -3277,43 +2985,56 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadElement(
// -- r1 : receiver
// -----------------------------------
ElementsKind elements_kind = receiver_map->elements_kind();
- Handle<Code> stub = KeyedLoadElementStub(elements_kind).GetCode();
-
- __ DispatchMap(r1, r2, receiver_map, stub, DO_SMI_CHECK);
+ if (receiver_map->has_fast_elements() ||
+ receiver_map->has_external_array_elements()) {
+ Handle<Code> stub = KeyedLoadFastElementStub(
+ receiver_map->instance_type() == JS_ARRAY_TYPE,
+ elements_kind).GetCode(isolate());
+ __ DispatchMap(r1, r2, receiver_map, stub, DO_SMI_CHECK);
+ } else {
+ Handle<Code> stub =
+ KeyedLoadDictionaryElementStub().GetCode(isolate());
+ __ DispatchMap(r1, r2, receiver_map, stub, DO_SMI_CHECK);
+ }
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Miss();
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(Code::NORMAL, factory()->empty_string());
+ return GetCode(Code::IC_FRAGMENT, Code::NORMAL, factory()->empty_string());
}
-Handle<Code> KeyedLoadStubCompiler::CompileLoadPolymorphic(
+Handle<Code> BaseLoadStubCompiler::CompilePolymorphicIC(
MapHandleList* receiver_maps,
- CodeHandleList* handler_ics) {
- // ----------- S t a t e -------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- // -----------------------------------
+ CodeHandleList* handlers,
+ Handle<String> name,
+ Code::StubType type,
+ IcCheckType check) {
Label miss;
- __ JumpIfSmi(r1, &miss);
+
+ if (check == PROPERTY) {
+ GenerateNameCheck(name, this->name(), &miss);
+ }
+
+ __ JumpIfSmi(receiver(), &miss);
+ Register map_reg = scratch1();
int receiver_count = receiver_maps->length();
- __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
+ __ ldr(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset));
for (int current = 0; current < receiver_count; ++current) {
__ mov(ip, Operand(receiver_maps->at(current)));
- __ cmp(r2, ip);
- __ Jump(handler_ics->at(current), RelocInfo::CODE_TARGET, eq);
+ __ cmp(map_reg, ip);
+ __ Jump(handlers->at(current), RelocInfo::CODE_TARGET, eq);
}
__ bind(&miss);
- Handle<Code> miss_ic = isolate()->builtins()->KeyedLoadIC_Miss();
- __ Jump(miss_ic, RelocInfo::CODE_TARGET, al);
+ GenerateLoadMiss(masm(), kind());
// Return the generated code.
- return GetCode(Code::NORMAL, factory()->empty_string(), MEGAMORPHIC);
+ InlineCacheState state =
+ receiver_maps->length() > 1 ? POLYMORPHIC : MONOMORPHIC;
+ return GetCode(Code::IC_FRAGMENT, type, name, state);
}
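
CompilePolymorphicIC consolidates keyed and named polymorphic dispatch: an optional name check, a smi check, one map load, then a compare-and-branch per cached map. The shape of that dispatch in stand-alone C++ (stand-in types; the real stub tail-jumps to the miss builtin):

    #include <cstddef>

    struct Map;                  // stand-in for V8's map objects
    typedef void (*Handler)();   // stand-in for a compiled handler entry point

    Handler Dispatch(const Map* receiver_map,
                     const Map* const* maps, const Handler* handlers,
                     std::size_t count, Handler miss) {
      for (std::size_t i = 0; i < count; ++i) {
        if (receiver_map == maps[i]) return handlers[i];  // cmp map_reg, ip; b eq
      }
      return miss;  // no cached map matched: GenerateLoadMiss(masm(), kind())
    }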
@@ -3370,7 +3091,9 @@ Handle<Code> KeyedStoreStubCompiler::CompileStoreElement(
ElementsKind elements_kind = receiver_map->elements_kind();
bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
Handle<Code> stub =
- KeyedStoreElementStub(is_js_array, elements_kind, grow_mode_).GetCode();
+ KeyedStoreElementStub(is_js_array,
+ elements_kind,
+ grow_mode_).GetCode(isolate());
__ DispatchMap(r2, r3, receiver_map, stub, DO_SMI_CHECK);
@@ -3417,7 +3140,7 @@ Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
__ Jump(miss_ic, RelocInfo::CODE_TARGET, al);
// Return the generated code.
- return GetCode(Code::NORMAL, factory()->empty_string(), MEGAMORPHIC);
+ return GetCode(Code::NORMAL, factory()->empty_string(), POLYMORPHIC);
}
@@ -3524,7 +3247,8 @@ Handle<Code> ConstructStubCompiler::CompileConstructStub(
__ bind(&next);
} else {
// Set the property to the constant value.
- Handle<Object> constant(shared->GetThisPropertyAssignmentConstant(i));
+ Handle<Object> constant(shared->GetThisPropertyAssignmentConstant(i),
+ isolate());
__ mov(r2, Operand(constant));
__ str(r2, MemOperand(r5, kPointerSize, PostIndex));
}
@@ -3684,339 +3408,6 @@ static void GenerateSmiKeyCheck(MacroAssembler* masm,
}
-void KeyedLoadStubCompiler::GenerateLoadExternalArray(
- MacroAssembler* masm,
- ElementsKind elements_kind) {
- // ---------- S t a t e --------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- // -----------------------------------
- Label miss_force_generic, slow, failed_allocation;
-
- Register key = r0;
- Register receiver = r1;
-
- // This stub is meant to be tail-jumped to, the receiver must already
- // have been verified by the caller to not be a smi.
-
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, key, r4, r5, d1, d2, &miss_force_generic);
-
- __ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset));
- // r3: elements array
-
- // Check that the index is in range.
- __ ldr(ip, FieldMemOperand(r3, ExternalArray::kLengthOffset));
- __ cmp(key, ip);
- // Unsigned comparison catches both negative and too-large values.
- __ b(hs, &miss_force_generic);
-
- __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));
- // r3: base pointer of external storage
-
- // We are not untagging smi key and instead work with it
- // as if it was premultiplied by 2.
- STATIC_ASSERT((kSmiTag == 0) && (kSmiTagSize == 1));
-
- Register value = r2;
- switch (elements_kind) {
- case EXTERNAL_BYTE_ELEMENTS:
- __ ldrsb(value, MemOperand(r3, key, LSR, 1));
- break;
- case EXTERNAL_PIXEL_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- __ ldrb(value, MemOperand(r3, key, LSR, 1));
- break;
- case EXTERNAL_SHORT_ELEMENTS:
- __ ldrsh(value, MemOperand(r3, key, LSL, 0));
- break;
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- __ ldrh(value, MemOperand(r3, key, LSL, 0));
- break;
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- __ ldr(value, MemOperand(r3, key, LSL, 1));
- break;
- case EXTERNAL_FLOAT_ELEMENTS:
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatures::Scope scope(VFP2);
- __ add(r2, r3, Operand(key, LSL, 1));
- __ vldr(s0, r2, 0);
- } else {
- __ ldr(value, MemOperand(r3, key, LSL, 1));
- }
- break;
- case EXTERNAL_DOUBLE_ELEMENTS:
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatures::Scope scope(VFP2);
- __ add(r2, r3, Operand(key, LSL, 2));
- __ vldr(d0, r2, 0);
- } else {
- __ add(r4, r3, Operand(key, LSL, 2));
- // r4: pointer to the beginning of the double we want to load.
- __ ldr(r2, MemOperand(r4, 0));
- __ ldr(r3, MemOperand(r4, Register::kSizeInBytes));
- }
- break;
- case FAST_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNREACHABLE();
- break;
- }
-
- // For integer array types:
- // r2: value
- // For float array type:
- // s0: value (if VFP3 is supported)
- // r2: value (if VFP3 is not supported)
- // For double array type:
- // d0: value (if VFP3 is supported)
- // r2/r3: value (if VFP3 is not supported)
-
- if (elements_kind == EXTERNAL_INT_ELEMENTS) {
- // For the Int and UnsignedInt array types, we need to see whether
- // the value can be represented in a Smi. If not, we need to convert
- // it to a HeapNumber.
- Label box_int;
- __ cmp(value, Operand(0xC0000000));
- __ b(mi, &box_int);
- // Tag integer as smi and return it.
- __ mov(r0, Operand(value, LSL, kSmiTagSize));
- __ Ret();
-
- __ bind(&box_int);
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatures::Scope scope(VFP2);
- // Allocate a HeapNumber for the result and perform int-to-double
- // conversion. Don't touch r0 or r1 as they are needed if allocation
- // fails.
- __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
-
- __ AllocateHeapNumber(r5, r3, r4, r6, &slow, DONT_TAG_RESULT);
- // Now we can use r0 for the result as key is not needed any more.
- __ add(r0, r5, Operand(kHeapObjectTag));
- __ vmov(s0, value);
- __ vcvt_f64_s32(d0, s0);
- __ vstr(d0, r5, HeapNumber::kValueOffset);
- __ Ret();
- } else {
- // Allocate a HeapNumber for the result and perform int-to-double
- // conversion. Don't touch r0 or r1 as they are needed if allocation
- // fails.
- __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r5, r3, r4, r6, &slow, TAG_RESULT);
- // Now we can use r0 for the result as key is not needed any more.
- __ mov(r0, r5);
- Register dst1 = r1;
- Register dst2 = r3;
- FloatingPointHelper::Destination dest =
- FloatingPointHelper::kCoreRegisters;
- FloatingPointHelper::ConvertIntToDouble(masm,
- value,
- dest,
- d0,
- dst1,
- dst2,
- r9,
- s0);
- __ str(dst1, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
- __ str(dst2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
- __ Ret();
- }
- } else if (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) {
- // The test is different for unsigned int values. Since we need
- // the value to be in the range of a positive smi, we can't
- // handle either of the top two bits being set in the value.
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatures::Scope scope(VFP2);
- Label box_int, done;
- __ tst(value, Operand(0xC0000000));
- __ b(ne, &box_int);
- // Tag integer as smi and return it.
- __ mov(r0, Operand(value, LSL, kSmiTagSize));
- __ Ret();
-
- __ bind(&box_int);
- __ vmov(s0, value);
- // Allocate a HeapNumber for the result and perform int-to-double
- // conversion. Don't use r0 and r1 as AllocateHeapNumber clobbers all
- // registers - also when jumping due to exhausted young space.
- __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r2, r3, r4, r6, &slow, DONT_TAG_RESULT);
-
- __ vcvt_f64_u32(d0, s0);
- __ vstr(d0, r2, HeapNumber::kValueOffset);
-
- __ add(r0, r2, Operand(kHeapObjectTag));
- __ Ret();
- } else {
- // Check whether unsigned integer fits into smi.
- Label box_int_0, box_int_1, done;
- __ tst(value, Operand(0x80000000));
- __ b(ne, &box_int_0);
- __ tst(value, Operand(0x40000000));
- __ b(ne, &box_int_1);
- // Tag integer as smi and return it.
- __ mov(r0, Operand(value, LSL, kSmiTagSize));
- __ Ret();
-
- Register hiword = value; // r2.
- Register loword = r3;
-
- __ bind(&box_int_0);
- // Integer does not have leading zeros.
- GenerateUInt2Double(masm, hiword, loword, r4, 0);
- __ b(&done);
-
- __ bind(&box_int_1);
- // Integer has one leading zero.
- GenerateUInt2Double(masm, hiword, loword, r4, 1);
-
-
- __ bind(&done);
- // Integer was converted to double in registers hiword:loword.
- // Wrap it into a HeapNumber. Don't use r0 and r1 as AllocateHeapNumber
- // clobbers all registers - also when jumping due to exhausted young
- // space.
- __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r4, r5, r7, r6, &slow, TAG_RESULT);
-
- __ str(hiword, FieldMemOperand(r4, HeapNumber::kExponentOffset));
- __ str(loword, FieldMemOperand(r4, HeapNumber::kMantissaOffset));
-
- __ mov(r0, r4);
- __ Ret();
- }
- } else if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
- // For the floating-point array type, we need to always allocate a
- // HeapNumber.
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatures::Scope scope(VFP2);
- // Allocate a HeapNumber for the result. Don't use r0 and r1 as
- // AllocateHeapNumber clobbers all registers - also when jumping due to
- // exhausted young space.
- __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r2, r3, r4, r6, &slow, DONT_TAG_RESULT);
- __ vcvt_f64_f32(d0, s0);
- __ vstr(d0, r2, HeapNumber::kValueOffset);
-
- __ add(r0, r2, Operand(kHeapObjectTag));
- __ Ret();
- } else {
- // Allocate a HeapNumber for the result. Don't use r0 and r1 as
- // AllocateHeapNumber clobbers all registers - also when jumping due to
- // exhausted young space.
- __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r3, r4, r5, r6, &slow, TAG_RESULT);
- // VFP is not available, do manual single to double conversion.
-
- // r2: floating point value (binary32)
- // r3: heap number for result
-
- // Extract mantissa to r0. OK to clobber r0 now as there are no jumps to
- // the slow case from here.
- __ and_(r0, value, Operand(kBinary32MantissaMask));
-
- // Extract exponent to r1. OK to clobber r1 now as there are no jumps to
- // the slow case from here.
- __ mov(r1, Operand(value, LSR, kBinary32MantissaBits));
- __ and_(r1, r1, Operand(kBinary32ExponentMask >> kBinary32MantissaBits));
-
- Label exponent_rebiased;
- __ teq(r1, Operand(0x00));
- __ b(eq, &exponent_rebiased);
-
- __ teq(r1, Operand(0xff));
- __ mov(r1, Operand(0x7ff), LeaveCC, eq);
- __ b(eq, &exponent_rebiased);
-
- // Rebias exponent.
- __ add(r1,
- r1,
- Operand(-kBinary32ExponentBias + HeapNumber::kExponentBias));
-
- __ bind(&exponent_rebiased);
- __ and_(r2, value, Operand(kBinary32SignMask));
- value = no_reg;
- __ orr(r2, r2, Operand(r1, LSL, HeapNumber::kMantissaBitsInTopWord));
-
- // Shift mantissa.
- static const int kMantissaShiftForHiWord =
- kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;
-
- static const int kMantissaShiftForLoWord =
- kBitsPerInt - kMantissaShiftForHiWord;
-
- __ orr(r2, r2, Operand(r0, LSR, kMantissaShiftForHiWord));
- __ mov(r0, Operand(r0, LSL, kMantissaShiftForLoWord));
-
- __ str(r2, FieldMemOperand(r3, HeapNumber::kExponentOffset));
- __ str(r0, FieldMemOperand(r3, HeapNumber::kMantissaOffset));
-
- __ mov(r0, r3);
- __ Ret();
- }
- } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatures::Scope scope(VFP2);
- // Allocate a HeapNumber for the result. Don't use r0 and r1 as
- // AllocateHeapNumber clobbers all registers - also when jumping due to
- // exhausted young space.
- __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r2, r3, r4, r6, &slow, DONT_TAG_RESULT);
- __ vstr(d0, r2, HeapNumber::kValueOffset);
-
- __ add(r0, r2, Operand(kHeapObjectTag));
- __ Ret();
- } else {
- // Allocate a HeapNumber for the result. Don't use r0 and r1 as
- // AllocateHeapNumber clobbers all registers - also when jumping due to
- // exhausted young space.
- __ LoadRoot(r7, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(r4, r5, r6, r7, &slow, TAG_RESULT);
-
- __ str(r2, FieldMemOperand(r4, HeapNumber::kMantissaOffset));
- __ str(r3, FieldMemOperand(r4, HeapNumber::kExponentOffset));
- __ mov(r0, r4);
- __ Ret();
- }
-
- } else {
- // Tag integer as smi and return it.
- __ mov(r0, Operand(value, LSL, kSmiTagSize));
- __ Ret();
- }
-
- // Slow case, key and receiver still in r0 and r1.
- __ bind(&slow);
- __ IncrementCounter(
- masm->isolate()->counters()->keyed_load_external_array_slow(),
- 1, r2, r3);
-
- // ---------- S t a t e --------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- // -----------------------------------
-
- __ Push(r1, r0);
-
- __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
-
- __ bind(&miss_force_generic);
- Handle<Code> stub =
- masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
- __ Jump(stub, RelocInfo::CODE_TARGET);
-}
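
The deleted loader's most opaque piece is the no-VFP float path, which widens an IEEE binary32 element to binary64 with integer operations only: split off sign, exponent, and mantissa, rebias the exponent from 127 to 1023, and shift the mantissa up. The same computation in portable C++ (subnormal inputs are left unrebiased, mirroring the stub, which only special-cases zero and Inf/NaN exponents):

    #include <cstdint>
    #include <cstring>

    static double Binary32ToBinary64(uint32_t f) {
      uint64_t sign = static_cast<uint64_t>(f >> 31) << 63;
      uint32_t exp  = (f >> 23) & 0xFF;
      uint64_t mant = static_cast<uint64_t>(f & 0x7FFFFF) << (52 - 23);
      uint64_t e;
      if (exp == 0) e = 0;              // zero (and, unhandled, subnormals)
      else if (exp == 0xFF) e = 0x7FF;  // Inf/NaN keep the all-ones exponent
      else e = static_cast<uint64_t>(exp) - 127 + 1023;  // rebias
      uint64_t bits = sign | (e << 52) | mant;
      double d;
      std::memcpy(&d, &bits, sizeof d);
      return d;
    }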
-
-
void KeyedStoreStubCompiler::GenerateStoreExternalArray(
MacroAssembler* masm,
ElementsKind elements_kind) {
@@ -4096,7 +3487,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
}
FloatingPointHelper::ConvertIntToDouble(
masm, r5, destination,
- d0, r6, r7, // These are: double_dst, dst1, dst2.
+ d0, r6, r7, // These are: double_dst, dst_mantissa, dst_exponent.
r4, s2); // These are: scratch2, single_scratch.
if (destination == FloatingPointHelper::kVFPRegisters) {
CpuFeatures::Scope scope(VFP2);
@@ -4155,7 +3546,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
// not include -kHeapObjectTag into it.
__ sub(r5, value, Operand(kHeapObjectTag));
__ vldr(d0, r5, HeapNumber::kValueOffset);
- __ EmitECMATruncate(r5, d0, s2, r6, r7, r9);
+ __ EmitECMATruncate(r5, d0, d1, r6, r7, r9);
switch (elements_kind) {
case EXTERNAL_BYTE_ELEMENTS:
@@ -4263,18 +3654,18 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
// and infinities. All these should be converted to 0.
__ mov(r7, Operand(HeapNumber::kExponentMask));
__ and_(r9, r5, Operand(r7), SetCC);
- __ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, eq);
+ __ mov(r5, Operand::Zero(), LeaveCC, eq);
__ b(eq, &done);
__ teq(r9, Operand(r7));
- __ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, eq);
+ __ mov(r5, Operand::Zero(), LeaveCC, eq);
__ b(eq, &done);
// Unbias exponent.
__ mov(r9, Operand(r9, LSR, HeapNumber::kExponentShift));
__ sub(r9, r9, Operand(HeapNumber::kExponentBias), SetCC);
// If exponent is negative then result is 0.
- __ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, mi);
+ __ mov(r5, Operand::Zero(), LeaveCC, mi);
__ b(mi, &done);
// If exponent is too big then result is minimal value.
@@ -4290,14 +3681,14 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
__ mov(r5, Operand(r5, LSR, r9), LeaveCC, pl);
__ b(pl, &sign);
- __ rsb(r9, r9, Operand(0, RelocInfo::NONE));
+ __ rsb(r9, r9, Operand::Zero());
__ mov(r5, Operand(r5, LSL, r9));
__ rsb(r9, r9, Operand(meaningfull_bits));
__ orr(r5, r5, Operand(r6, LSR, r9));
__ bind(&sign);
- __ teq(r7, Operand(0, RelocInfo::NONE));
- __ rsb(r5, r5, Operand(0, RelocInfo::NONE), LeaveCC, ne);
+ __ teq(r7, Operand::Zero());
+ __ rsb(r5, r5, Operand::Zero(), LeaveCC, ne);
__ bind(&done);
switch (elements_kind) {
@@ -4361,118 +3752,6 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
}
-void KeyedLoadStubCompiler::GenerateLoadFastElement(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- // -----------------------------------
- Label miss_force_generic;
-
- // This stub is meant to be tail-jumped to, the receiver must already
- // have been verified by the caller to not be a smi.
-
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, r0, r4, r5, d1, d2, &miss_force_generic);
-
- // Get the elements array.
- __ ldr(r2, FieldMemOperand(r1, JSObject::kElementsOffset));
- __ AssertFastElements(r2);
-
- // Check that the key is within bounds.
- __ ldr(r3, FieldMemOperand(r2, FixedArray::kLengthOffset));
- __ cmp(r0, Operand(r3));
- __ b(hs, &miss_force_generic);
-
- // Load the result and make sure it's not the hole.
- __ add(r3, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
- __ ldr(r4,
- MemOperand(r3, r0, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ cmp(r4, ip);
- __ b(eq, &miss_force_generic);
- __ mov(r0, r4);
- __ Ret();
-
- __ bind(&miss_force_generic);
- Handle<Code> stub =
- masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
- __ Jump(stub, RelocInfo::CODE_TARGET);
-}
-
-
-void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(
- MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- // -----------------------------------
- Label miss_force_generic, slow_allocate_heapnumber;
-
- Register key_reg = r0;
- Register receiver_reg = r1;
- Register elements_reg = r2;
- Register heap_number_reg = r2;
- Register indexed_double_offset = r3;
- Register scratch = r4;
- Register scratch2 = r5;
- Register scratch3 = r6;
- Register heap_number_map = r7;
-
- // This stub is meant to be tail-jumped to, the receiver must already
- // have been verified by the caller to not be a smi.
-
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, key_reg, r4, r5, d1, d2, &miss_force_generic);
-
- // Get the elements array.
- __ ldr(elements_reg,
- FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
-
- // Check that the key is within bounds.
- __ ldr(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
- __ cmp(key_reg, Operand(scratch));
- __ b(hs, &miss_force_generic);
-
- // Load the upper word of the double in the fixed array and test for NaN.
- __ add(indexed_double_offset, elements_reg,
- Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize));
- uint32_t upper_32_offset = FixedArray::kHeaderSize + sizeof(kHoleNanLower32);
- __ ldr(scratch, FieldMemOperand(indexed_double_offset, upper_32_offset));
- __ cmp(scratch, Operand(kHoleNanUpper32));
- __ b(&miss_force_generic, eq);
-
- // Non-NaN. Allocate a new heap number and copy the double value into it.
- __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(heap_number_reg, scratch2, scratch3,
- heap_number_map, &slow_allocate_heapnumber, TAG_RESULT);
-
- // Don't need to reload the upper 32 bits of the double, it's already in
- // scratch.
- __ str(scratch, FieldMemOperand(heap_number_reg,
- HeapNumber::kExponentOffset));
- __ ldr(scratch, FieldMemOperand(indexed_double_offset,
- FixedArray::kHeaderSize));
- __ str(scratch, FieldMemOperand(heap_number_reg,
- HeapNumber::kMantissaOffset));
-
- __ mov(r0, heap_number_reg);
- __ Ret();
-
- __ bind(&slow_allocate_heapnumber);
- Handle<Code> slow_ic =
- masm->isolate()->builtins()->KeyedLoadIC_Slow();
- __ Jump(slow_ic, RelocInfo::CODE_TARGET);
-
- __ bind(&miss_force_generic);
- Handle<Code> miss_ic =
- masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
- __ Jump(miss_ic, RelocInfo::CODE_TARGET);
-}
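
The hole test above works because the hole is stored as one fixed NaN bit pattern, so comparing only the upper 32 bits of the element suffices; V8 canonicalizes every other NaN it writes into double arrays. In C++ terms (the constant's value is assumed here for illustration; V8 defines kHoleNanUpper32 centrally):

    #include <cstdint>
    #include <cstring>

    static const uint32_t kAssumedHoleUpper32 = 0x7FF7FFFF;  // stand-in value

    static bool IsHole(double element) {
      uint64_t bits;
      std::memcpy(&bits, &element, sizeof bits);
      return static_cast<uint32_t>(bits >> 32) == kAssumedHoleUpper32;
    }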
-
-
void KeyedStoreStubCompiler::GenerateStoreFastElement(
MacroAssembler* masm,
bool is_js_array,
@@ -4648,9 +3927,12 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
// -- r1 : key
// -- r2 : receiver
// -- lr : return address
- // -- r3 : scratch
+ // -- r3 : scratch (elements backing store)
// -- r4 : scratch
// -- r5 : scratch
+ // -- r6 : scratch
+ // -- r7 : scratch
+ // -- r9 : scratch
// -----------------------------------
Label miss_force_generic, transition_elements_kind, grow, slow;
Label finish_store, check_capacity;
@@ -4663,6 +3945,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
Register scratch2 = r5;
Register scratch3 = r6;
Register scratch4 = r7;
+ Register scratch5 = r9;
Register length_reg = r7;
// This stub is meant to be tail-jumped to, the receiver must already
@@ -4693,7 +3976,6 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
__ bind(&finish_store);
__ StoreNumberToDoubleElements(value_reg,
key_reg,
- receiver_reg,
// All registers after this are overwritten.
elements_reg,
scratch1,
@@ -4742,8 +4024,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
__ AllocateInNewSpace(size, elements_reg, scratch1, scratch2, &slow,
TAG_OBJECT);
- // Initialize the new FixedDoubleArray. Leave elements uninitialized for
- // efficiency; they are guaranteed to be initialized before use.
+ // Initialize the new FixedDoubleArray.
__ LoadRoot(scratch1, Heap::kFixedDoubleArrayMapRootIndex);
__ str(scratch1, FieldMemOperand(elements_reg, JSObject::kMapOffset));
__ mov(scratch1,
@@ -4751,6 +4032,25 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
__ str(scratch1,
FieldMemOperand(elements_reg, FixedDoubleArray::kLengthOffset));
+ __ mov(scratch1, elements_reg);
+ __ StoreNumberToDoubleElements(value_reg,
+ key_reg,
+ // All registers after this are overwritten.
+ scratch1,
+ scratch2,
+ scratch3,
+ scratch4,
+ scratch5,
+ &transition_elements_kind);
+
+ __ mov(scratch1, Operand(kHoleNanLower32));
+ __ mov(scratch2, Operand(kHoleNanUpper32));
+ for (int i = 1; i < JSArray::kPreallocatedArrayElements; i++) {
+ int offset = FixedDoubleArray::OffsetOfElementAt(i);
+ __ str(scratch1, FieldMemOperand(elements_reg, offset));
+ __ str(scratch2, FieldMemOperand(elements_reg, offset + kPointerSize));
+ }
+
// Install the new backing store in the JSArray.
__ str(elements_reg,
FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
@@ -4763,7 +4063,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
__ str(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
__ ldr(elements_reg,
FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
- __ jmp(&finish_store);
+ __ Ret();
__ bind(&check_capacity);
// Make sure that the backing store can hold additional elements.
diff --git a/src/3rdparty/v8/src/array.js b/src/3rdparty/v8/src/array.js
index 37053ce..9b0bfe1 100644
--- a/src/3rdparty/v8/src/array.js
+++ b/src/3rdparty/v8/src/array.js
@@ -413,6 +413,7 @@ function ArrayJoin(separator) {
["Array.prototype.join"]);
}
+ var length = TO_UINT32(this.length);
if (IS_UNDEFINED(separator)) {
separator = ',';
} else if (!IS_STRING(separator)) {
@@ -422,7 +423,7 @@ function ArrayJoin(separator) {
var result = %_FastAsciiArrayJoin(this, separator);
if (!IS_UNDEFINED(result)) return result;
- return Join(this, TO_UINT32(this.length), separator, ConvertToString);
+ return Join(this, length, separator, ConvertToString);
}
@@ -1558,6 +1559,12 @@ function SetUpArray() {
"push", getFunction("push", ArrayPush),
"splice", getFunction("splice", ArraySplice)
));
+
+ SetUpLockedPrototype(InternalPackedArray, $Array(), $Array(
+ "join", getFunction("join", ArrayJoin),
+ "pop", getFunction("pop", ArrayPop),
+ "push", getFunction("push", ArrayPush)
+ ));
}
SetUpArray();
diff --git a/src/3rdparty/v8/src/assembler.cc b/src/3rdparty/v8/src/assembler.cc
index c0867dd..2cd9114 100644
--- a/src/3rdparty/v8/src/assembler.cc
+++ b/src/3rdparty/v8/src/assembler.cc
@@ -91,6 +91,7 @@ namespace internal {
struct DoubleConstant BASE_EMBEDDED {
double min_int;
double one_half;
+ double minus_one_half;
double minus_zero;
double zero;
double uint8_max_value;
@@ -103,10 +104,15 @@ static DoubleConstant double_constants;
const char* const RelocInfo::kFillerCommentString = "DEOPTIMIZATION PADDING";
+static bool math_exp_data_initialized = false;
+static Mutex* math_exp_data_mutex = NULL;
+static double* math_exp_constants_array = NULL;
+static double* math_exp_log_table_array = NULL;
+
// -----------------------------------------------------------------------------
// Implementation of AssemblerBase
-AssemblerBase::AssemblerBase(Isolate* isolate)
+AssemblerBase::AssemblerBase(Isolate* isolate, void* buffer, int buffer_size)
: isolate_(isolate),
jit_cookie_(0),
emit_debug_code_(FLAG_debug_code),
@@ -114,6 +120,62 @@ AssemblerBase::AssemblerBase(Isolate* isolate)
if (FLAG_mask_constants_with_cookie && isolate != NULL) {
jit_cookie_ = V8::RandomPrivate(isolate);
}
+
+ if (buffer == NULL) {
+ // Do our own buffer management.
+ if (buffer_size <= kMinimalBufferSize) {
+ buffer_size = kMinimalBufferSize;
+ if (isolate->assembler_spare_buffer() != NULL) {
+ buffer = isolate->assembler_spare_buffer();
+ isolate->set_assembler_spare_buffer(NULL);
+ }
+ }
+ if (buffer == NULL) buffer = NewArray<byte>(buffer_size);
+ own_buffer_ = true;
+ } else {
+ // Use externally provided buffer instead.
+ ASSERT(buffer_size > 0);
+ own_buffer_ = false;
+ }
+ buffer_ = static_cast<byte*>(buffer);
+ buffer_size_ = buffer_size;
+
+ pc_ = buffer_;
+}
+
+
+AssemblerBase::~AssemblerBase() {
+ if (own_buffer_) {
+ if (isolate() != NULL &&
+ isolate()->assembler_spare_buffer() == NULL &&
+ buffer_size_ == kMinimalBufferSize) {
+ isolate()->set_assembler_spare_buffer(buffer_);
+ } else {
+ DeleteArray(buffer_);
+ }
+ }
+}
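
AssemblerBase now centralizes the buffer policy each architecture's assembler used to duplicate: use a caller-supplied buffer without taking ownership, otherwise allocate one (recycling a single per-isolate spare of minimal size), and on destruction either park a minimal buffer back in the spare slot or free it. The same ownership rules condensed into a stand-alone sketch (a static stands in for the per-isolate spare slot):

    #include <cstddef>

    class OwnedBuffer {
     public:
      static const int kMinimalBufferSize = 4 * 1024;
      OwnedBuffer(void* buffer, int size) {
        if (buffer == NULL) {
          if (size <= kMinimalBufferSize) {
            size = kMinimalBufferSize;
            if (spare_ != NULL) { buffer = spare_; spare_ = NULL; }  // reuse spare
          }
          if (buffer == NULL) buffer = new unsigned char[size];
          own_ = true;
        } else {
          own_ = false;  // external buffer: the caller frees it
        }
        buffer_ = static_cast<unsigned char*>(buffer);
        size_ = size;
      }
      ~OwnedBuffer() {
        if (!own_) return;
        if (spare_ == NULL && size_ == kMinimalBufferSize) {
          spare_ = buffer_;  // keep one minimal buffer for the next assembler
        } else {
          delete[] buffer_;
        }
      }
     private:
      static unsigned char* spare_;
      unsigned char* buffer_;
      int size_;
      bool own_;
    };
    unsigned char* OwnedBuffer::spare_ = NULL;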
+
+
+// -----------------------------------------------------------------------------
+// Implementation of PredictableCodeSizeScope
+
+PredictableCodeSizeScope::PredictableCodeSizeScope(AssemblerBase* assembler,
+ int expected_size)
+ : assembler_(assembler),
+ expected_size_(expected_size),
+ start_offset_(assembler->pc_offset()),
+ old_value_(assembler->predictable_code_size()) {
+ assembler_->set_predictable_code_size(true);
+}
+
+
+PredictableCodeSizeScope::~PredictableCodeSizeScope() {
+ // TODO(svenpanne) Remove the 'if' when everything works.
+ if (expected_size_ >= 0) {
+ CHECK_EQ(expected_size_, assembler_->pc_offset() - start_offset_);
+ }
+ assembler_->set_predictable_code_size(old_value_);
}
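
PredictableCodeSizeScope gains a contract: it records the pc offset on entry and, on exit, checks that exactly expected_size bytes were emitted (a negative expected size opts out, per the TODO). The RAII pattern in miniature, with a trivial emitter standing in for AssemblerBase:

    #include <cassert>

    struct Emitter {        // stand-in for AssemblerBase
      int pc;
      bool predictable;
      int pc_offset() const { return pc; }
    };

    class SizeCheckScope {
     public:
      SizeCheckScope(Emitter* e, int expected)
          : e_(e), expected_(expected),
            start_(e->pc_offset()), old_(e->predictable) {
        e_->predictable = true;   // only fixed-size code inside the scope
      }
      ~SizeCheckScope() {
        if (expected_ >= 0) {
          assert(e_->pc_offset() - start_ == expected_);
        }
        e_->predictable = old_;
      }
     private:
      Emitter* e_;
      int expected_;
      int start_;
      bool old_;
    };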
@@ -628,11 +690,28 @@ RelocIterator::RelocIterator(const CodeDesc& desc, int mode_mask) {
// Implementation of RelocInfo
+#ifdef DEBUG
+bool RelocInfo::RequiresRelocation(const CodeDesc& desc) {
+ // Ensure there are no code targets or embedded objects present in the
+ // deoptimization entries, they would require relocation after code
+ // generation.
+ int mode_mask = RelocInfo::kCodeTargetMask |
+ RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
+ RelocInfo::ModeMask(RelocInfo::GLOBAL_PROPERTY_CELL) |
+ RelocInfo::kApplyMask;
+ RelocIterator it(desc, mode_mask);
+ return !it.done();
+}
+#endif
+
+
#ifdef ENABLE_DISASSEMBLER
const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) {
switch (rmode) {
- case RelocInfo::NONE:
- return "no reloc";
+ case RelocInfo::NONE32:
+ return "no reloc 32";
+ case RelocInfo::NONE64:
+ return "no reloc 64";
case RelocInfo::EMBEDDED_OBJECT:
return "embedded object";
case RelocInfo::CONSTRUCT_CALL:
@@ -755,7 +834,8 @@ void RelocInfo::Verify() {
case INTERNAL_REFERENCE:
case CONST_POOL:
case DEBUG_BREAK_SLOT:
- case NONE:
+ case NONE32:
+ case NONE64:
break;
case NUMBER_OF_MODES:
UNREACHABLE();
@@ -774,12 +854,77 @@ void RelocInfo::Verify() {
void ExternalReference::SetUp() {
double_constants.min_int = kMinInt;
double_constants.one_half = 0.5;
+ double_constants.minus_one_half = -0.5;
double_constants.minus_zero = -0.0;
double_constants.uint8_max_value = 255;
double_constants.zero = 0.0;
double_constants.canonical_non_hole_nan = OS::nan_value();
double_constants.the_hole_nan = BitCast<double>(kHoleNanInt64);
double_constants.negative_infinity = -V8_INFINITY;
+
+ math_exp_data_mutex = OS::CreateMutex();
+}
+
+
+void ExternalReference::InitializeMathExpData() {
+ // Return early if the data has already been initialized.
+ if (math_exp_data_initialized) return;
+
+ math_exp_data_mutex->Lock();
+ if (!math_exp_data_initialized) {
+ // If this is changed, generated code must be adapted too.
+ const int kTableSizeBits = 11;
+ const int kTableSize = 1 << kTableSizeBits;
+ const double kTableSizeDouble = static_cast<double>(kTableSize);
+
+ math_exp_constants_array = new double[9];
+ // Input values smaller than this always return 0.
+ math_exp_constants_array[0] = -708.39641853226408;
+ // Input values larger than this always return +Infinity.
+ math_exp_constants_array[1] = 709.78271289338397;
+ math_exp_constants_array[2] = V8_INFINITY;
+ // The rest is black magic. Do not attempt to understand it. It is
+ // loosely based on the "expd" function published at:
+ // http://herumi.blogspot.com/2011/08/fast-double-precision-exponential.html
+ const double constant3 = (1 << kTableSizeBits) / log(2.0);
+ math_exp_constants_array[3] = constant3;
+ math_exp_constants_array[4] =
+ static_cast<double>(static_cast<int64_t>(3) << 51);
+ math_exp_constants_array[5] = 1 / constant3;
+ math_exp_constants_array[6] = 3.0000000027955394;
+ math_exp_constants_array[7] = 0.16666666685227835;
+ math_exp_constants_array[8] = 1;
+
+ math_exp_log_table_array = new double[kTableSize];
+ for (int i = 0; i < kTableSize; i++) {
+ double value = pow(2, i / kTableSizeDouble);
+
+ uint64_t bits = BitCast<uint64_t, double>(value);
+ bits &= (static_cast<uint64_t>(1) << 52) - 1;
+ double mantissa = BitCast<double, uint64_t>(bits);
+
+ // <just testing>
+ uint64_t doublebits;
+ memcpy(&doublebits, &value, sizeof doublebits);
+ doublebits &= (static_cast<uint64_t>(1) << 52) - 1;
+ double mantissa2;
+ memcpy(&mantissa2, &doublebits, sizeof mantissa2);
+ CHECK_EQ(mantissa, mantissa2);
+ // </just testing>
+
+ math_exp_log_table_array[i] = mantissa;
+ }
+
+ math_exp_data_initialized = true;
+ }
+ math_exp_data_mutex->Unlock();
+}
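
InitializeMathExpData precomputes everything the generated Math.exp code consumes: input clamps, scaling constants (the 3<<51 value implements round-to-integer via the double's low mantissa bits), and a 2048-entry table of the mantissa bits of 2^(i/2048). A self-contained sketch of the same scheme, substituting llround for the bit-trick rounding and a short Taylor tail for V8's tuned polynomial coefficients; valid only for inputs inside the clamped range the real code enforces:

    #include <cmath>
    #include <cstdint>
    #include <cstring>

    static double TableExp(double x) {
      const int kBits = 11, kSize = 1 << kBits;   // 2048 table entries
      static double table[kSize];
      static bool ready = false;
      if (!ready) {                               // same table the patch builds
        for (int i = 0; i < kSize; i++) {
          double v = std::pow(2.0, i / static_cast<double>(kSize));
          uint64_t bits;
          std::memcpy(&bits, &v, sizeof bits);
          bits &= (static_cast<uint64_t>(1) << 52) - 1;  // mantissa bits only
          std::memcpy(&table[i], &bits, sizeof bits);
        }
        ready = true;
      }
      // e^x = 2^(x/ln 2); split x/ln 2 into k/2048 plus a tiny residual r.
      int64_t k = std::llround(x * (kSize / std::log(2.0)));
      double r = x - k * (std::log(2.0) / kSize);        // |r| <= ln(2)/4096
      // Rebuild 2^(k/2048): exponent bits from k>>11, mantissa from the table.
      uint64_t bits;
      std::memcpy(&bits, &table[k & (kSize - 1)], sizeof bits);
      bits |= static_cast<uint64_t>((k >> kBits) + 1023) << 52;
      double pow2k;
      std::memcpy(&pow2k, &bits, sizeof pow2k);
      return pow2k * (1.0 + r + 0.5 * r * r);            // e^r for tiny r
    }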
+
+
+void ExternalReference::TearDownMathExpData() {
+ delete[] math_exp_constants_array;
+ delete[] math_exp_log_table_array;
+ delete math_exp_data_mutex;
}
@@ -931,6 +1076,20 @@ ExternalReference ExternalReference::compute_output_frames_function(
}
+ExternalReference ExternalReference::log_enter_external_function(
+ Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(Logger::EnterExternal)));
+}
+
+
+ExternalReference ExternalReference::log_leave_external_function(
+ Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(Logger::LeaveExternal)));
+}
+
+
ExternalReference ExternalReference::keyed_lookup_cache_keys(Isolate* isolate) {
return ExternalReference(isolate->keyed_lookup_cache()->keys_address());
}
@@ -1000,18 +1159,21 @@ ExternalReference ExternalReference::new_space_allocation_limit_address(
}
-ExternalReference ExternalReference::handle_scope_level_address() {
- return ExternalReference(HandleScope::current_level_address());
+ExternalReference ExternalReference::handle_scope_level_address(
+ Isolate* isolate) {
+ return ExternalReference(HandleScope::current_level_address(isolate));
}
-ExternalReference ExternalReference::handle_scope_next_address() {
- return ExternalReference(HandleScope::current_next_address());
+ExternalReference ExternalReference::handle_scope_next_address(
+ Isolate* isolate) {
+ return ExternalReference(HandleScope::current_next_address(isolate));
}
-ExternalReference ExternalReference::handle_scope_limit_address() {
- return ExternalReference(HandleScope::current_limit_address());
+ExternalReference ExternalReference::handle_scope_limit_address(
+ Isolate* isolate) {
+ return ExternalReference(HandleScope::current_limit_address(isolate));
}
@@ -1049,6 +1211,12 @@ ExternalReference ExternalReference::address_of_one_half() {
}
+ExternalReference ExternalReference::address_of_minus_one_half() {
+ return ExternalReference(
+ reinterpret_cast<void*>(&double_constants.minus_one_half));
+}
+
+
ExternalReference ExternalReference::address_of_minus_zero() {
return ExternalReference(
reinterpret_cast<void*>(&double_constants.minus_zero));
@@ -1217,12 +1385,45 @@ ExternalReference ExternalReference::math_log_double_function(
}
+ExternalReference ExternalReference::math_exp_constants(int constant_index) {
+ ASSERT(math_exp_data_initialized);
+ return ExternalReference(
+ reinterpret_cast<void*>(math_exp_constants_array + constant_index));
+}
+
+
+ExternalReference ExternalReference::math_exp_log_table() {
+ ASSERT(math_exp_data_initialized);
+ return ExternalReference(reinterpret_cast<void*>(math_exp_log_table_array));
+}
+
+
ExternalReference ExternalReference::page_flags(Page* page) {
return ExternalReference(reinterpret_cast<Address>(page) +
MemoryChunk::kFlagsOffset);
}
+ExternalReference ExternalReference::ForDeoptEntry(Address entry) {
+ return ExternalReference(entry);
+}
+
+
+double power_helper(double x, double y) {
+ int y_int = static_cast<int>(y);
+ if (y == y_int) {
+ return power_double_int(x, y_int); // Returns 1 if exponent is 0.
+ }
+ if (y == 0.5) {
+ return (isinf(x)) ? V8_INFINITY : fast_sqrt(x + 0.0); // Convert -0 to +0.
+ }
+ if (y == -0.5) {
+ return (isinf(x)) ? 0 : 1.0 / fast_sqrt(x + 0.0); // Convert -0 to +0.
+ }
+ return power_double_double(x, y);
+}
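
The ±0.5 exponents are special-cased because pow's spec semantics diverge from a naive sqrt at two edge inputs: sqrt preserves the sign of negative zero, and sqrt(-Infinity) is NaN while pow(-Infinity, 0.5) must be +Infinity. The x + 0.0 addition and the isinf guard cover exactly those cases, as this small check shows:

    #include <cmath>
    #include <cstdio>

    int main() {
      std::printf("%g\n", std::sqrt(-0.0));        // -0: sqrt keeps the zero's sign
      std::printf("%g\n", std::sqrt(-0.0 + 0.0));  // +0: adding 0.0 normalizes -0
      std::printf("%g\n", std::sqrt(-INFINITY));   // nan; pow(-inf, 0.5) is +inf,
      return 0;                                    // hence the explicit isinf() test
    }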
+
+
// Helper function to compute x^y, where y is known to be an
// integer. Uses binary decomposition to limit the number of
// multiplications; see the discussion in "Hacker's Delight" by Henry
@@ -1362,6 +1563,10 @@ void PositionsRecorder::RecordPosition(int pos) {
gdbjit_lineinfo_->SetPosition(assembler_->pc_offset(), pos, false);
}
#endif
+ LOG_CODE_EVENT(assembler_->isolate(),
+ CodeLinePosInfoAddPositionEvent(jit_handler_data_,
+ assembler_->pc_offset(),
+ pos));
}
@@ -1374,6 +1579,11 @@ void PositionsRecorder::RecordStatementPosition(int pos) {
gdbjit_lineinfo_->SetPosition(assembler_->pc_offset(), pos, true);
}
#endif
+ LOG_CODE_EVENT(assembler_->isolate(),
+ CodeLinePosInfoAddStatementPositionEvent(
+ jit_handler_data_,
+ assembler_->pc_offset(),
+ pos));
}
diff --git a/src/3rdparty/v8/src/assembler.h b/src/3rdparty/v8/src/assembler.h
index 037799d..06c3b76 100644
--- a/src/3rdparty/v8/src/assembler.h
+++ b/src/3rdparty/v8/src/assembler.h
@@ -56,7 +56,8 @@ struct StatsCounter;
class AssemblerBase: public Malloced {
public:
- explicit AssemblerBase(Isolate* isolate);
+ AssemblerBase(Isolate* isolate, void* buffer, int buffer_size);
+ virtual ~AssemblerBase();
Isolate* isolate() const { return isolate_; }
int jit_cookie() const { return jit_cookie_; }
@@ -71,6 +72,20 @@ class AssemblerBase: public Malloced {
// cross-snapshotting.
static void QuietNaN(HeapObject* nan) { }
+ int pc_offset() const { return static_cast<int>(pc_ - buffer_); }
+
+ static const int kMinimalBufferSize = 4*KB;
+
+ protected:
+ // The buffer into which code and relocation info are generated. It could
+ // either be owned by the assembler or be provided externally.
+ byte* buffer_;
+ int buffer_size_;
+ bool own_buffer_;
+
+ // The program counter, which points into the buffer above and moves forward.
+ byte* pc_;
+
private:
Isolate* isolate_;
int jit_cookie_;
@@ -83,18 +98,13 @@ class AssemblerBase: public Malloced {
// snapshot and the running VM.
class PredictableCodeSizeScope {
public:
- explicit PredictableCodeSizeScope(AssemblerBase* assembler)
- : assembler_(assembler) {
- old_value_ = assembler_->predictable_code_size();
- assembler_->set_predictable_code_size(true);
- }
-
- ~PredictableCodeSizeScope() {
- assembler_->set_predictable_code_size(old_value_);
- }
+ PredictableCodeSizeScope(AssemblerBase* assembler, int expected_size);
+ ~PredictableCodeSizeScope();
private:
AssemblerBase* assembler_;
+ int expected_size_;
+ int start_offset_;
bool old_value_;
};
@@ -238,7 +248,8 @@ class RelocInfo BASE_EMBEDDED {
// add more as needed
// Pseudo-types
NUMBER_OF_MODES, // There are at most 15 modes with noncompact encoding.
- NONE, // never recorded
+ NONE32, // never recorded 32-bit value
+ NONE64, // never recorded 64-bit value
CODE_AGE_SEQUENCE, // Not stored in RelocInfo array, used explicitly by
// code aging.
FIRST_REAL_RELOC_MODE = CODE_TARGET,
@@ -258,6 +269,9 @@ class RelocInfo BASE_EMBEDDED {
RelocInfo(byte* pc, Mode rmode, intptr_t data, Code* host)
: pc_(pc), rmode_(rmode), data_(data), host_(host) {
}
+ RelocInfo(byte* pc, double data64)
+ : pc_(pc), rmode_(NONE64), data64_(data64), host_(NULL) {
+ }
static inline bool IsRealRelocMode(Mode mode) {
return mode >= FIRST_REAL_RELOC_MODE &&
@@ -305,6 +319,9 @@ class RelocInfo BASE_EMBEDDED {
static inline bool IsDebugBreakSlot(Mode mode) {
return mode == DEBUG_BREAK_SLOT;
}
+ static inline bool IsNone(Mode mode) {
+ return mode == NONE32 || mode == NONE64;
+ }
static inline bool IsCodeAgeSequence(Mode mode) {
return mode == CODE_AGE_SEQUENCE;
}
@@ -315,6 +332,7 @@ class RelocInfo BASE_EMBEDDED {
void set_pc(byte* pc) { pc_ = pc; }
Mode rmode() const { return rmode_; }
intptr_t data() const { return data_; }
+ double data64() const { return data64_; }
Code* host() const { return host_; }
// Apply a relocation by delta bytes
@@ -391,6 +409,12 @@ class RelocInfo BASE_EMBEDDED {
// debugger.
INLINE(bool IsPatchedDebugBreakSlotSequence());
+#ifdef DEBUG
+ // Check whether the given code contains relocation information that
+ // either is position-relative or movable by the garbage collector.
+ static bool RequiresRelocation(const CodeDesc& desc);
+#endif
+
#ifdef ENABLE_DISASSEMBLER
// Printing
static const char* RelocModeName(Mode rmode);
@@ -413,7 +437,10 @@ class RelocInfo BASE_EMBEDDED {
// comment).
byte* pc_;
Mode rmode_;
- intptr_t data_;
+ union {
+ intptr_t data_;
+ double data64_;
+ };
Code* host_;
// Code and Embedded Object pointers on some platforms are stored split
// across two consecutive 32-bit instructions. Heap management
@@ -594,6 +621,8 @@ class ExternalReference BASE_EMBEDDED {
};
static void SetUp();
+ static void InitializeMathExpData();
+ static void TearDownMathExpData();
typedef void* ExternalReferenceRedirector(void* original, Type type);
@@ -649,6 +678,10 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference new_deoptimizer_function(Isolate* isolate);
static ExternalReference compute_output_frames_function(Isolate* isolate);
+ // Log support.
+ static ExternalReference log_enter_external_function(Isolate* isolate);
+ static ExternalReference log_leave_external_function(Isolate* isolate);
+
// Static data in the keyed lookup cache.
static ExternalReference keyed_lookup_cache_keys(Isolate* isolate);
static ExternalReference keyed_lookup_cache_field_offsets(Isolate* isolate);
@@ -691,9 +724,9 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference power_double_double_function(Isolate* isolate);
static ExternalReference power_double_int_function(Isolate* isolate);
- static ExternalReference handle_scope_next_address();
- static ExternalReference handle_scope_limit_address();
- static ExternalReference handle_scope_level_address();
+ static ExternalReference handle_scope_next_address(Isolate* isolate);
+ static ExternalReference handle_scope_limit_address(Isolate* isolate);
+ static ExternalReference handle_scope_level_address(Isolate* isolate);
static ExternalReference scheduled_exception_address(Isolate* isolate);
static ExternalReference address_of_pending_message_obj(Isolate* isolate);
@@ -703,6 +736,7 @@ class ExternalReference BASE_EMBEDDED {
// Static variables containing common double constants.
static ExternalReference address_of_min_int();
static ExternalReference address_of_one_half();
+ static ExternalReference address_of_minus_one_half();
static ExternalReference address_of_minus_zero();
static ExternalReference address_of_zero();
static ExternalReference address_of_uint8_max_value();
@@ -715,8 +749,15 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference math_tan_double_function(Isolate* isolate);
static ExternalReference math_log_double_function(Isolate* isolate);
+ static ExternalReference math_exp_constants(int constant_index);
+ static ExternalReference math_exp_log_table();
+
static ExternalReference page_flags(Page* page);
+ static ExternalReference ForDeoptEntry(Address entry);
+
+ static ExternalReference cpu_features();
+
Address address() const {return reinterpret_cast<Address>(address_);}
#ifdef ENABLE_DEBUGGER_SUPPORT
@@ -810,6 +851,7 @@ class PositionsRecorder BASE_EMBEDDED {
#ifdef ENABLE_GDB_JIT_INTERFACE
gdbjit_lineinfo_ = NULL;
#endif
+ jit_handler_data_ = NULL;
}
#ifdef ENABLE_GDB_JIT_INTERFACE
@@ -829,7 +871,15 @@ class PositionsRecorder BASE_EMBEDDED {
return lineinfo;
}
#endif
+ void AttachJITHandlerData(void* user_data) {
+ jit_handler_data_ = user_data;
+ }
+ void* DetachJITHandlerData() {
+ void* old_data = jit_handler_data_;
+ jit_handler_data_ = NULL;
+ return old_data;
+ }
// Set current position to pos.
void RecordPosition(int pos);
@@ -852,6 +902,9 @@ class PositionsRecorder BASE_EMBEDDED {
GDBJITLineInfo* gdbjit_lineinfo_;
#endif
+ // Currently jit_handler_data_ is used to store JITHandler-specific data
+ // over the lifetime of a PositionsRecorder.
+ void* jit_handler_data_;
friend class PreservePositionScope;
DISALLOW_COPY_AND_ASSIGN(PositionsRecorder);
@@ -916,6 +969,7 @@ inline int NumberOfBitsSet(uint32_t x) {
bool EvalComparison(Token::Value op, double op1, double op2);
// Computes pow(x, y) with the special cases in the spec for Math.pow.
+double power_helper(double x, double y);
double power_double_int(double x, int y);
double power_double_double(double x, double y);
diff --git a/src/3rdparty/v8/src/ast.cc b/src/3rdparty/v8/src/ast.cc
index 5d7baf2..dc5865e 100644
--- a/src/3rdparty/v8/src/ast.cc
+++ b/src/3rdparty/v8/src/ast.cc
@@ -29,6 +29,7 @@
#include <math.h> // For isfinite.
#include "builtins.h"
+#include "code-stubs.h"
#include "conversions.h"
#include "hashmap.h"
#include "parser.h"
@@ -96,13 +97,14 @@ VariableProxy::VariableProxy(Isolate* isolate,
position_(position),
interface_(interface) {
// Names must be canonicalized for fast equality checks.
- ASSERT(name->IsSymbol());
+ ASSERT(name->IsInternalizedString());
}
void VariableProxy::BindTo(Variable* var) {
ASSERT(var_ == NULL); // must be bound only once
ASSERT(var != NULL); // must bind
+ ASSERT(!FLAG_harmony_modules || interface_->IsUnified(var->interface()));
ASSERT((is_this() && var->is_this()) || name_.is_identical_to(var->name()));
// Ideally CONST-ness should match. However, this is very hard to achieve
// because we don't know the exact semantics of conflicting (const and
@@ -185,8 +187,8 @@ ObjectLiteral::Property::Property(Literal* key,
key_ = key;
value_ = value;
Object* k = *key->handle();
- if (k->IsSymbol() &&
- isolate->heap()->Proto_symbol()->Equals(String::cast(k))) {
+ if (k->IsInternalizedString() &&
+ isolate->heap()->proto_string()->Equals(String::cast(k))) {
kind_ = PROTOTYPE;
} else if (value_->AsMaterializedLiteral() != NULL) {
kind_ = MATERIALIZED_LITERAL;
@@ -416,12 +418,14 @@ void Property::RecordTypeFeedback(TypeFeedbackOracle* oracle,
is_monomorphic_ = oracle->LoadIsMonomorphicNormal(this);
receiver_types_.Clear();
if (key()->IsPropertyName()) {
- if (oracle->LoadIsBuiltin(this, Builtins::kLoadIC_ArrayLength)) {
+ ArrayLengthStub array_stub(Code::LOAD_IC);
+ FunctionPrototypeStub proto_stub(Code::LOAD_IC);
+ StringLengthStub string_stub(Code::LOAD_IC, false);
+ if (oracle->LoadIsStub(this, &array_stub)) {
is_array_length_ = true;
- } else if (oracle->LoadIsBuiltin(this, Builtins::kLoadIC_StringLength)) {
+ } else if (oracle->LoadIsStub(this, &string_stub)) {
is_string_length_ = true;
- } else if (oracle->LoadIsBuiltin(this,
- Builtins::kLoadIC_FunctionPrototype)) {
+ } else if (oracle->LoadIsStub(this, &proto_stub)) {
is_function_prototype_ = true;
} else {
Literal* lit_key = key()->AsLiteral();
@@ -434,7 +438,7 @@ void Property::RecordTypeFeedback(TypeFeedbackOracle* oracle,
} else if (is_monomorphic_) {
receiver_types_.Add(oracle->LoadMonomorphicReceiverType(this),
zone);
- } else if (oracle->LoadIsMegamorphicWithTypeInfo(this)) {
+ } else if (oracle->LoadIsPolymorphic(this)) {
receiver_types_.Reserve(kMaxKeyedPolymorphism, zone);
oracle->CollectKeyedReceiverTypes(PropertyFeedbackId(), &receiver_types_);
}
@@ -456,7 +460,7 @@ void Assignment::RecordTypeFeedback(TypeFeedbackOracle* oracle,
} else if (is_monomorphic_) {
// Record receiver type for monomorphic keyed stores.
receiver_types_.Add(oracle->StoreMonomorphicReceiverType(id), zone);
- } else if (oracle->StoreIsMegamorphicWithTypeInfo(id)) {
+ } else if (oracle->StoreIsPolymorphic(id)) {
receiver_types_.Reserve(kMaxKeyedPolymorphism, zone);
oracle->CollectKeyedReceiverTypes(id, &receiver_types_);
}
@@ -472,7 +476,7 @@ void CountOperation::RecordTypeFeedback(TypeFeedbackOracle* oracle,
// Record receiver type for monomorphic keyed stores.
receiver_types_.Add(
oracle->StoreMonomorphicReceiverType(id), zone);
- } else if (oracle->StoreIsMegamorphicWithTypeInfo(id)) {
+ } else if (oracle->StoreIsPolymorphic(id)) {
receiver_types_.Reserve(kMaxKeyedPolymorphism, zone);
oracle->CollectKeyedReceiverTypes(id, &receiver_types_);
}
@@ -481,11 +485,12 @@ void CountOperation::RecordTypeFeedback(TypeFeedbackOracle* oracle,
void CaseClause::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
TypeInfo info = oracle->SwitchType(this);
+ if (info.IsUninitialized()) info = TypeInfo::Unknown();
if (info.IsSmi()) {
compare_type_ = SMI_ONLY;
- } else if (info.IsSymbol()) {
- compare_type_ = SYMBOL_ONLY;
- } else if (info.IsNonSymbol()) {
+ } else if (info.IsInternalizedString()) {
+ compare_type_ = NAME_ONLY;
+ } else if (info.IsNonInternalizedString()) {
compare_type_ = STRING_ONLY;
} else if (info.IsNonPrimitive()) {
compare_type_ = OBJECT_ONLY;
@@ -610,18 +615,7 @@ void CallNew::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
is_monomorphic_ = oracle->CallNewIsMonomorphic(this);
if (is_monomorphic_) {
target_ = oracle->GetCallNewTarget(this);
- }
-}
-
-
-void CompareOperation::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
- TypeInfo info = oracle->CompareType(this);
- if (info.IsSmi()) {
- compare_type_ = SMI_ONLY;
- } else if (info.IsNonPrimitive()) {
- compare_type_ = OBJECT_ONLY;
- } else {
- ASSERT(compare_type_ == NONE);
+ elements_kind_ = oracle->GetCallNewElementsKind(this);
}
}
@@ -636,14 +630,6 @@ void ObjectLiteral::Property::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
// ----------------------------------------------------------------------------
// Implementation of AstVisitor
-bool AstVisitor::CheckStackOverflow() {
- if (stack_overflow_) return true;
- StackLimitCheck check(isolate_);
- if (!check.HasOverflowed()) return false;
- return (stack_overflow_ = true);
-}
-
-
void AstVisitor::VisitDeclarations(ZoneList<Declaration*>* declarations) {
for (int i = 0; i < declarations->length(); i++) {
Visit(declarations->at(i));
@@ -1080,16 +1066,14 @@ REGULAR_NODE(CallNew)
// LOOKUP variables only result from constructs that cannot be inlined anyway.
REGULAR_NODE(VariableProxy)
-// We currently do not optimize any modules. Note in particular, that module
-// instance objects associated with ModuleLiterals are allocated during
-// scope resolution, and references to them are embedded into the code.
-// That code may hence neither be cached nor re-compiled.
+// We currently do not optimize any modules.
DONT_OPTIMIZE_NODE(ModuleDeclaration)
DONT_OPTIMIZE_NODE(ImportDeclaration)
DONT_OPTIMIZE_NODE(ExportDeclaration)
DONT_OPTIMIZE_NODE(ModuleVariable)
DONT_OPTIMIZE_NODE(ModulePath)
DONT_OPTIMIZE_NODE(ModuleUrl)
+DONT_OPTIMIZE_NODE(ModuleStatement)
DONT_OPTIMIZE_NODE(WithStatement)
DONT_OPTIMIZE_NODE(TryCatchStatement)
DONT_OPTIMIZE_NODE(TryFinallyStatement)
@@ -1113,8 +1097,9 @@ void AstConstructionVisitor::VisitCallRuntime(CallRuntime* node) {
// optimize them.
add_flag(kDontInline);
} else if (node->function()->intrinsic_type == Runtime::INLINE &&
- (node->name()->IsEqualTo(CStrVector("_ArgumentsLength")) ||
- node->name()->IsEqualTo(CStrVector("_Arguments")))) {
+ (node->name()->IsOneByteEqualTo(
+ STATIC_ASCII_VECTOR("_ArgumentsLength")) ||
+ node->name()->IsOneByteEqualTo(STATIC_ASCII_VECTOR("_Arguments")))) {
// Don't inline the %_ArgumentsLength or %_Arguments because their
// implementation will not work. There is no stack frame to get them
// from.
diff --git a/src/3rdparty/v8/src/ast.h b/src/3rdparty/v8/src/ast.h
index d3f90b2..f7e23e8 100644
--- a/src/3rdparty/v8/src/ast.h
+++ b/src/3rdparty/v8/src/ast.h
@@ -75,6 +75,7 @@ namespace internal {
#define STATEMENT_NODE_LIST(V) \
V(Block) \
+ V(ModuleStatement) \
V(ExpressionStatement) \
V(EmptyStatement) \
V(IfStatement) \
@@ -522,7 +523,7 @@ class ModuleDeclaration: public Declaration {
ModuleDeclaration(VariableProxy* proxy,
Module* module,
Scope* scope)
- : Declaration(proxy, LET, scope),
+ : Declaration(proxy, MODULE, scope),
module_(module) {
}
@@ -645,6 +646,25 @@ class ModuleUrl: public Module {
};
+class ModuleStatement: public Statement {
+ public:
+ DECLARE_NODE_TYPE(ModuleStatement)
+
+ VariableProxy* proxy() const { return proxy_; }
+ Block* body() const { return body_; }
+
+ protected:
+ ModuleStatement(VariableProxy* proxy, Block* body)
+ : proxy_(proxy),
+ body_(body) {
+ }
+
+ private:
+ VariableProxy* proxy_;
+ Block* body_;
+};
+
+
class IterationStatement: public BreakableStatement {
public:
// Type testing & conversion.
@@ -948,7 +968,7 @@ class CaseClause: public ZoneObject {
TypeFeedbackId CompareId() { return compare_id_; }
void RecordTypeFeedback(TypeFeedbackOracle* oracle);
bool IsSmiCompare() { return compare_type_ == SMI_ONLY; }
- bool IsSymbolCompare() { return compare_type_ == SYMBOL_ONLY; }
+ bool IsNameCompare() { return compare_type_ == NAME_ONLY; }
bool IsStringCompare() { return compare_type_ == STRING_ONLY; }
bool IsObjectCompare() { return compare_type_ == OBJECT_ONLY; }
@@ -960,7 +980,7 @@ class CaseClause: public ZoneObject {
enum CompareTypeFeedback {
NONE,
SMI_ONLY,
- SYMBOL_ONLY,
+ NAME_ONLY,
STRING_ONLY,
OBJECT_ONLY
};
@@ -1151,7 +1171,7 @@ class Literal: public Expression {
DECLARE_NODE_TYPE(Literal)
virtual bool IsPropertyName() {
- if (handle_->IsSymbol()) {
+ if (handle_->IsInternalizedString()) {
uint32_t ignored;
return !String::cast(*handle_)->AsArrayIndex(&ignored);
}
@@ -1417,7 +1437,7 @@ class VariableProxy: public Expression {
void MarkAsTrivial() { is_trivial_ = true; }
void MarkAsLValue() { is_lvalue_ = true; }
- // Bind this proxy to the variable var.
+ // Bind this proxy to the variable var. Interfaces must match.
void BindTo(Variable* var);
protected:
@@ -1575,6 +1595,7 @@ class CallNew: public Expression {
Handle<JSFunction> target() { return target_; }
BailoutId ReturnId() const { return return_id_; }
+ ElementsKind elements_kind() const { return elements_kind_; }
protected:
CallNew(Isolate* isolate,
@@ -1586,7 +1607,8 @@ class CallNew: public Expression {
arguments_(arguments),
pos_(pos),
is_monomorphic_(false),
- return_id_(GetNextId(isolate)) { }
+ return_id_(GetNextId(isolate)),
+ elements_kind_(GetInitialFastElementsKind()) { }
private:
Expression* expression_;
@@ -1597,6 +1619,7 @@ class CallNew: public Expression {
Handle<JSFunction> target_;
const BailoutId return_id_;
+ ElementsKind elements_kind_;
};
@@ -1777,9 +1800,6 @@ class CompareOperation: public Expression {
// Type feedback information.
TypeFeedbackId CompareOperationFeedbackId() const { return reuse(id()); }
- void RecordTypeFeedback(TypeFeedbackOracle* oracle);
- bool IsSmiCompare() { return compare_type_ == SMI_ONLY; }
- bool IsObjectCompare() { return compare_type_ == OBJECT_ONLY; }
// Match special cases.
bool IsLiteralCompareTypeof(Expression** expr, Handle<String>* check);
@@ -1796,8 +1816,7 @@ class CompareOperation: public Expression {
op_(op),
left_(left),
right_(right),
- pos_(pos),
- compare_type_(NONE) {
+ pos_(pos) {
ASSERT(Token::IsCompareOp(op));
}
@@ -1806,9 +1825,6 @@ class CompareOperation: public Expression {
Expression* left_;
Expression* right_;
int pos_;
-
- enum CompareTypeFeedback { NONE, SMI_ONLY, OBJECT_ONLY };
- CompareTypeFeedback compare_type_;
};
@@ -2481,40 +2497,51 @@ inline ModuleVariable::ModuleVariable(VariableProxy* proxy)
class AstVisitor BASE_EMBEDDED {
public:
- AstVisitor() : isolate_(Isolate::Current()), stack_overflow_(false) { }
+ AstVisitor() {}
virtual ~AstVisitor() { }
// Stack overflow check and dynamic dispatch.
- void Visit(AstNode* node) { if (!CheckStackOverflow()) node->Accept(this); }
+ virtual void Visit(AstNode* node) = 0;
// Iteration left-to-right.
virtual void VisitDeclarations(ZoneList<Declaration*>* declarations);
virtual void VisitStatements(ZoneList<Statement*>* statements);
virtual void VisitExpressions(ZoneList<Expression*>* expressions);
- // Stack overflow tracking support.
- bool HasStackOverflow() const { return stack_overflow_; }
- bool CheckStackOverflow();
-
- // If a stack-overflow exception is encountered when visiting a
- // node, calling SetStackOverflow will make sure that the visitor
- // bails out without visiting more nodes.
- void SetStackOverflow() { stack_overflow_ = true; }
- void ClearStackOverflow() { stack_overflow_ = false; }
-
// Individual AST nodes.
#define DEF_VISIT(type) \
virtual void Visit##type(type* node) = 0;
AST_NODE_LIST(DEF_VISIT)
#undef DEF_VISIT
+};
- protected:
- Isolate* isolate() { return isolate_; }
- private:
- Isolate* isolate_;
- bool stack_overflow_;
-};
+#define DEFINE_AST_VISITOR_SUBCLASS_MEMBERS() \
+public: \
+ virtual void Visit(AstNode* node) { \
+ if (!CheckStackOverflow()) node->Accept(this); \
+ } \
+ \
+ void SetStackOverflow() { stack_overflow_ = true; } \
+ void ClearStackOverflow() { stack_overflow_ = false; } \
+ bool HasStackOverflow() const { return stack_overflow_; } \
+ \
+ bool CheckStackOverflow() { \
+ if (stack_overflow_) return true; \
+ StackLimitCheck check(isolate_); \
+ if (!check.HasOverflowed()) return false; \
+ return (stack_overflow_ = true); \
+ } \
+ \
+private: \
+ void InitializeAstVisitor() { \
+ isolate_ = Isolate::Current(); \
+ stack_overflow_ = false; \
+ } \
+ Isolate* isolate() { return isolate_; } \
+ \
+ Isolate* isolate_; \
+ bool stack_overflow_
// ----------------------------------------------------------------------------
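The net effect of the hunk above: AstVisitor::Visit becomes pure virtual and the stack-overflow bookkeeping moves into each concrete subclass through the macro, so every visitor carries its own isolate_ and overflow flag. A minimal sketch of a subclass wired up this way (the class itself is hypothetical; empty Visit##type bodies stand in for real work):

class ExampleVisitor: public AstVisitor {
 public:
  ExampleVisitor() { InitializeAstVisitor(); }  // caches isolate, clears flag

#define DECLARE_VISIT(type) virtual void Visit##type(type* node) {}
  AST_NODE_LIST(DECLARE_VISIT)  // one (empty) override per node type
#undef DECLARE_VISIT

 private:
  DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
};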
@@ -2649,6 +2676,11 @@ class AstNodeFactory BASE_EMBEDDED {
STATEMENT_WITH_LABELS(SwitchStatement)
#undef STATEMENT_WITH_LABELS
+ ModuleStatement* NewModuleStatement(VariableProxy* proxy, Block* body) {
+ ModuleStatement* stmt = new(zone_) ModuleStatement(proxy, body);
+ VISIT_AND_RETURN(ModuleStatement, stmt)
+ }
+
ExpressionStatement* NewExpressionStatement(Expression* expression) {
ExpressionStatement* stmt = new(zone_) ExpressionStatement(expression);
VISIT_AND_RETURN(ExpressionStatement, stmt)
diff --git a/src/3rdparty/v8/src/atomicops.h b/src/3rdparty/v8/src/atomicops.h
index ec92ce6..d4fe042 100644
--- a/src/3rdparty/v8/src/atomicops.h
+++ b/src/3rdparty/v8/src/atomicops.h
@@ -151,7 +151,9 @@ Atomic64 Release_Load(volatile const Atomic64* ptr);
} } // namespace v8::internal
// Include our platform specific implementation.
-#if defined(_MSC_VER) && \
+#if defined(THREAD_SANITIZER)
+#include "atomicops_internals_tsan.h"
+#elif defined(_MSC_VER) && \
(defined(V8_HOST_ARCH_IA32) || defined(V8_HOST_ARCH_X64) || defined(_WIN32_WCE))
#include "atomicops_internals_x86_msvc.h"
#elif defined(__APPLE__) && \
diff --git a/src/3rdparty/v8/src/atomicops_internals_arm_qnx.h b/src/3rdparty/v8/src/atomicops_internals_arm_qnx.h
index 39c9850..4a8e562 100644
--- a/src/3rdparty/v8/src/atomicops_internals_arm_qnx.h
+++ b/src/3rdparty/v8/src/atomicops_internals_arm_qnx.h
@@ -42,7 +42,9 @@ inline void MemoryBarrier() {
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
- return _smp_cmpxchg(reinterpret_cast<volatile unsigned*>(ptr), old_value, new_value);
+ return _smp_cmpxchg(reinterpret_cast<volatile unsigned*>(ptr),
+ old_value,
+ new_value);
}
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
@@ -56,7 +58,9 @@ inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
// Atomic exchange the old value with an incremented one.
Atomic32 old_value = *ptr;
Atomic32 new_value = old_value + increment;
- if (_smp_cmpxchg(reinterpret_cast<volatile unsigned*>(ptr), old_value, new_value)) {
+ if (_smp_cmpxchg(reinterpret_cast<volatile unsigned*>(ptr),
+ old_value,
+ new_value)) {
// The exchange took place as expected.
return new_value;
}
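The QNX hunks are pure 80-column rewrapping, but the loop they touch is the classic compare-and-swap retry idiom. For reference, a self-contained C++11 rendering of the same increment, with std::atomic standing in for _smp_cmpxchg (a sketch, not part of the patch):

#include <atomic>
#include <stdint.h>

int32_t NoBarrierIncrement(std::atomic<int32_t>* ptr, int32_t increment) {
  int32_t old_value = ptr->load(std::memory_order_relaxed);
  for (;;) {
    int32_t new_value = old_value + increment;
    if (ptr->compare_exchange_weak(old_value, new_value,
                                   std::memory_order_relaxed)) {
      return new_value;  // the exchange took place as expected
    }
    // compare_exchange_weak reloaded old_value for us; retry.
  }
}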
diff --git a/src/3rdparty/v8/src/atomicops_internals_tsan.h b/src/3rdparty/v8/src/atomicops_internals_tsan.h
new file mode 100644
index 0000000..6559336
--- /dev/null
+++ b/src/3rdparty/v8/src/atomicops_internals_tsan.h
@@ -0,0 +1,335 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+// This file is an internal atomic implementation for compiler-based
+// ThreadSanitizer. Use base/atomicops.h instead.
+
+#ifndef V8_ATOMICOPS_INTERNALS_TSAN_H_
+#define V8_ATOMICOPS_INTERNALS_TSAN_H_
+
+// This struct is not part of the public API of this module; clients may not
+// use it. (However, it's exported via BASE_EXPORT because clients implicitly
+// do use it at link time by inlining these functions.)
+// Features of this x86. Values may not be correct before main() is run,
+// but are set conservatively.
+struct AtomicOps_x86CPUFeatureStruct {
+ bool has_amd_lock_mb_bug; // Processor has AMD memory-barrier bug; do lfence
+ // after acquire compare-and-swap.
+ bool has_sse2; // Processor has SSE2.
+};
+extern struct AtomicOps_x86CPUFeatureStruct
+ AtomicOps_Internalx86CPUFeatures;
+
+#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")
+
+namespace v8 {
+namespace internal {
+
+#ifndef TSAN_INTERFACE_ATOMIC_H
+#define TSAN_INTERFACE_ATOMIC_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef char __tsan_atomic8;
+typedef short __tsan_atomic16; // NOLINT
+typedef int __tsan_atomic32;
+typedef long __tsan_atomic64; // NOLINT
+
+typedef enum {
+ __tsan_memory_order_relaxed = (1 << 0) + 100500,
+ __tsan_memory_order_consume = (1 << 1) + 100500,
+ __tsan_memory_order_acquire = (1 << 2) + 100500,
+ __tsan_memory_order_release = (1 << 3) + 100500,
+ __tsan_memory_order_acq_rel = (1 << 4) + 100500,
+ __tsan_memory_order_seq_cst = (1 << 5) + 100500,
+} __tsan_memory_order;
+
+__tsan_atomic8 __tsan_atomic8_load(const volatile __tsan_atomic8* a,
+ __tsan_memory_order mo);
+__tsan_atomic16 __tsan_atomic16_load(const volatile __tsan_atomic16* a,
+ __tsan_memory_order mo);
+__tsan_atomic32 __tsan_atomic32_load(const volatile __tsan_atomic32* a,
+ __tsan_memory_order mo);
+__tsan_atomic64 __tsan_atomic64_load(const volatile __tsan_atomic64* a,
+ __tsan_memory_order mo);
+
+void __tsan_atomic8_store(volatile __tsan_atomic8* a, __tsan_atomic8 v,
+ __tsan_memory_order mo);
+void __tsan_atomic16_store(volatile __tsan_atomic16* a, __tsan_atomic16 v,
+ __tsan_memory_order mo);
+void __tsan_atomic32_store(volatile __tsan_atomic32* a, __tsan_atomic32 v,
+ __tsan_memory_order mo);
+void __tsan_atomic64_store(volatile __tsan_atomic64* a, __tsan_atomic64 v,
+ __tsan_memory_order mo);
+
+__tsan_atomic8 __tsan_atomic8_exchange(volatile __tsan_atomic8* a,
+ __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic16 __tsan_atomic16_exchange(volatile __tsan_atomic16* a,
+ __tsan_atomic16 v, __tsan_memory_order mo);
+__tsan_atomic32 __tsan_atomic32_exchange(volatile __tsan_atomic32* a,
+ __tsan_atomic32 v, __tsan_memory_order mo);
+__tsan_atomic64 __tsan_atomic64_exchange(volatile __tsan_atomic64* a,
+ __tsan_atomic64 v, __tsan_memory_order mo);
+
+__tsan_atomic8 __tsan_atomic8_fetch_add(volatile __tsan_atomic8* a,
+ __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic16 __tsan_atomic16_fetch_add(volatile __tsan_atomic16* a,
+ __tsan_atomic16 v, __tsan_memory_order mo);
+__tsan_atomic32 __tsan_atomic32_fetch_add(volatile __tsan_atomic32* a,
+ __tsan_atomic32 v, __tsan_memory_order mo);
+__tsan_atomic64 __tsan_atomic64_fetch_add(volatile __tsan_atomic64* a,
+ __tsan_atomic64 v, __tsan_memory_order mo);
+
+__tsan_atomic8 __tsan_atomic8_fetch_and(volatile __tsan_atomic8* a,
+ __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic16 __tsan_atomic16_fetch_and(volatile __tsan_atomic16* a,
+ __tsan_atomic16 v, __tsan_memory_order mo);
+__tsan_atomic32 __tsan_atomic32_fetch_and(volatile __tsan_atomic32* a,
+ __tsan_atomic32 v, __tsan_memory_order mo);
+__tsan_atomic64 __tsan_atomic64_fetch_and(volatile __tsan_atomic64* a,
+ __tsan_atomic64 v, __tsan_memory_order mo);
+
+__tsan_atomic8 __tsan_atomic8_fetch_or(volatile __tsan_atomic8* a,
+ __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic16 __tsan_atomic16_fetch_or(volatile __tsan_atomic16* a,
+ __tsan_atomic16 v, __tsan_memory_order mo);
+__tsan_atomic32 __tsan_atomic32_fetch_or(volatile __tsan_atomic32* a,
+ __tsan_atomic32 v, __tsan_memory_order mo);
+__tsan_atomic64 __tsan_atomic64_fetch_or(volatile __tsan_atomic64* a,
+ __tsan_atomic64 v, __tsan_memory_order mo);
+
+__tsan_atomic8 __tsan_atomic8_fetch_xor(volatile __tsan_atomic8* a,
+ __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic16 __tsan_atomic16_fetch_xor(volatile __tsan_atomic16* a,
+ __tsan_atomic16 v, __tsan_memory_order mo);
+__tsan_atomic32 __tsan_atomic32_fetch_xor(volatile __tsan_atomic32* a,
+ __tsan_atomic32 v, __tsan_memory_order mo);
+__tsan_atomic64 __tsan_atomic64_fetch_xor(volatile __tsan_atomic64* a,
+ __tsan_atomic64 v, __tsan_memory_order mo);
+
+int __tsan_atomic8_compare_exchange_weak(volatile __tsan_atomic8* a,
+ __tsan_atomic8* c, __tsan_atomic8 v, __tsan_memory_order mo);
+int __tsan_atomic16_compare_exchange_weak(volatile __tsan_atomic16* a,
+ __tsan_atomic16* c, __tsan_atomic16 v, __tsan_memory_order mo);
+int __tsan_atomic32_compare_exchange_weak(volatile __tsan_atomic32* a,
+ __tsan_atomic32* c, __tsan_atomic32 v, __tsan_memory_order mo);
+int __tsan_atomic64_compare_exchange_weak(volatile __tsan_atomic64* a,
+ __tsan_atomic64* c, __tsan_atomic64 v, __tsan_memory_order mo);
+
+int __tsan_atomic8_compare_exchange_strong(volatile __tsan_atomic8* a,
+ __tsan_atomic8* c, __tsan_atomic8 v, __tsan_memory_order mo);
+int __tsan_atomic16_compare_exchange_strong(volatile __tsan_atomic16* a,
+ __tsan_atomic16* c, __tsan_atomic16 v, __tsan_memory_order mo);
+int __tsan_atomic32_compare_exchange_strong(volatile __tsan_atomic32* a,
+ __tsan_atomic32* c, __tsan_atomic32 v, __tsan_memory_order mo);
+int __tsan_atomic64_compare_exchange_strong(volatile __tsan_atomic64* a,
+ __tsan_atomic64* c, __tsan_atomic64 v, __tsan_memory_order mo);
+
+void __tsan_atomic_thread_fence(__tsan_memory_order mo);
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // #ifndef TSAN_INTERFACE_ATOMIC_H
+
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ Atomic32 cmp = old_value;
+ __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
+ __tsan_memory_order_relaxed);
+ return cmp;
+}
+
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
+ Atomic32 new_value) {
+ return __tsan_atomic32_exchange(ptr, new_value,
+ __tsan_memory_order_relaxed);
+}
+
+inline Atomic32 Acquire_AtomicExchange(volatile Atomic32* ptr,
+ Atomic32 new_value) {
+ return __tsan_atomic32_exchange(ptr, new_value,
+ __tsan_memory_order_acquire);
+}
+
+inline Atomic32 Release_AtomicExchange(volatile Atomic32* ptr,
+ Atomic32 new_value) {
+ return __tsan_atomic32_exchange(ptr, new_value,
+ __tsan_memory_order_release);
+}
+
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
+ Atomic32 increment) {
+ return increment + __tsan_atomic32_fetch_add(ptr, increment,
+ __tsan_memory_order_relaxed);
+}
+
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+ Atomic32 increment) {
+ return increment + __tsan_atomic32_fetch_add(ptr, increment,
+ __tsan_memory_order_acq_rel);
+}
+
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ Atomic32 cmp = old_value;
+ __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
+ __tsan_memory_order_acquire);
+ return cmp;
+}
+
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ Atomic32 cmp = old_value;
+ __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
+ __tsan_memory_order_release);
+ return cmp;
+}
+
+inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
+ __tsan_atomic32_store(ptr, value, __tsan_memory_order_relaxed);
+}
+
+inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
+ __tsan_atomic32_store(ptr, value, __tsan_memory_order_relaxed);
+ __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
+}
+
+inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
+ __tsan_atomic32_store(ptr, value, __tsan_memory_order_release);
+}
+
+inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
+ return __tsan_atomic32_load(ptr, __tsan_memory_order_relaxed);
+}
+
+inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
+ return __tsan_atomic32_load(ptr, __tsan_memory_order_acquire);
+}
+
+inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
+ __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
+ return __tsan_atomic32_load(ptr, __tsan_memory_order_relaxed);
+}
+
+inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value,
+ Atomic64 new_value) {
+ Atomic64 cmp = old_value;
+ __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
+ __tsan_memory_order_relaxed);
+ return cmp;
+}
+
+inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
+ Atomic64 new_value) {
+ return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_relaxed);
+}
+
+inline Atomic64 Acquire_AtomicExchange(volatile Atomic64* ptr,
+ Atomic64 new_value) {
+ return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_acquire);
+}
+
+inline Atomic64 Release_AtomicExchange(volatile Atomic64* ptr,
+ Atomic64 new_value) {
+ return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_release);
+}
+
+inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
+ Atomic64 increment) {
+ return increment + __tsan_atomic64_fetch_add(ptr, increment,
+ __tsan_memory_order_relaxed);
+}
+
+inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
+ Atomic64 increment) {
+ return increment + __tsan_atomic64_fetch_add(ptr, increment,
+ __tsan_memory_order_acq_rel);
+}
+
+inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
+ __tsan_atomic64_store(ptr, value, __tsan_memory_order_relaxed);
+}
+
+inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
+ __tsan_atomic64_store(ptr, value, __tsan_memory_order_relaxed);
+ __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
+}
+
+inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
+ __tsan_atomic64_store(ptr, value, __tsan_memory_order_release);
+}
+
+inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
+ return __tsan_atomic64_load(ptr, __tsan_memory_order_relaxed);
+}
+
+inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
+ return __tsan_atomic64_load(ptr, __tsan_memory_order_acquire);
+}
+
+inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
+ __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
+ return __tsan_atomic64_load(ptr, __tsan_memory_order_relaxed);
+}
+
+inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value,
+ Atomic64 new_value) {
+ Atomic64 cmp = old_value;
+ __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
+ __tsan_memory_order_acquire);
+ return cmp;
+}
+
+inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value,
+ Atomic64 new_value) {
+ Atomic64 cmp = old_value;
+ __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
+ __tsan_memory_order_release);
+ return cmp;
+}
+
+inline void MemoryBarrier() {
+ __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
+}
+
+} // namespace internal
+} // namespace v8
+
+#undef ATOMICOPS_COMPILER_BARRIER
+
+#endif // V8_ATOMICOPS_INTERNALS_TSAN_H_
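The new header's job is to make every V8 atomic visible to ThreadSanitizer by routing it through the __tsan_* interface rather than inline assembly, which TSan cannot see through. As a usage sketch (illustrative only, not part of the patch), the Acquire_CompareAndSwap / Release_Store pair defined above is exactly what a test-and-set spinlock needs:

class SpinLock {
 public:
  SpinLock() : state_(0) {}
  void Lock() {
    // Acquire_CompareAndSwap returns the value it observed; 0 means the
    // lock was free and the swap installed our 1, so we own it now.
    while (v8::internal::Acquire_CompareAndSwap(&state_, 0, 1) != 0) {
      // spin
    }
  }
  void Unlock() {
    v8::internal::Release_Store(&state_, 0);  // publish the critical section
  }
 private:
  volatile v8::internal::Atomic32 state_;
};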
diff --git a/src/3rdparty/v8/src/bootstrapper.cc b/src/3rdparty/v8/src/bootstrapper.cc
index c06d73d..368047c 100644
--- a/src/3rdparty/v8/src/bootstrapper.cc
+++ b/src/3rdparty/v8/src/bootstrapper.cc
@@ -63,8 +63,9 @@ NativesExternalStringResource::NativesExternalStringResource(
}
-Bootstrapper::Bootstrapper()
- : nesting_(0),
+Bootstrapper::Bootstrapper(Isolate* isolate)
+ : isolate_(isolate),
+ nesting_(0),
extensions_cache_(Script::TYPE_EXTENSION),
delete_these_non_arrays_on_tear_down_(NULL),
delete_these_arrays_on_tear_down_(NULL) {
@@ -73,9 +74,7 @@ Bootstrapper::Bootstrapper()
Handle<String> Bootstrapper::NativesSourceLookup(int index) {
ASSERT(0 <= index && index < Natives::GetBuiltinsCount());
- Isolate* isolate = Isolate::Current();
- Factory* factory = isolate->factory();
- Heap* heap = isolate->heap();
+ Heap* heap = isolate_->heap();
if (heap->natives_source_cache()->get(index)->IsUndefined()) {
// We can use external strings for the natives.
Vector<const char> source = Natives::GetRawScriptSource(index);
@@ -84,10 +83,11 @@ Handle<String> Bootstrapper::NativesSourceLookup(int index) {
source.start(),
source.length());
Handle<String> source_code =
- factory->NewExternalStringFromAscii(resource);
+ isolate_->factory()->NewExternalStringFromAscii(resource);
heap->natives_source_cache()->set(index, *source_code);
}
- Handle<Object> cached_source(heap->natives_source_cache()->get(index));
+ Handle<Object> cached_source(heap->natives_source_cache()->get(index),
+ isolate_);
return Handle<String>::cast(cached_source);
}
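Almost every bootstrapper hunk below follows the same refactoring: state that used to be re-resolved through the Isolate::Current() thread-local lookup is now passed in or stored once, which avoids the TLS hit and behaves correctly when multiple isolates exist. A schematic before/after (the Owner class is hypothetical):

// Before: each call pays a thread-local lookup.
void DoWorkOld() {
  Isolate* isolate = Isolate::Current();
  Factory* factory = isolate->factory();
  (void)factory;
}

// After: the owning object caches its isolate at construction.
class Owner {
 public:
  explicit Owner(Isolate* isolate) : isolate_(isolate) {}
  void DoWork() { Factory* factory = isolate_->factory(); (void)factory; }
 private:
  Isolate* isolate_;
};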
@@ -205,6 +205,9 @@ class Genesis BASE_EMBEDDED {
// Used for creating a context from scratch.
void InstallNativeFunctions();
void InstallExperimentalNativeFunctions();
+ Handle<JSFunction> InstallInternalArray(Handle<JSBuiltinsObject> builtins,
+ const char* name,
+ ElementsKind elements_kind);
bool InstallNatives();
bool InstallExperimentalNatives();
void InstallBuiltinFunctionIds();
@@ -230,9 +233,11 @@ class Genesis BASE_EMBEDDED {
// provided.
static bool InstallExtensions(Handle<Context> native_context,
v8::ExtensionConfiguration* extensions);
- static bool InstallExtension(const char* name,
+ static bool InstallExtension(Isolate* isolate,
+ const char* name,
ExtensionStates* extension_states);
- static bool InstallExtension(v8::RegisteredExtension* current,
+ static bool InstallExtension(Isolate* isolate,
+ v8::RegisteredExtension* current,
ExtensionStates* extension_states);
static void InstallSpecialObjects(Handle<Context> native_context);
bool InstallJSBuiltins(Handle<JSBuiltinsObject> builtins);
@@ -268,8 +273,11 @@ class Genesis BASE_EMBEDDED {
static bool CompileBuiltin(Isolate* isolate, int index);
static bool CompileExperimentalBuiltin(Isolate* isolate, int index);
- static bool CompileNative(Vector<const char> name, Handle<String> source);
- static bool CompileScriptCached(Vector<const char> name,
+ static bool CompileNative(Isolate* isolate,
+ Vector<const char> name,
+ Handle<String> source);
+ static bool CompileScriptCached(Isolate* isolate,
+ Vector<const char> name,
Handle<String> source,
SourceCodeCache* cache,
v8::Extension* extension,
@@ -298,13 +306,12 @@ void Bootstrapper::Iterate(ObjectVisitor* v) {
Handle<Context> Bootstrapper::CreateEnvironment(
- Isolate* isolate,
Handle<Object> global_object,
v8::Handle<v8::ObjectTemplate> global_template,
v8::ExtensionConfiguration* extensions) {
- HandleScope scope;
+ HandleScope scope(isolate_);
Handle<Context> env;
- Genesis genesis(isolate, global_object, global_template, extensions);
+ Genesis genesis(isolate_, global_object, global_template, extensions);
env = genesis.result();
if (!env.is_null()) {
if (InstallExtensions(env, extensions)) {
@@ -353,11 +360,11 @@ static Handle<JSFunction> InstallFunction(Handle<JSObject> target,
bool is_ecma_native) {
Isolate* isolate = target->GetIsolate();
Factory* factory = isolate->factory();
- Handle<String> symbol = factory->LookupAsciiSymbol(name);
+ Handle<String> internalized_name = factory->InternalizeUtf8String(name);
Handle<Code> call_code = Handle<Code>(isolate->builtins()->builtin(call));
Handle<JSFunction> function = prototype.is_null() ?
- factory->NewFunctionWithoutPrototype(symbol, call_code) :
- factory->NewFunctionWithPrototype(symbol,
+ factory->NewFunctionWithoutPrototype(internalized_name, call_code) :
+ factory->NewFunctionWithPrototype(internalized_name,
type,
instance_size,
prototype,
@@ -372,9 +379,9 @@ static Handle<JSFunction> InstallFunction(Handle<JSObject> target,
}
CHECK_NOT_EMPTY_HANDLE(isolate,
JSObject::SetLocalPropertyIgnoreAttributes(
- target, symbol, function, attributes));
+ target, internalized_name, function, attributes));
if (is_ecma_native) {
- function->shared()->set_instance_class_name(*symbol);
+ function->shared()->set_instance_class_name(*internalized_name);
}
function->shared()->set_native(true);
return function;
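Behind the LookupAsciiSymbol to InternalizeUtf8String renames running through this file: what old V8 called a "symbol" was not an ES6 Symbol but an internalized string, i.e. a string deduplicated through a table so that equal content maps to one shared object and name comparisons reduce to pointer identity. A standalone sketch of the concept:

#include <string>
#include <unordered_set>

// One canonical string per distinct content; repeated internalization of
// equal text returns the same pointer, so identity implies equality.
const std::string* Internalize(std::unordered_set<std::string>* table,
                               const std::string& text) {
  return &*table->insert(text).first;
}
// Internalize(&t, "length") == Internalize(&t, "length") always holds.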
@@ -400,19 +407,19 @@ void Genesis::SetFunctionInstanceDescriptor(
map->set_instance_descriptors(*descriptors);
{ // Add length.
- CallbacksDescriptor d(*factory()->length_symbol(), *length, attribs);
+ CallbacksDescriptor d(*factory()->length_string(), *length, attribs);
map->AppendDescriptor(&d, witness);
}
{ // Add name.
- CallbacksDescriptor d(*factory()->name_symbol(), *name, attribs);
+ CallbacksDescriptor d(*factory()->name_string(), *name, attribs);
map->AppendDescriptor(&d, witness);
}
{ // Add arguments.
- CallbacksDescriptor d(*factory()->arguments_symbol(), *args, attribs);
+ CallbacksDescriptor d(*factory()->arguments_string(), *args, attribs);
map->AppendDescriptor(&d, witness);
}
{ // Add caller.
- CallbacksDescriptor d(*factory()->caller_symbol(), *caller, attribs);
+ CallbacksDescriptor d(*factory()->caller_string(), *caller, attribs);
map->AppendDescriptor(&d, witness);
}
if (prototypeMode != DONT_ADD_PROTOTYPE) {
@@ -420,7 +427,7 @@ void Genesis::SetFunctionInstanceDescriptor(
if (prototypeMode == ADD_WRITEABLE_PROTOTYPE) {
attribs = static_cast<PropertyAttributes>(attribs & ~READ_ONLY);
}
- CallbacksDescriptor d(*factory()->prototype_symbol(), *prototype, attribs);
+ CallbacksDescriptor d(*factory()->prototype_string(), *prototype, attribs);
map->AppendDescriptor(&d, witness);
}
}
@@ -465,7 +472,7 @@ Handle<JSFunction> Genesis::CreateEmptyFunction(Isolate* isolate) {
Factory* factory = isolate->factory();
Heap* heap = isolate->heap();
- Handle<String> object_name = Handle<String>(heap->Object_symbol());
+ Handle<String> object_name = Handle<String>(heap->Object_string());
{ // --- O b j e c t ---
Handle<JSFunction> object_fun =
@@ -478,19 +485,36 @@ Handle<JSFunction> Genesis::CreateEmptyFunction(Isolate* isolate) {
native_context()->set_object_function(*object_fun);
// Allocate a new prototype for the object function.
- Handle<JSObject> prototype = factory->NewJSObject(
- isolate->object_function(),
- TENURED);
+ Handle<Map> object_prototype_map =
+ factory->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
+ Handle<DescriptorArray> prototype_descriptors(
+ factory->NewDescriptorArray(0, 1));
+ DescriptorArray::WhitenessWitness witness(*prototype_descriptors);
+ Handle<Foreign> object_prototype(
+ factory->NewForeign(&Accessors::ObjectPrototype));
+ PropertyAttributes attribs = static_cast<PropertyAttributes>(
+ DONT_ENUM | DONT_DELETE);
+ object_prototype_map->set_instance_descriptors(*prototype_descriptors);
+
+ { // Add __proto__.
+ CallbacksDescriptor d(heap->proto_string(), *object_prototype, attribs);
+ object_prototype_map->AppendDescriptor(&d, witness);
+ }
+
+ Handle<JSObject> prototype = factory->NewJSObjectFromMap(
+ object_prototype_map,
+ TENURED);
native_context()->set_initial_object_prototype(*prototype);
SetPrototype(object_fun, prototype);
}
// Allocate the empty function as the prototype for function ECMAScript
// 262 15.3.4.
- Handle<String> symbol = factory->LookupAsciiSymbol("Empty");
+ Handle<String> empty_string =
+ factory->InternalizeOneByteString(STATIC_ASCII_VECTOR("Empty"));
Handle<JSFunction> empty_function =
- factory->NewFunctionWithoutPrototype(symbol, CLASSIC_MODE);
+ factory->NewFunctionWithoutPrototype(empty_string, CLASSIC_MODE);
// --- E m p t y ---
Handle<Code> code =
@@ -498,7 +522,8 @@ Handle<JSFunction> Genesis::CreateEmptyFunction(Isolate* isolate) {
Builtins::kEmptyFunction));
empty_function->set_code(*code);
empty_function->shared()->set_code(*code);
- Handle<String> source = factory->NewStringFromAscii(CStrVector("() {}"));
+ Handle<String> source =
+ factory->NewStringFromOneByte(STATIC_ASCII_VECTOR("() {}"));
Handle<Script> script = factory->NewScript(source);
script->set_type(Smi::FromInt(Script::TYPE_NATIVE));
empty_function->shared()->set_script(*script);
@@ -541,19 +566,19 @@ void Genesis::SetStrictFunctionInstanceDescriptor(
map->set_instance_descriptors(*descriptors);
{ // Add length.
- CallbacksDescriptor d(*factory()->length_symbol(), *length, attribs);
+ CallbacksDescriptor d(*factory()->length_string(), *length, attribs);
map->AppendDescriptor(&d, witness);
}
{ // Add name.
- CallbacksDescriptor d(*factory()->name_symbol(), *name, attribs);
+ CallbacksDescriptor d(*factory()->name_string(), *name, attribs);
map->AppendDescriptor(&d, witness);
}
{ // Add arguments.
- CallbacksDescriptor d(*factory()->arguments_symbol(), *arguments, attribs);
+ CallbacksDescriptor d(*factory()->arguments_string(), *arguments, attribs);
map->AppendDescriptor(&d, witness);
}
{ // Add caller.
- CallbacksDescriptor d(*factory()->caller_symbol(), *caller, attribs);
+ CallbacksDescriptor d(*factory()->caller_string(), *caller, attribs);
map->AppendDescriptor(&d, witness);
}
if (prototypeMode != DONT_ADD_PROTOTYPE) {
@@ -561,7 +586,7 @@ void Genesis::SetStrictFunctionInstanceDescriptor(
if (prototypeMode != ADD_WRITEABLE_PROTOTYPE) {
attribs = static_cast<PropertyAttributes>(attribs | READ_ONLY);
}
- CallbacksDescriptor d(*factory()->prototype_symbol(), *prototype, attribs);
+ CallbacksDescriptor d(*factory()->prototype_string(), *prototype, attribs);
map->AppendDescriptor(&d, witness);
}
}
@@ -570,7 +595,8 @@ void Genesis::SetStrictFunctionInstanceDescriptor(
// ECMAScript 5th Edition, 13.2.3
Handle<JSFunction> Genesis::GetThrowTypeErrorFunction() {
if (throw_type_error_function.is_null()) {
- Handle<String> name = factory()->LookupAsciiSymbol("ThrowTypeError");
+ Handle<String> name = factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("ThrowTypeError"));
throw_type_error_function =
factory()->NewFunctionWithoutPrototype(name, CLASSIC_MODE);
Handle<Code> code(isolate()->builtins()->builtin(
@@ -645,8 +671,8 @@ static void SetAccessors(Handle<Map> map,
void Genesis::PoisonArgumentsAndCaller(Handle<Map> map) {
- SetAccessors(map, factory()->arguments_symbol(), GetThrowTypeErrorFunction());
- SetAccessors(map, factory()->caller_symbol(), GetThrowTypeErrorFunction());
+ SetAccessors(map, factory()->arguments_string(), GetThrowTypeErrorFunction());
+ SetAccessors(map, factory()->caller_string(), GetThrowTypeErrorFunction());
}
@@ -713,7 +739,8 @@ Handle<JSGlobalProxy> Genesis::CreateNewGlobals(
Handle<FunctionTemplateInfo> global_constructor =
Handle<FunctionTemplateInfo>(
FunctionTemplateInfo::cast(data->constructor()));
- Handle<Object> proto_template(global_constructor->prototype_template());
+ Handle<Object> proto_template(global_constructor->prototype_template(),
+ isolate());
if (!proto_template->IsUndefined()) {
js_global_template =
Handle<ObjectTemplateInfo>::cast(proto_template);
@@ -721,7 +748,7 @@ Handle<JSGlobalProxy> Genesis::CreateNewGlobals(
}
if (js_global_template.is_null()) {
- Handle<String> name = Handle<String>(heap()->empty_symbol());
+ Handle<String> name = Handle<String>(heap()->empty_string());
Handle<Code> code = Handle<Code>(isolate()->builtins()->builtin(
Builtins::kIllegal));
js_global_function =
@@ -734,7 +761,7 @@ Handle<JSGlobalProxy> Genesis::CreateNewGlobals(
JSObject::cast(js_global_function->instance_prototype()));
CHECK_NOT_EMPTY_HANDLE(isolate(),
JSObject::SetLocalPropertyIgnoreAttributes(
- prototype, factory()->constructor_symbol(),
+ prototype, factory()->constructor_string(),
isolate()->object_function(), NONE));
} else {
Handle<FunctionTemplateInfo> js_global_constructor(
@@ -755,7 +782,7 @@ Handle<JSGlobalProxy> Genesis::CreateNewGlobals(
// Step 2: create or re-initialize the global proxy object.
Handle<JSFunction> global_proxy_function;
if (global_template.IsEmpty()) {
- Handle<String> name = Handle<String>(heap()->empty_symbol());
+ Handle<String> name = Handle<String>(heap()->empty_string());
Handle<Code> code = Handle<Code>(isolate()->builtins()->builtin(
Builtins::kIllegal));
global_proxy_function =
@@ -771,7 +798,8 @@ Handle<JSGlobalProxy> Genesis::CreateNewGlobals(
factory()->OuterGlobalObject);
}
- Handle<String> global_name = factory()->LookupAsciiSymbol("global");
+ Handle<String> global_name = factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("global"));
global_proxy_function->shared()->set_instance_class_name(*global_name);
global_proxy_function->initial_map()->set_is_access_check_needed(true);
@@ -812,7 +840,8 @@ void Genesis::HookUpInnerGlobal(Handle<GlobalObject> inner_global) {
static const PropertyAttributes attributes =
static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE);
ForceSetProperty(builtins_global,
- factory()->LookupAsciiSymbol("global"),
+ factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("global")),
inner_global,
attributes);
// Set up the reference from the global object to the builtins object.
@@ -844,7 +873,7 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
Factory* factory = isolate->factory();
Heap* heap = isolate->heap();
- Handle<String> object_name = Handle<String>(heap->Object_symbol());
+ Handle<String> object_name = Handle<String>(heap->Object_string());
CHECK_NOT_EMPTY_HANDLE(isolate,
JSObject::SetLocalPropertyIgnoreAttributes(
inner_global, object_name,
@@ -880,7 +909,7 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
initial_map->set_instance_descriptors(*array_descriptors);
{ // Add length.
- CallbacksDescriptor d(*factory->length_symbol(), *array_length, attribs);
+ CallbacksDescriptor d(*factory->length_string(), *array_length, attribs);
array_function->initial_map()->AppendDescriptor(&d, witness);
}
@@ -929,7 +958,7 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
string_map->set_instance_descriptors(*string_descriptors);
{ // Add length.
- CallbacksDescriptor d(*factory->length_symbol(), *string_length, attribs);
+ CallbacksDescriptor d(*factory->length_string(), *string_length, attribs);
string_map->AppendDescriptor(&d, witness);
}
}
@@ -966,28 +995,28 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
{
// ECMA-262, section 15.10.7.1.
- FieldDescriptor field(heap->source_symbol(),
+ FieldDescriptor field(heap->source_string(),
JSRegExp::kSourceFieldIndex,
final);
initial_map->AppendDescriptor(&field, witness);
}
{
// ECMA-262, section 15.10.7.2.
- FieldDescriptor field(heap->global_symbol(),
+ FieldDescriptor field(heap->global_string(),
JSRegExp::kGlobalFieldIndex,
final);
initial_map->AppendDescriptor(&field, witness);
}
{
// ECMA-262, section 15.10.7.3.
- FieldDescriptor field(heap->ignore_case_symbol(),
+ FieldDescriptor field(heap->ignore_case_string(),
JSRegExp::kIgnoreCaseFieldIndex,
final);
initial_map->AppendDescriptor(&field, witness);
}
{
// ECMA-262, section 15.10.7.4.
- FieldDescriptor field(heap->multiline_symbol(),
+ FieldDescriptor field(heap->multiline_string(),
JSRegExp::kMultilineFieldIndex,
final);
initial_map->AppendDescriptor(&field, witness);
@@ -996,7 +1025,7 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
// ECMA-262, section 15.10.7.5.
PropertyAttributes writable =
static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE);
- FieldDescriptor field(heap->last_index_symbol(),
+ FieldDescriptor field(heap->last_index_string(),
JSRegExp::kLastIndexFieldIndex,
writable);
initial_map->AppendDescriptor(&field, witness);
@@ -1014,7 +1043,7 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
proto_map->set_prototype(native_context()->initial_object_prototype());
Handle<JSObject> proto = factory->NewJSObjectFromMap(proto_map);
proto->InObjectPropertyAtPut(JSRegExp::kSourceFieldIndex,
- heap->query_colon_symbol());
+ heap->query_colon_string());
proto->InObjectPropertyAtPut(JSRegExp::kGlobalFieldIndex,
heap->false_value());
proto->InObjectPropertyAtPut(JSRegExp::kIgnoreCaseFieldIndex,
@@ -1051,7 +1080,8 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
// Make sure we can recognize argument objects at runtime.
// This is done by introducing an anonymous function with
// class_name equals 'Arguments'.
- Handle<String> symbol = factory->LookupAsciiSymbol("Arguments");
+ Handle<String> arguments_string = factory->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("Arguments"));
Handle<Code> code = Handle<Code>(
isolate->builtins()->builtin(Builtins::kIllegal));
Handle<JSObject> prototype =
@@ -1059,14 +1089,14 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
JSObject::cast(native_context()->object_function()->prototype()));
Handle<JSFunction> function =
- factory->NewFunctionWithPrototype(symbol,
+ factory->NewFunctionWithPrototype(arguments_string,
JS_OBJECT_TYPE,
JSObject::kHeaderSize,
prototype,
code,
false);
ASSERT(!function->has_initial_map());
- function->shared()->set_instance_class_name(*symbol);
+ function->shared()->set_instance_class_name(*arguments_string);
function->shared()->set_expected_nof_properties(2);
Handle<JSObject> result = factory->NewJSObject(function);
@@ -1075,22 +1105,22 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
// callee must be added as the second property.
CHECK_NOT_EMPTY_HANDLE(isolate,
JSObject::SetLocalPropertyIgnoreAttributes(
- result, factory->length_symbol(),
+ result, factory->length_string(),
factory->undefined_value(), DONT_ENUM));
CHECK_NOT_EMPTY_HANDLE(isolate,
JSObject::SetLocalPropertyIgnoreAttributes(
- result, factory->callee_symbol(),
+ result, factory->callee_string(),
factory->undefined_value(), DONT_ENUM));
#ifdef DEBUG
LookupResult lookup(isolate);
- result->LocalLookup(heap->callee_symbol(), &lookup);
+ result->LocalLookup(heap->callee_string(), &lookup);
ASSERT(lookup.IsField());
- ASSERT(lookup.GetFieldIndex() == Heap::kArgumentsCalleeIndex);
+ ASSERT(lookup.GetFieldIndex().field_index() == Heap::kArgumentsCalleeIndex);
- result->LocalLookup(heap->length_symbol(), &lookup);
+ result->LocalLookup(heap->length_string(), &lookup);
ASSERT(lookup.IsField());
- ASSERT(lookup.GetFieldIndex() == Heap::kArgumentsLengthIndex);
+ ASSERT(lookup.GetFieldIndex().field_index() == Heap::kArgumentsLengthIndex);
ASSERT(result->map()->inobject_properties() > Heap::kArgumentsCalleeIndex);
ASSERT(result->map()->inobject_properties() > Heap::kArgumentsLengthIndex);
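A note on the ASSERT churn here and in the strict-arguments hunk below: GetFieldIndex() evidently no longer yields a raw int but a small index object unwrapped via .field_index(). A hypothetical shape for such a wrapper (V8's real type likely carries more, e.g. whether the field lives in-object):

class PropertyIndex {
 public:
  explicit PropertyIndex(int index) : index_(index) {}
  int field_index() const { return index_; }
 private:
  int index_;
};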
@@ -1149,17 +1179,17 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
map->set_instance_descriptors(*descriptors);
{ // length
- FieldDescriptor d(*factory->length_symbol(), 0, DONT_ENUM);
+ FieldDescriptor d(*factory->length_string(), 0, DONT_ENUM);
map->AppendDescriptor(&d, witness);
}
{ // callee
- CallbacksDescriptor d(*factory->callee_symbol(),
+ CallbacksDescriptor d(*factory->callee_string(),
*callee,
attributes);
map->AppendDescriptor(&d, witness);
}
{ // caller
- CallbacksDescriptor d(*factory->caller_symbol(),
+ CallbacksDescriptor d(*factory->caller_string(),
*caller,
attributes);
map->AppendDescriptor(&d, witness);
@@ -1181,14 +1211,14 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
// Add length property only for strict mode boilerplate.
CHECK_NOT_EMPTY_HANDLE(isolate,
JSObject::SetLocalPropertyIgnoreAttributes(
- result, factory->length_symbol(),
+ result, factory->length_string(),
factory->undefined_value(), DONT_ENUM));
#ifdef DEBUG
LookupResult lookup(isolate);
- result->LocalLookup(heap->length_symbol(), &lookup);
+ result->LocalLookup(heap->length_string(), &lookup);
ASSERT(lookup.IsField());
- ASSERT(lookup.GetFieldIndex() == Heap::kArgumentsLengthIndex);
+ ASSERT(lookup.GetFieldIndex().field_index() == Heap::kArgumentsLengthIndex);
ASSERT(result->map()->inobject_properties() > Heap::kArgumentsLengthIndex);
@@ -1203,13 +1233,14 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
Handle<Code> code = Handle<Code>(
isolate->builtins()->builtin(Builtins::kIllegal));
Handle<JSFunction> context_extension_fun =
- factory->NewFunction(factory->empty_symbol(),
+ factory->NewFunction(factory->empty_string(),
JS_CONTEXT_EXTENSION_OBJECT_TYPE,
JSObject::kHeaderSize,
code,
true);
- Handle<String> name = factory->LookupAsciiSymbol("context_extension");
+ Handle<String> name = factory->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("context_extension"));
context_extension_fun->shared()->set_instance_class_name(*name);
native_context()->set_context_extension_function(*context_extension_fun);
}
@@ -1221,7 +1252,7 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
Handle<Code>(isolate->builtins()->builtin(
Builtins::kHandleApiCallAsFunction));
Handle<JSFunction> delegate =
- factory->NewFunction(factory->empty_symbol(), JS_OBJECT_TYPE,
+ factory->NewFunction(factory->empty_string(), JS_OBJECT_TYPE,
JSObject::kHeaderSize, code, true);
native_context()->set_call_as_function_delegate(*delegate);
delegate->shared()->DontAdaptArguments();
@@ -1233,7 +1264,7 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
Handle<Code>(isolate->builtins()->builtin(
Builtins::kHandleApiCallAsConstructor));
Handle<JSFunction> delegate =
- factory->NewFunction(factory->empty_symbol(), JS_OBJECT_TYPE,
+ factory->NewFunction(factory->empty_string(), JS_OBJECT_TYPE,
JSObject::kHeaderSize, code, true);
native_context()->set_call_as_constructor_delegate(*delegate);
delegate->shared()->DontAdaptArguments();
@@ -1242,8 +1273,9 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
// Initialize the out of memory slot.
native_context()->set_out_of_memory(heap->false_value());
- // Initialize the data slot.
- native_context()->set_data(heap->undefined_value());
+ // Initialize the embedder data slot.
+ Handle<FixedArray> embedder_data = factory->NewFixedArray(2);
+ native_context()->set_embedder_data(*embedder_data);
{
// Initialize the random seed slot.
@@ -1288,7 +1320,7 @@ bool Genesis::CompileBuiltin(Isolate* isolate, int index) {
Vector<const char> name = Natives::GetScriptName(index);
Handle<String> source_code =
isolate->bootstrapper()->NativesSourceLookup(index);
- return CompileNative(name, source_code);
+ return CompileNative(isolate, name, source_code);
}
@@ -1298,23 +1330,25 @@ bool Genesis::CompileExperimentalBuiltin(Isolate* isolate, int index) {
Handle<String> source_code =
factory->NewStringFromAscii(
ExperimentalNatives::GetRawScriptSource(index));
- return CompileNative(name, source_code);
+ return CompileNative(isolate, name, source_code);
}
-bool Genesis::CompileNative(Vector<const char> name, Handle<String> source) {
- HandleScope scope;
- Isolate* isolate = source->GetIsolate();
+bool Genesis::CompileNative(Isolate* isolate,
+ Vector<const char> name,
+ Handle<String> source) {
+ HandleScope scope(isolate);
#ifdef ENABLE_DEBUGGER_SUPPORT
isolate->debugger()->set_compiling_natives(true);
#endif
// During genesis, the boilerplate for stack overflow won't work until the
// environment has been at least partially initialized. Add a stack check
// before entering JS code to catch overflow early.
- StackLimitCheck check(Isolate::Current());
+ StackLimitCheck check(isolate);
if (check.HasOverflowed()) return false;
- bool result = CompileScriptCached(name,
+ bool result = CompileScriptCached(isolate,
+ name,
source,
NULL,
NULL,
@@ -1329,20 +1363,21 @@ bool Genesis::CompileNative(Vector<const char> name, Handle<String> source) {
}
-bool Genesis::CompileScriptCached(Vector<const char> name,
+bool Genesis::CompileScriptCached(Isolate* isolate,
+ Vector<const char> name,
Handle<String> source,
SourceCodeCache* cache,
v8::Extension* extension,
Handle<Context> top_context,
bool use_runtime_context) {
- Factory* factory = source->GetIsolate()->factory();
- HandleScope scope;
+ Factory* factory = isolate->factory();
+ HandleScope scope(isolate);
Handle<SharedFunctionInfo> function_info;
// If we can't find the function in the cache, we compile a new
// function and insert it into the cache.
if (cache == NULL || !cache->Lookup(name, &function_info)) {
- ASSERT(source->IsAsciiRepresentation());
+ ASSERT(source->IsOneByteRepresentation());
Handle<String> script_name = factory->NewStringFromUtf8(name);
function_info = Compiler::Compile(
source,
@@ -1374,7 +1409,8 @@ bool Genesis::CompileScriptCached(Vector<const char> name,
Handle<Object> receiver =
Handle<Object>(use_runtime_context
? top_context->builtins()
- : top_context->global_object());
+ : top_context->global_object(),
+ isolate);
bool has_pending_exception;
Execution::Call(fun, receiver, 0, NULL, &has_pending_exception);
if (has_pending_exception) return false;
@@ -1382,16 +1418,17 @@ bool Genesis::CompileScriptCached(Vector<const char> name,
}
-#define INSTALL_NATIVE(Type, name, var) \
- Handle<String> var##_name = factory()->LookupAsciiSymbol(name); \
- Object* var##_native = \
- native_context()->builtins()->GetPropertyNoExceptionThrown( \
- *var##_name); \
+#define INSTALL_NATIVE(Type, name, var) \
+ Handle<String> var##_name = \
+ factory()->InternalizeOneByteString(STATIC_ASCII_VECTOR(name)); \
+ Object* var##_native = \
+ native_context()->builtins()->GetPropertyNoExceptionThrown( \
+ *var##_name); \
native_context()->set_##var(Type::cast(var##_native));
void Genesis::InstallNativeFunctions() {
- HandleScope scope;
+ HandleScope scope(isolate());
INSTALL_NATIVE(JSFunction, "CreateDate", create_date_fun);
INSTALL_NATIVE(JSFunction, "ToNumber", to_number_fun);
INSTALL_NATIVE(JSFunction, "ToString", to_string_fun);
@@ -1411,6 +1448,9 @@ void Genesis::InstallNativeFunctions() {
}
void Genesis::InstallExperimentalNativeFunctions() {
+ if (FLAG_harmony_symbols) {
+ INSTALL_NATIVE(JSObject, "SymbolDelegate", symbol_delegate);
+ }
if (FLAG_harmony_proxies) {
INSTALL_NATIVE(JSFunction, "DerivedHasTrap", derived_has_trap);
INSTALL_NATIVE(JSFunction, "DerivedGetTrap", derived_get_trap);
@@ -1427,8 +1467,68 @@ void Genesis::InstallExperimentalNativeFunctions() {
#undef INSTALL_NATIVE
+Handle<JSFunction> Genesis::InstallInternalArray(
+ Handle<JSBuiltinsObject> builtins,
+ const char* name,
+ ElementsKind elements_kind) {
+ // --- I n t e r n a l A r r a y ---
+ // An array constructor on the builtins object that works like
+ // the public Array constructor, except that its prototype
+ // doesn't inherit from Object.prototype.
+ // To be used only for internal work by builtins. Instances
+ // must not be leaked to user code.
+ Handle<JSFunction> array_function =
+ InstallFunction(builtins,
+ name,
+ JS_ARRAY_TYPE,
+ JSArray::kSize,
+ isolate()->initial_object_prototype(),
+ Builtins::kInternalArrayCode,
+ true);
+ Handle<JSObject> prototype =
+ factory()->NewJSObject(isolate()->object_function(), TENURED);
+ SetPrototype(array_function, prototype);
+
+ // TODO(mvstanton): For performance reasons, this code would have to
+ // be changed to successfully run with FLAG_optimize_constructed_arrays.
+ // The next checkin to enable FLAG_optimize_constructed_arrays by
+ // default will address this.
+ CHECK(!FLAG_optimize_constructed_arrays);
+ array_function->shared()->set_construct_stub(
+ isolate()->builtins()->builtin(Builtins::kArrayConstructCode));
+
+ array_function->shared()->DontAdaptArguments();
+
+ MaybeObject* maybe_map = array_function->initial_map()->Copy();
+ Map* new_map;
+ if (!maybe_map->To(&new_map)) return Handle<JSFunction>::null();
+ new_map->set_elements_kind(elements_kind);
+ array_function->set_initial_map(new_map);
+
+ // Make "length" magic on instances.
+ Handle<Map> initial_map(array_function->initial_map());
+ Handle<DescriptorArray> array_descriptors(
+ factory()->NewDescriptorArray(0, 1));
+ DescriptorArray::WhitenessWitness witness(*array_descriptors);
+
+ Handle<Foreign> array_length(factory()->NewForeign(
+ &Accessors::ArrayLength));
+ PropertyAttributes attribs = static_cast<PropertyAttributes>(
+ DONT_ENUM | DONT_DELETE);
+ initial_map->set_instance_descriptors(*array_descriptors);
+
+ { // Add length.
+ CallbacksDescriptor d(
+ *factory()->length_string(), *array_length, attribs);
+ array_function->initial_map()->AppendDescriptor(&d, witness);
+ }
+
+ return array_function;
+}
+
+
bool Genesis::InstallNatives() {
- HandleScope scope;
+ HandleScope scope(isolate());
// Create a function for the builtins object. Allocate space for the
// JavaScript builtins, a reference to the builtins object
@@ -1436,11 +1536,12 @@ bool Genesis::InstallNatives() {
Handle<Code> code = Handle<Code>(
isolate()->builtins()->builtin(Builtins::kIllegal));
Handle<JSFunction> builtins_fun =
- factory()->NewFunction(factory()->empty_symbol(),
+ factory()->NewFunction(factory()->empty_string(),
JS_BUILTINS_OBJECT_TYPE,
JSBuiltinsObject::kSize, code, true);
- Handle<String> name = factory()->LookupAsciiSymbol("builtins");
+ Handle<String> name =
+ factory()->InternalizeOneByteString(STATIC_ASCII_VECTOR("builtins"));
builtins_fun->shared()->set_instance_class_name(*name);
builtins_fun->initial_map()->set_dictionary_map(true);
builtins_fun->initial_map()->set_prototype(heap()->null_value());
@@ -1459,11 +1560,12 @@ bool Genesis::InstallNatives() {
// global object.
static const PropertyAttributes attributes =
static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE);
- Handle<String> global_symbol = factory()->LookupAsciiSymbol("global");
- Handle<Object> global_obj(native_context()->global_object());
+ Handle<String> global_string =
+ factory()->InternalizeOneByteString(STATIC_ASCII_VECTOR("global"));
+ Handle<Object> global_obj(native_context()->global_object(), isolate());
CHECK_NOT_EMPTY_HANDLE(isolate(),
JSObject::SetLocalPropertyIgnoreAttributes(
- builtins, global_symbol, global_obj, attributes));
+ builtins, global_string, global_obj, attributes));
// Set up the reference from the global object to the builtins object.
JSGlobalObject::cast(native_context()->global_object())->
@@ -1471,7 +1573,7 @@ bool Genesis::InstallNatives() {
// Create a bridge function that has context in the native context.
Handle<JSFunction> bridge =
- factory()->NewFunction(factory()->empty_symbol(),
+ factory()->NewFunction(factory()->empty_string(),
factory()->undefined_value());
ASSERT(bridge->context() == *isolate()->native_context());
@@ -1502,41 +1604,52 @@ bool Genesis::InstallNatives() {
Handle<Foreign> script_source(
factory()->NewForeign(&Accessors::ScriptSource));
Handle<Foreign> script_name(factory()->NewForeign(&Accessors::ScriptName));
- Handle<String> id_symbol(factory()->LookupAsciiSymbol("id"));
+ Handle<String> id_string(factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("id")));
Handle<Foreign> script_id(factory()->NewForeign(&Accessors::ScriptId));
- Handle<String> line_offset_symbol(
- factory()->LookupAsciiSymbol("line_offset"));
+ Handle<String> line_offset_string(
+ factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("line_offset")));
Handle<Foreign> script_line_offset(
factory()->NewForeign(&Accessors::ScriptLineOffset));
- Handle<String> column_offset_symbol(
- factory()->LookupAsciiSymbol("column_offset"));
+ Handle<String> column_offset_string(
+ factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("column_offset")));
Handle<Foreign> script_column_offset(
factory()->NewForeign(&Accessors::ScriptColumnOffset));
- Handle<String> data_symbol(factory()->LookupAsciiSymbol("data"));
+ Handle<String> data_string(factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("data")));
Handle<Foreign> script_data(factory()->NewForeign(&Accessors::ScriptData));
- Handle<String> type_symbol(factory()->LookupAsciiSymbol("type"));
+ Handle<String> type_string(factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("type")));
Handle<Foreign> script_type(factory()->NewForeign(&Accessors::ScriptType));
- Handle<String> compilation_type_symbol(
- factory()->LookupAsciiSymbol("compilation_type"));
+ Handle<String> compilation_type_string(
+ factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("compilation_type")));
Handle<Foreign> script_compilation_type(
factory()->NewForeign(&Accessors::ScriptCompilationType));
- Handle<String> line_ends_symbol(factory()->LookupAsciiSymbol("line_ends"));
+ Handle<String> line_ends_string(factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("line_ends")));
Handle<Foreign> script_line_ends(
factory()->NewForeign(&Accessors::ScriptLineEnds));
- Handle<String> context_data_symbol(
- factory()->LookupAsciiSymbol("context_data"));
+ Handle<String> context_data_string(
+ factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("context_data")));
Handle<Foreign> script_context_data(
factory()->NewForeign(&Accessors::ScriptContextData));
- Handle<String> eval_from_script_symbol(
- factory()->LookupAsciiSymbol("eval_from_script"));
+ Handle<String> eval_from_script_string(
+ factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("eval_from_script")));
Handle<Foreign> script_eval_from_script(
factory()->NewForeign(&Accessors::ScriptEvalFromScript));
- Handle<String> eval_from_script_position_symbol(
- factory()->LookupAsciiSymbol("eval_from_script_position"));
+ Handle<String> eval_from_script_position_string(
+ factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("eval_from_script_position")));
Handle<Foreign> script_eval_from_script_position(
factory()->NewForeign(&Accessors::ScriptEvalFromScriptPosition));
- Handle<String> eval_from_function_name_symbol(
- factory()->LookupAsciiSymbol("eval_from_function_name"));
+ Handle<String> eval_from_function_name_string(
+ factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("eval_from_function_name")));
Handle<Foreign> script_eval_from_function_name(
factory()->NewForeign(&Accessors::ScriptEvalFromFunctionName));
PropertyAttributes attribs =
@@ -1545,67 +1658,67 @@ bool Genesis::InstallNatives() {
{
CallbacksDescriptor d(
- *factory()->source_symbol(), *script_source, attribs);
+ *factory()->source_string(), *script_source, attribs);
script_map->AppendDescriptor(&d, witness);
}
{
- CallbacksDescriptor d(*factory()->name_symbol(), *script_name, attribs);
+ CallbacksDescriptor d(*factory()->name_string(), *script_name, attribs);
script_map->AppendDescriptor(&d, witness);
}
{
- CallbacksDescriptor d(*id_symbol, *script_id, attribs);
+ CallbacksDescriptor d(*id_string, *script_id, attribs);
script_map->AppendDescriptor(&d, witness);
}
{
- CallbacksDescriptor d(*line_offset_symbol, *script_line_offset, attribs);
+ CallbacksDescriptor d(*line_offset_string, *script_line_offset, attribs);
script_map->AppendDescriptor(&d, witness);
}
{
CallbacksDescriptor d(
- *column_offset_symbol, *script_column_offset, attribs);
+ *column_offset_string, *script_column_offset, attribs);
script_map->AppendDescriptor(&d, witness);
}
{
- CallbacksDescriptor d(*data_symbol, *script_data, attribs);
+ CallbacksDescriptor d(*data_string, *script_data, attribs);
script_map->AppendDescriptor(&d, witness);
}
{
- CallbacksDescriptor d(*type_symbol, *script_type, attribs);
+ CallbacksDescriptor d(*type_string, *script_type, attribs);
script_map->AppendDescriptor(&d, witness);
}
{
CallbacksDescriptor d(
- *compilation_type_symbol, *script_compilation_type, attribs);
+ *compilation_type_string, *script_compilation_type, attribs);
script_map->AppendDescriptor(&d, witness);
}
{
- CallbacksDescriptor d(*line_ends_symbol, *script_line_ends, attribs);
+ CallbacksDescriptor d(*line_ends_string, *script_line_ends, attribs);
script_map->AppendDescriptor(&d, witness);
}
{
CallbacksDescriptor d(
- *context_data_symbol, *script_context_data, attribs);
+ *context_data_string, *script_context_data, attribs);
script_map->AppendDescriptor(&d, witness);
}
{
CallbacksDescriptor d(
- *eval_from_script_symbol, *script_eval_from_script, attribs);
+ *eval_from_script_string, *script_eval_from_script, attribs);
script_map->AppendDescriptor(&d, witness);
}
{
CallbacksDescriptor d(
- *eval_from_script_position_symbol,
+ *eval_from_script_position_string,
*script_eval_from_script_position,
attribs);
script_map->AppendDescriptor(&d, witness);
@@ -1613,7 +1726,7 @@ bool Genesis::InstallNatives() {
{
CallbacksDescriptor d(
- *eval_from_function_name_symbol,
+ *eval_from_function_name_string,
*script_eval_from_function_name,
attribs);
script_map->AppendDescriptor(&d, witness);
@@ -1639,60 +1752,24 @@ bool Genesis::InstallNatives() {
native_context()->set_opaque_reference_function(*opaque_reference_fun);
}
- { // --- I n t e r n a l A r r a y ---
- // An array constructor on the builtins object that works like
- // the public Array constructor, except that its prototype
- // doesn't inherit from Object.prototype.
- // To be used only for internal work by builtins. Instances
- // must not be leaked to user code.
+ // InternalArrays should not use Smi-Only array optimizations. There are too
+ // many places in the C++ runtime code (e.g. RegEx) that assume that
+ // elements in InternalArrays can be set to non-Smi values without going
+ // through a common bottleneck that would make the SMI_ONLY -> FAST_ELEMENT
+ // transition easy to trap. Moreover, they rarely are smi-only.
+ {
Handle<JSFunction> array_function =
- InstallFunction(builtins,
- "InternalArray",
- JS_ARRAY_TYPE,
- JSArray::kSize,
- isolate()->initial_object_prototype(),
- Builtins::kInternalArrayCode,
- true);
- Handle<JSObject> prototype =
- factory()->NewJSObject(isolate()->object_function(), TENURED);
- SetPrototype(array_function, prototype);
-
- array_function->shared()->set_construct_stub(
- isolate()->builtins()->builtin(Builtins::kArrayConstructCode));
- array_function->shared()->DontAdaptArguments();
-
- // InternalArrays should not use Smi-Only array optimizations. There are too
- // many places in the C++ runtime code (e.g. RegEx) that assume that
- // elements in InternalArrays can be set to non-Smi values without going
- // through a common bottleneck that would make the SMI_ONLY -> FAST_ELEMENT
- // transition easy to trap. Moreover, they rarely are smi-only.
- MaybeObject* maybe_map = array_function->initial_map()->Copy();
- Map* new_map;
- if (!maybe_map->To(&new_map)) return false;
- new_map->set_elements_kind(FAST_HOLEY_ELEMENTS);
- array_function->set_initial_map(new_map);
-
- // Make "length" magic on instances.
- Handle<Map> initial_map(array_function->initial_map());
- Handle<DescriptorArray> array_descriptors(
- factory()->NewDescriptorArray(0, 1));
- DescriptorArray::WhitenessWitness witness(*array_descriptors);
-
- Handle<Foreign> array_length(factory()->NewForeign(
- &Accessors::ArrayLength));
- PropertyAttributes attribs = static_cast<PropertyAttributes>(
- DONT_ENUM | DONT_DELETE);
- initial_map->set_instance_descriptors(*array_descriptors);
-
- { // Add length.
- CallbacksDescriptor d(
- *factory()->length_symbol(), *array_length, attribs);
- array_function->initial_map()->AppendDescriptor(&d, witness);
- }
-
+ InstallInternalArray(builtins, "InternalArray", FAST_HOLEY_ELEMENTS);
+ if (array_function.is_null()) return false;
native_context()->set_internal_array_function(*array_function);
}
+ {
+ Handle<JSFunction> array_function =
+ InstallInternalArray(builtins, "InternalPackedArray", FAST_ELEMENTS);
+ if (array_function.is_null()) return false;
+ }
+
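
The two call sites above fold the previously inlined InternalArray setup into a shared InstallInternalArray helper, parameterized by name and elements kind; its body is not part of this hunk. A stand-alone sketch of the shape of that de-duplication, with all names and types reduced to hypothetical stand-ins rather than the V8 API:

    #include <map>
    #include <string>

    // Illustrative miniature: one parameterized installer replaces two
    // nearly identical inline blocks.
    enum ElementsKind { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
    struct ArrayFunction { std::string name; ElementsKind kind; };

    ArrayFunction InstallInternalArraySketch(
        std::map<std::string, ArrayFunction>* builtins,
        const std::string& name, ElementsKind kind) {
      ArrayFunction f = { name, kind };
      (*builtins)[name] = f;  // register on the builtins object
      return f;
    }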
if (FLAG_disable_native_files) {
PrintF("Warning: Running without installed natives!\n");
return true;
@@ -1720,9 +1797,10 @@ bool Genesis::InstallNatives() {
HeapObject::cast(string_function->initial_map()->prototype())->map());
// Install Function.prototype.call and apply.
- { Handle<String> key = factory()->function_class_symbol();
+ { Handle<String> key = factory()->function_class_string();
Handle<JSFunction> function =
- Handle<JSFunction>::cast(GetProperty(isolate()->global_object(), key));
+ Handle<JSFunction>::cast(
+ GetProperty(isolate(), isolate()->global_object(), key));
Handle<JSObject> proto =
Handle<JSObject>(JSObject::cast(function->instance_prototype()));
@@ -1783,7 +1861,7 @@ bool Genesis::InstallNatives() {
JSFunction* array_function = native_context()->array_function();
Handle<DescriptorArray> array_descriptors(
array_function->initial_map()->instance_descriptors());
- String* length = heap()->length_symbol();
+ String* length = heap()->length_string();
int old = array_descriptors->SearchWithCache(
length, array_function->initial_map());
ASSERT(old != DescriptorArray::kNotFound);
@@ -1793,14 +1871,14 @@ bool Genesis::InstallNatives() {
initial_map->AppendDescriptor(&desc, witness);
}
{
- FieldDescriptor index_field(heap()->index_symbol(),
+ FieldDescriptor index_field(heap()->index_string(),
JSRegExpResult::kIndexIndex,
NONE);
initial_map->AppendDescriptor(&index_field, witness);
}
{
- FieldDescriptor input_field(heap()->input_symbol(),
+ FieldDescriptor input_field(heap()->input_string(),
JSRegExpResult::kInputIndex,
NONE);
initial_map->AppendDescriptor(&input_field, witness);
@@ -1825,6 +1903,11 @@ bool Genesis::InstallExperimentalNatives() {
for (int i = ExperimentalNatives::GetDebuggerCount();
i < ExperimentalNatives::GetBuiltinsCount();
i++) {
+ if (FLAG_harmony_symbols &&
+ strcmp(ExperimentalNatives::GetScriptName(i).start(),
+ "native symbol.js") == 0) {
+ if (!CompileExperimentalBuiltin(isolate(), i)) return false;
+ }
if (FLAG_harmony_proxies &&
strcmp(ExperimentalNatives::GetScriptName(i).start(),
"native proxy.js") == 0) {
@@ -1851,18 +1934,19 @@ bool Genesis::InstallExperimentalNatives() {
static Handle<JSObject> ResolveBuiltinIdHolder(
Handle<Context> native_context,
const char* holder_expr) {
- Factory* factory = native_context->GetIsolate()->factory();
+ Isolate* isolate = native_context->GetIsolate();
+ Factory* factory = isolate->factory();
Handle<GlobalObject> global(native_context->global_object());
const char* period_pos = strchr(holder_expr, '.');
if (period_pos == NULL) {
- return Handle<JSObject>::cast(
- GetProperty(global, factory->LookupAsciiSymbol(holder_expr)));
+ return Handle<JSObject>::cast(GetProperty(
+ isolate, global, factory->InternalizeUtf8String(holder_expr)));
}
ASSERT_EQ(".prototype", period_pos);
Vector<const char> property(holder_expr,
static_cast<int>(period_pos - holder_expr));
Handle<JSFunction> function = Handle<JSFunction>::cast(
- GetProperty(global, factory->LookupSymbol(property)));
+ GetProperty(isolate, global, factory->InternalizeUtf8String(property)));
return Handle<JSObject>(JSObject::cast(function->prototype()));
}
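
ResolveBuiltinIdHolder accepts either a bare object name or "Name.prototype", as the ASSERT_EQ above implies. A minimal sketch of that split, using only the C and C++ standard libraries:

    #include <string.h>
    #include <string>
    #include <utility>

    // Returns the object name plus a flag saying whether to walk to .prototype.
    std::pair<std::string, bool> SplitHolderExpr(const char* holder_expr) {
      const char* period_pos = strchr(holder_expr, '.');
      if (period_pos == NULL) return std::make_pair(std::string(holder_expr), false);
      // Everything after the period must be ".prototype" per the ASSERT above.
      std::string object_name(holder_expr, period_pos - holder_expr);
      return std::make_pair(object_name, true);
    }

For example, SplitHolderExpr("Math") yields {"Math", false}, while SplitHolderExpr("String.prototype") yields {"String", true}.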
@@ -1871,7 +1955,7 @@ static void InstallBuiltinFunctionId(Handle<JSObject> holder,
const char* function_name,
BuiltinFunctionId id) {
Factory* factory = holder->GetIsolate()->factory();
- Handle<String> name = factory->LookupAsciiSymbol(function_name);
+ Handle<String> name = factory->InternalizeUtf8String(function_name);
Object* function_object = holder->GetProperty(*name)->ToObjectUnchecked();
Handle<JSFunction> function(JSFunction::cast(function_object));
function->shared()->set_function_data(Smi::FromInt(id));
@@ -1879,7 +1963,7 @@ static void InstallBuiltinFunctionId(Handle<JSObject> holder,
void Genesis::InstallBuiltinFunctionIds() {
- HandleScope scope;
+ HandleScope scope(isolate());
#define INSTALL_BUILTIN_ID(holder_expr, fun_name, name) \
{ \
Handle<JSObject> holder = ResolveBuiltinIdHolder( \
@@ -1944,10 +2028,9 @@ void Genesis::InitializeNormalizedMapCaches() {
bool Bootstrapper::InstallExtensions(Handle<Context> native_context,
v8::ExtensionConfiguration* extensions) {
- Isolate* isolate = native_context->GetIsolate();
- BootstrapperActive active;
- SaveContext saved_context(isolate);
- isolate->set_context(*native_context);
+ BootstrapperActive active(this);
+ SaveContext saved_context(isolate_);
+ isolate_->set_context(*native_context);
if (!Genesis::InstallExtensions(native_context, extensions)) return false;
Genesis::InstallSpecialObjects(native_context);
return true;
@@ -1957,12 +2040,13 @@ bool Bootstrapper::InstallExtensions(Handle<Context> native_context,
void Genesis::InstallSpecialObjects(Handle<Context> native_context) {
Isolate* isolate = native_context->GetIsolate();
Factory* factory = isolate->factory();
- HandleScope scope;
+ HandleScope scope(isolate);
Handle<JSGlobalObject> global(JSGlobalObject::cast(
native_context->global_object()));
// Expose the natives in global if a name for it is specified.
if (FLAG_expose_natives_as != NULL && strlen(FLAG_expose_natives_as) != 0) {
- Handle<String> natives = factory->LookupAsciiSymbol(FLAG_expose_natives_as);
+ Handle<String> natives =
+ factory->InternalizeUtf8String(FLAG_expose_natives_as);
CHECK_NOT_EMPTY_HANDLE(isolate,
JSObject::SetLocalPropertyIgnoreAttributes(
global, natives,
@@ -1972,8 +2056,10 @@ void Genesis::InstallSpecialObjects(Handle<Context> native_context) {
Handle<Object> Error = GetProperty(global, "Error");
if (Error->IsJSObject()) {
- Handle<String> name = factory->LookupAsciiSymbol("stackTraceLimit");
- Handle<Smi> stack_trace_limit(Smi::FromInt(FLAG_stack_trace_limit));
+ Handle<String> name = factory->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("stackTraceLimit"));
+ Handle<Smi> stack_trace_limit(
+ Smi::FromInt(FLAG_stack_trace_limit), isolate);
CHECK_NOT_EMPTY_HANDLE(isolate,
JSObject::SetLocalPropertyIgnoreAttributes(
Handle<JSObject>::cast(Error), name,
@@ -1983,7 +2069,7 @@ void Genesis::InstallSpecialObjects(Handle<Context> native_context) {
#ifdef ENABLE_DEBUGGER_SUPPORT
// Expose the debug global object in global if a name for it is specified.
if (FLAG_expose_debug_as != NULL && strlen(FLAG_expose_debug_as) != 0) {
- Debug* debug = Isolate::Current()->debug();
+ Debug* debug = isolate->debug();
// If loading fails we just bail out without installing the
// debugger but without tanking the whole context.
if (!debug->Load()) return;
@@ -1994,8 +2080,9 @@ void Genesis::InstallSpecialObjects(Handle<Context> native_context) {
native_context->security_token());
Handle<String> debug_string =
- factory->LookupAsciiSymbol(FLAG_expose_debug_as);
- Handle<Object> global_proxy(debug->debug_context()->global_proxy());
+ factory->InternalizeUtf8String(FLAG_expose_debug_as);
+ Handle<Object> global_proxy(
+ debug->debug_context()->global_proxy(), isolate);
CHECK_NOT_EMPTY_HANDLE(isolate,
JSObject::SetLocalPropertyIgnoreAttributes(
global, debug_string, global_proxy, DONT_ENUM));
@@ -2032,26 +2119,22 @@ void Genesis::ExtensionStates::set_state(RegisteredExtension* extension,
bool Genesis::InstallExtensions(Handle<Context> native_context,
v8::ExtensionConfiguration* extensions) {
- // TODO(isolates): Extensions on multiple isolates may take a little more
- // effort. (The external API reads 'ignore'-- does that mean
- // we can break the interface?)
-
-
+ Isolate* isolate = native_context->GetIsolate();
ExtensionStates extension_states; // All extensions have state UNVISITED.
// Install auto extensions.
v8::RegisteredExtension* current = v8::RegisteredExtension::first_extension();
while (current != NULL) {
if (current->extension()->auto_enable())
- InstallExtension(current, &extension_states);
+ InstallExtension(isolate, current, &extension_states);
current = current->next();
}
- if (FLAG_expose_gc) InstallExtension("v8/gc", &extension_states);
+ if (FLAG_expose_gc) InstallExtension(isolate, "v8/gc", &extension_states);
if (FLAG_expose_externalize_string) {
- InstallExtension("v8/externalize", &extension_states);
+ InstallExtension(isolate, "v8/externalize", &extension_states);
}
if (FLAG_track_gc_object_stats) {
- InstallExtension("v8/statistics", &extension_states);
+ InstallExtension(isolate, "v8/statistics", &extension_states);
}
if (extensions == NULL) return true;
@@ -2059,7 +2142,7 @@ bool Genesis::InstallExtensions(Handle<Context> native_context,
int count = v8::ImplementationUtilities::GetNameCount(extensions);
const char** names = v8::ImplementationUtilities::GetNames(extensions);
for (int i = 0; i < count; i++) {
- if (!InstallExtension(names[i], &extension_states))
+ if (!InstallExtension(isolate, names[i], &extension_states))
return false;
}
@@ -2069,7 +2152,8 @@ bool Genesis::InstallExtensions(Handle<Context> native_context,
// Installs a named extension. This method is unoptimized and does
// not scale well if we want to support a large number of extensions.
-bool Genesis::InstallExtension(const char* name,
+bool Genesis::InstallExtension(Isolate* isolate,
+ const char* name,
ExtensionStates* extension_states) {
v8::RegisteredExtension* current = v8::RegisteredExtension::first_extension();
// Loop until we find the relevant extension
@@ -2083,13 +2167,14 @@ bool Genesis::InstallExtension(const char* name,
"v8::Context::New()", "Cannot find required extension");
return false;
}
- return InstallExtension(current, extension_states);
+ return InstallExtension(isolate, current, extension_states);
}
-bool Genesis::InstallExtension(v8::RegisteredExtension* current,
+bool Genesis::InstallExtension(Isolate* isolate,
+ v8::RegisteredExtension* current,
ExtensionStates* extension_states) {
- HandleScope scope;
+ HandleScope scope(isolate);
if (extension_states->get_state(current) == INSTALLED) return true;
// The current node has already been visited so there must be a
@@ -2104,19 +2189,21 @@ bool Genesis::InstallExtension(v8::RegisteredExtension* current,
v8::Extension* extension = current->extension();
// Install the extension's dependencies
for (int i = 0; i < extension->dependency_count(); i++) {
- if (!InstallExtension(extension->dependencies()[i], extension_states))
+ if (!InstallExtension(isolate,
+ extension->dependencies()[i],
+ extension_states)) {
return false;
+ }
}
- Isolate* isolate = Isolate::Current();
Handle<String> source_code =
isolate->factory()->NewExternalStringFromAscii(extension->source());
- bool result = CompileScriptCached(
- CStrVector(extension->name()),
- source_code,
- isolate->bootstrapper()->extensions_cache(),
- extension,
- Handle<Context>(isolate->context()),
- false);
+ bool result = CompileScriptCached(isolate,
+ CStrVector(extension->name()),
+ source_code,
+ isolate->bootstrapper()->extensions_cache(),
+ extension,
+ Handle<Context>(isolate->context()),
+ false);
ASSERT(isolate->has_pending_exception() != result);
if (!result) {
// We print out the name of the extension that failed to install.
@@ -2134,11 +2221,11 @@ bool Genesis::InstallExtension(v8::RegisteredExtension* current,
bool Genesis::InstallJSBuiltins(Handle<JSBuiltinsObject> builtins) {
- HandleScope scope;
- Factory* factory = builtins->GetIsolate()->factory();
+ HandleScope scope(isolate());
for (int i = 0; i < Builtins::NumberOfJavaScriptBuiltins(); i++) {
Builtins::JavaScript id = static_cast<Builtins::JavaScript>(i);
- Handle<String> name = factory->LookupAsciiSymbol(Builtins::GetName(id));
+ Handle<String> name =
+ factory()->InternalizeUtf8String(Builtins::GetName(id));
Object* function_object = builtins->GetPropertyNoExceptionThrown(*name);
Handle<JSFunction> function
= Handle<JSFunction>(JSFunction::cast(function_object));
@@ -2208,21 +2295,22 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
PropertyDetails details = descs->GetDetails(i);
switch (details.type()) {
case FIELD: {
- HandleScope inner;
+ HandleScope inner(isolate());
Handle<String> key = Handle<String>(descs->GetKey(i));
int index = descs->GetFieldIndex(i);
- Handle<Object> value = Handle<Object>(from->FastPropertyAt(index));
- CHECK_NOT_EMPTY_HANDLE(to->GetIsolate(),
+ Handle<Object> value = Handle<Object>(from->FastPropertyAt(index),
+ isolate());
+ CHECK_NOT_EMPTY_HANDLE(isolate(),
JSObject::SetLocalPropertyIgnoreAttributes(
to, key, value, details.attributes()));
break;
}
case CONSTANT_FUNCTION: {
- HandleScope inner;
+ HandleScope inner(isolate());
Handle<String> key = Handle<String>(descs->GetKey(i));
Handle<JSFunction> fun =
Handle<JSFunction>(descs->GetConstantFunction(i));
- CHECK_NOT_EMPTY_HANDLE(to->GetIsolate(),
+ CHECK_NOT_EMPTY_HANDLE(isolate(),
JSObject::SetLocalPropertyIgnoreAttributes(
to, key, fun, details.attributes()));
break;
@@ -2232,11 +2320,11 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
to->LocalLookup(descs->GetKey(i), &result);
// If the property is already there we skip it
if (result.IsFound()) continue;
- HandleScope inner;
+ HandleScope inner(isolate());
ASSERT(!to->HasFastProperties());
// Add to dictionary.
Handle<String> key = Handle<String>(descs->GetKey(i));
- Handle<Object> callbacks(descs->GetCallbacksObject(i));
+ Handle<Object> callbacks(descs->GetCallbacksObject(i), isolate());
PropertyDetails d = PropertyDetails(details.attributes(),
CALLBACKS,
details.descriptor_index());
@@ -2268,12 +2356,14 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
if (result.IsFound()) continue;
// Set the property.
Handle<String> key = Handle<String>(String::cast(raw_key));
- Handle<Object> value = Handle<Object>(properties->ValueAt(i));
+ Handle<Object> value = Handle<Object>(properties->ValueAt(i),
+ isolate());
if (value->IsJSGlobalPropertyCell()) {
- value = Handle<Object>(JSGlobalPropertyCell::cast(*value)->value());
+ value = Handle<Object>(JSGlobalPropertyCell::cast(*value)->value(),
+ isolate());
}
PropertyDetails details = properties->DetailsAt(i);
- CHECK_NOT_EMPTY_HANDLE(to->GetIsolate(),
+ CHECK_NOT_EMPTY_HANDLE(isolate(),
JSObject::SetLocalPropertyIgnoreAttributes(
to, key, value, details.attributes()));
}
@@ -2293,8 +2383,8 @@ void Genesis::TransferIndexedProperties(Handle<JSObject> from,
void Genesis::TransferObject(Handle<JSObject> from, Handle<JSObject> to) {
- HandleScope outer;
- Factory* factory = from->GetIsolate()->factory();
+ HandleScope outer(isolate());
+ Factory* factory = isolate()->factory();
ASSERT(!from->IsJSArray());
ASSERT(!to->IsJSArray());
@@ -2328,20 +2418,22 @@ void Genesis::MakeFunctionInstancePrototypeWritable() {
Genesis::Genesis(Isolate* isolate,
Handle<Object> global_object,
v8::Handle<v8::ObjectTemplate> global_template,
- v8::ExtensionConfiguration* extensions) : isolate_(isolate) {
+ v8::ExtensionConfiguration* extensions)
+ : isolate_(isolate),
+ active_(isolate->bootstrapper()) {
result_ = Handle<Context>::null();
// If V8 isn't running and cannot be initialized, just return.
if (!V8::IsRunning() && !V8::Initialize(NULL)) return;
// Before creating the roots we must save the context and restore it
// on all function exits.
- HandleScope scope;
+ HandleScope scope(isolate);
SaveContext saved_context(isolate);
// During genesis, the boilerplate for stack overflow won't work until the
// environment has been at least partially initialized. Add a stack check
// before entering JS code to catch overflow early.
- StackLimitCheck check(Isolate::Current());
+ StackLimitCheck check(isolate);
if (check.HasOverflowed()) return;
Handle<Context> new_context = Snapshot::NewContextFromSnapshot();
diff --git a/src/3rdparty/v8/src/bootstrapper.h b/src/3rdparty/v8/src/bootstrapper.h
index 179e65c..e33415e 100644
--- a/src/3rdparty/v8/src/bootstrapper.h
+++ b/src/3rdparty/v8/src/bootstrapper.h
@@ -54,8 +54,8 @@ class SourceCodeCache BASE_EMBEDDED {
bool Lookup(Vector<const char> name, Handle<SharedFunctionInfo>* handle) {
for (int i = 0; i < cache_->length(); i+=2) {
- SeqAsciiString* str = SeqAsciiString::cast(cache_->get(i));
- if (str->IsEqualTo(name)) {
+ SeqOneByteString* str = SeqOneByteString::cast(cache_->get(i));
+ if (str->IsUtf8EqualTo(name)) {
*handle = Handle<SharedFunctionInfo>(
SharedFunctionInfo::cast(cache_->get(i + 1)));
return true;
@@ -65,7 +65,7 @@ class SourceCodeCache BASE_EMBEDDED {
}
void Add(Vector<const char> name, Handle<SharedFunctionInfo> shared) {
- HandleScope scope;
+ HandleScope scope(shared->GetIsolate());
int length = cache_->length();
Handle<FixedArray> new_array =
FACTORY->NewFixedArray(length + 2, TENURED);
@@ -95,7 +95,6 @@ class Bootstrapper {
// Creates a JavaScript Global Context with initial object graph.
// The returned value is a global handle cast to V8Environment*.
Handle<Context> CreateEnvironment(
- Isolate* isolate,
Handle<Object> global_object,
v8::Handle<v8::ObjectTemplate> global_template,
v8::ExtensionConfiguration* extensions);
@@ -132,6 +131,7 @@ class Bootstrapper {
SourceCodeCache* extensions_cache() { return &extensions_cache_; }
private:
+ Isolate* isolate_;
typedef int NestingCounterType;
NestingCounterType nesting_;
SourceCodeCache extensions_cache_;
@@ -144,7 +144,7 @@ class Bootstrapper {
friend class Isolate;
friend class NativesExternalStringResource;
- Bootstrapper();
+ explicit Bootstrapper(Isolate* isolate);
DISALLOW_COPY_AND_ASSIGN(Bootstrapper);
};
@@ -152,15 +152,18 @@ class Bootstrapper {
class BootstrapperActive BASE_EMBEDDED {
public:
- BootstrapperActive() {
- ++Isolate::Current()->bootstrapper()->nesting_;
+ explicit BootstrapperActive(Bootstrapper* bootstrapper)
+ : bootstrapper_(bootstrapper) {
+ ++bootstrapper_->nesting_;
}
~BootstrapperActive() {
- --Isolate::Current()->bootstrapper()->nesting_;
+ --bootstrapper_->nesting_;
}
private:
+ Bootstrapper* bootstrapper_;
+
DISALLOW_COPY_AND_ASSIGN(BootstrapperActive);
};
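
BootstrapperActive is a plain RAII nesting counter; the change threads the owning Bootstrapper through explicitly instead of reaching for Isolate::Current(). A self-contained sketch of the same pattern:

    class NestingGuard {
     public:
      explicit NestingGuard(int* counter) : counter_(counter) { ++*counter_; }
      ~NestingGuard() { --*counter_; }
     private:
      int* counter_;
      NestingGuard(const NestingGuard&);    // non-copyable, mirroring
      void operator=(const NestingGuard&);  // DISALLOW_COPY_AND_ASSIGN
    };

Passing the owner in makes the dependency visible at the construction site and keeps the class usable when more than one isolate exists.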
diff --git a/src/3rdparty/v8/src/builtins.cc b/src/3rdparty/v8/src/builtins.cc
index 620e4b3..aa69203 100644
--- a/src/3rdparty/v8/src/builtins.cc
+++ b/src/3rdparty/v8/src/builtins.cc
@@ -150,7 +150,7 @@ static inline bool CalledAsConstructor(Isolate* isolate) {
// Calculate the result using a full stack frame iterator and check
// that the state of the stack is as we assume it to be in the
// code below.
- StackFrameIterator it;
+ StackFrameIterator it(isolate);
ASSERT(it.frame()->is_exit());
it.Advance();
StackFrame* frame = it.frame();
@@ -186,9 +186,67 @@ BUILTIN(EmptyFunction) {
}
+#define CONVERT_ARG_STUB_CALLER_ARGS(name) \
+ Arguments* name = reinterpret_cast<Arguments*>(args[0]);
+
+
+RUNTIME_FUNCTION(MaybeObject*, ArrayConstructor_StubFailure) {
+ CONVERT_ARG_STUB_CALLER_ARGS(caller_args);
+ // ASSERT(args.length() == 3);
+ Handle<JSFunction> function = args.at<JSFunction>(1);
+ Handle<Object> type_info = args.at<Object>(2);
+
+ JSArray* array = NULL;
+ bool holey = false;
+ if (caller_args->length() == 1 && (*caller_args)[0]->IsSmi()) {
+ int value = Smi::cast((*caller_args)[0])->value();
+ holey = (value > 0 && value < JSObject::kInitialMaxFastElementArray);
+ }
+
+ ASSERT(function->has_initial_map());
+ ElementsKind kind = function->initial_map()->elements_kind();
+ if (holey) {
+ kind = GetHoleyElementsKind(kind);
+ }
+
+ MaybeObject* maybe_array;
+ if (*type_info != isolate->heap()->undefined_value()) {
+ JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(*type_info);
+ if (cell->value()->IsSmi()) {
+ Smi* smi = Smi::cast(cell->value());
+ ElementsKind to_kind = static_cast<ElementsKind>(smi->value());
+ if (holey && !IsFastHoleyElementsKind(to_kind)) {
+ to_kind = GetHoleyElementsKind(to_kind);
+ // Update the allocation site info to reflect the advice alteration.
+ cell->set_value(Smi::FromInt(to_kind));
+ }
+
+ AllocationSiteMode mode = AllocationSiteInfo::GetMode(to_kind);
+ if (mode == TRACK_ALLOCATION_SITE) {
+ maybe_array = isolate->heap()->AllocateEmptyJSArrayWithAllocationSite(
+ kind, type_info);
+ } else {
+ maybe_array = isolate->heap()->AllocateEmptyJSArray(kind);
+ }
+ if (!maybe_array->To(&array)) return maybe_array;
+ }
+ }
+
+ if (array == NULL) {
+ maybe_array = isolate->heap()->AllocateEmptyJSArray(kind);
+ if (!maybe_array->To(&array)) return maybe_array;
+ }
+
+ maybe_array = ArrayConstructInitializeElements(array, caller_args);
+ if (maybe_array->IsFailure()) return maybe_array;
+ return array;
+}
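+
+// Annotation: the holey check above fires for the one-argument Array(n) form.
+// A single Smi in (0, kInitialMaxFastElementArray) preallocates n slots that
+// all start as holes, so a holey elements kind is chosen up front. A
+// stand-alone restatement (the constant is an illustrative assumption, not
+// V8's actual value):
+//
+//   const int kInitialMaxFastElementArraySketch = 100000;  // assumed magnitude
+//
+//   bool ShouldAllocateHoley(int caller_arg_count, bool first_is_smi, int value) {
+//     return caller_arg_count == 1 && first_is_smi &&
+//            value > 0 && value < kInitialMaxFastElementArraySketch;
+//   }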
+
+
static MaybeObject* ArrayCodeGenericCommon(Arguments* args,
Isolate* isolate,
JSFunction* constructor) {
+ ASSERT(args->length() >= 1);
Heap* heap = isolate->heap();
isolate->counters()->array_function_runtime()->Increment();
@@ -197,8 +255,29 @@ static MaybeObject* ArrayCodeGenericCommon(Arguments* args,
array = JSArray::cast((*args)[0]);
// Initialize elements and length in case later allocations fail so that the
// array object is initialized in a valid state.
- array->set_length(Smi::FromInt(0));
- array->set_elements(heap->empty_fixed_array());
+ MaybeObject* maybe_array = array->Initialize(0);
+ if (maybe_array->IsFailure()) return maybe_array;
+
+ if (FLAG_optimize_constructed_arrays) {
+ AllocationSiteInfo* info = AllocationSiteInfo::FindForJSObject(array);
+ ElementsKind to_kind = array->GetElementsKind();
+ if (info != NULL && info->GetElementsKindPayload(&to_kind)) {
+ if (IsMoreGeneralElementsKindTransition(array->GetElementsKind(),
+ to_kind)) {
+ // We have advice that we should change the elements kind.
+ if (FLAG_trace_track_allocation_sites) {
+ PrintF("AllocationSiteInfo: pre-transitioning array %p(%s->%s)\n",
+ reinterpret_cast<void*>(array),
+ ElementsKindToString(array->GetElementsKind()),
+ ElementsKindToString(to_kind));
+ }
+
+ maybe_array = array->TransitionElementsKind(to_kind);
+ if (maybe_array->IsFailure()) return maybe_array;
+ }
+ }
+ }
+
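+ // Annotation: the block above consumes allocation-site feedback. A
+ // previously recorded elements kind pre-transitions the new array, but only
+ // toward a strictly more general kind. A miniature of that rule over the
+ // fast-kind chain SMI -> DOUBLE -> OBJECT (the real lattice has more states;
+ // this is a sketch):
+ //
+ //   enum SketchKind { SMI_KIND = 0, DOUBLE_KIND = 1, OBJECT_KIND = 2 };
+ //
+ //   SketchKind ApplyAdvice(SketchKind current, bool has_advice,
+ //                          SketchKind advised) {
+ //     // Never transition backwards; generalizing is safe, narrowing is not.
+ //     if (has_advice && advised > current) return advised;
+ //     return current;
+ //   }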
if (!FLAG_smi_only_arrays) {
Context* native_context = isolate->context()->native_context();
if (array->GetElementsKind() == GetInitialFastElementsKind() &&
@@ -215,97 +294,10 @@ static MaybeObject* ArrayCodeGenericCommon(Arguments* args,
if (!maybe_obj->To(&array)) return maybe_obj;
}
- // Optimize the case where there is one argument and the argument is a
- // small smi.
- if (args->length() == 2) {
- Object* obj = (*args)[1];
- if (obj->IsSmi()) {
- int len = Smi::cast(obj)->value();
- if (len >= 0 && len < JSObject::kInitialMaxFastElementArray) {
- Object* fixed_array;
- { MaybeObject* maybe_obj = heap->AllocateFixedArrayWithHoles(len);
- if (!maybe_obj->ToObject(&fixed_array)) return maybe_obj;
- }
- ElementsKind elements_kind = array->GetElementsKind();
- if (!IsFastHoleyElementsKind(elements_kind)) {
- elements_kind = GetHoleyElementsKind(elements_kind);
- MaybeObject* maybe_array =
- array->TransitionElementsKind(elements_kind);
- if (maybe_array->IsFailure()) return maybe_array;
- }
- // We do not use SetContent to skip the unnecessary elements type check.
- array->set_elements(FixedArray::cast(fixed_array));
- array->set_length(Smi::cast(obj));
- return array;
- }
- }
- // Take the argument as the length.
- { MaybeObject* maybe_obj = array->Initialize(0);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- return array->SetElementsLength((*args)[1]);
- }
-
- // Optimize the case where there are no parameters passed.
- if (args->length() == 1) {
- return array->Initialize(JSArray::kPreallocatedArrayElements);
- }
-
- // Set length and elements on the array.
- int number_of_elements = args->length() - 1;
- MaybeObject* maybe_object =
- array->EnsureCanContainElements(args, 1, number_of_elements,
- ALLOW_CONVERTED_DOUBLE_ELEMENTS);
- if (maybe_object->IsFailure()) return maybe_object;
-
- // Allocate an appropriately typed elements array.
- MaybeObject* maybe_elms;
- ElementsKind elements_kind = array->GetElementsKind();
- if (IsFastDoubleElementsKind(elements_kind)) {
- maybe_elms = heap->AllocateUninitializedFixedDoubleArray(
- number_of_elements);
- } else {
- maybe_elms = heap->AllocateFixedArrayWithHoles(number_of_elements);
- }
- FixedArrayBase* elms;
- if (!maybe_elms->To<FixedArrayBase>(&elms)) return maybe_elms;
-
- // Fill in the content
- switch (array->GetElementsKind()) {
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_SMI_ELEMENTS: {
- FixedArray* smi_elms = FixedArray::cast(elms);
- for (int index = 0; index < number_of_elements; index++) {
- smi_elms->set(index, (*args)[index+1], SKIP_WRITE_BARRIER);
- }
- break;
- }
- case FAST_HOLEY_ELEMENTS:
- case FAST_ELEMENTS: {
- AssertNoAllocation no_gc;
- WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
- FixedArray* object_elms = FixedArray::cast(elms);
- for (int index = 0; index < number_of_elements; index++) {
- object_elms->set(index, (*args)[index+1], mode);
- }
- break;
- }
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS: {
- FixedDoubleArray* double_elms = FixedDoubleArray::cast(elms);
- for (int index = 0; index < number_of_elements; index++) {
- double_elms->set(index, (*args)[index+1]->Number());
- }
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
-
- array->set_elements(elms);
- array->set_length(Smi::FromInt(number_of_elements));
- return array;
+ Arguments adjusted_arguments(args->length() - 1, args->arguments() - 1);
+ ASSERT(adjusted_arguments.length() < 1 ||
+ adjusted_arguments[0] == (*args)[1]);
+ return ArrayConstructInitializeElements(array, &adjusted_arguments);
}
@@ -325,23 +317,15 @@ BUILTIN(ArrayCodeGeneric) {
}
-static void MoveElements(Heap* heap,
- AssertNoAllocation* no_gc,
- FixedArray* dst,
- int dst_index,
- FixedArray* src,
- int src_index,
- int len) {
+static void MoveDoubleElements(FixedDoubleArray* dst,
+ int dst_index,
+ FixedDoubleArray* src,
+ int src_index,
+ int len) {
if (len == 0) return;
- ASSERT(dst->map() != HEAP->fixed_cow_array_map());
memmove(dst->data_start() + dst_index,
src->data_start() + src_index,
- len * kPointerSize);
- WriteBarrierMode mode = dst->GetWriteBarrierMode(*no_gc);
- if (mode == UPDATE_WRITE_BARRIER) {
- heap->RecordWrites(dst->address(), dst->OffsetOfElementAt(dst_index), len);
- }
- heap->incremental_marking()->RecordWrites(dst);
+ len * kDoubleSize);
}
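
MoveDoubleElements can be a bare memmove because unboxed doubles hold no heap pointers: the write barrier and incremental-marking bookkeeping that the deleted pointer-array version needed simply do not apply. The same operation over raw storage:

    #include <string.h>

    void MoveDoubles(double* dst, int dst_index,
                     const double* src, int src_index, int len) {
      if (len == 0) return;
      // memmove handles the overlapping ranges that shift/splice produce.
      memmove(dst + dst_index, src + src_index, len * sizeof(double));
    }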
@@ -351,24 +335,39 @@ static void FillWithHoles(Heap* heap, FixedArray* dst, int from, int to) {
}
-static FixedArray* LeftTrimFixedArray(Heap* heap,
- FixedArray* elms,
- int to_trim) {
+static void FillWithHoles(FixedDoubleArray* dst, int from, int to) {
+ for (int i = from; i < to; i++) {
+ dst->set_the_hole(i);
+ }
+}
+
+
+static FixedArrayBase* LeftTrimFixedArray(Heap* heap,
+ FixedArrayBase* elms,
+ int to_trim) {
+ Map* map = elms->map();
+ int entry_size;
+ if (elms->IsFixedArray()) {
+ entry_size = kPointerSize;
+ } else {
+ entry_size = kDoubleSize;
+ }
ASSERT(elms->map() != HEAP->fixed_cow_array_map());
// For now this trick is only applied to fixed arrays in new and paged space.
// In large object space the object's start must coincide with the chunk
// and thus the trick is just not applicable.
ASSERT(!HEAP->lo_space()->Contains(elms));
- STATIC_ASSERT(FixedArray::kMapOffset == 0);
- STATIC_ASSERT(FixedArray::kLengthOffset == kPointerSize);
- STATIC_ASSERT(FixedArray::kHeaderSize == 2 * kPointerSize);
+ STATIC_ASSERT(FixedArrayBase::kMapOffset == 0);
+ STATIC_ASSERT(FixedArrayBase::kLengthOffset == kPointerSize);
+ STATIC_ASSERT(FixedArrayBase::kHeaderSize == 2 * kPointerSize);
Object** former_start = HeapObject::RawField(elms, 0);
const int len = elms->length();
- if (to_trim > FixedArray::kHeaderSize / kPointerSize &&
+ if (to_trim * entry_size > FixedArrayBase::kHeaderSize &&
+ elms->IsFixedArray() &&
!heap->new_space()->Contains(elms)) {
// If we are doing a big trim in old space then we zap the space that was
// formerly part of the array so that the GC (aided by the card-based
@@ -382,14 +381,15 @@ static FixedArray* LeftTrimFixedArray(Heap* heap,
// Technically in new space this write might be omitted (except for
// debug mode, which iterates through the heap), but to play it safe
// we still do it.
- heap->CreateFillerObjectAt(elms->address(), to_trim * kPointerSize);
+ heap->CreateFillerObjectAt(elms->address(), to_trim * entry_size);
- former_start[to_trim] = heap->fixed_array_map();
- former_start[to_trim + 1] = Smi::FromInt(len - to_trim);
+ int new_start_index = to_trim * (entry_size / kPointerSize);
+ former_start[new_start_index] = map;
+ former_start[new_start_index + 1] = Smi::FromInt(len - to_trim);
// Maintain marking consistency for HeapObjectIterator and
// IncrementalMarking.
- int size_delta = to_trim * kPointerSize;
+ int size_delta = to_trim * entry_size;
if (heap->marking()->TransferMark(elms->address(),
elms->address() + size_delta)) {
MemoryChunk::IncrementLiveBytesFromMutator(elms->address(), -size_delta);
@@ -397,8 +397,8 @@ static FixedArray* LeftTrimFixedArray(Heap* heap,
HEAP_PROFILE(heap, ObjectMoveEvent(elms->address(),
elms->address() + size_delta));
- return FixedArray::cast(HeapObject::FromAddress(
- elms->address() + to_trim * kPointerSize));
+ return FixedArrayBase::cast(HeapObject::FromAddress(
+ elms->address() + to_trim * entry_size));
}
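
LeftTrimFixedArray avoids copying by rewriting the array header to_trim entries further in and leaving a filler object behind. A worked instance of the index arithmetic above, assuming a 32-bit heap (kPointerSize == 4) and a plain FixedArray:

    const int kPointerSizeSketch = 4;      // 8 on 64-bit targets
    int entry_size = kPointerSizeSketch;   // kDoubleSize for FixedDoubleArray
    int to_trim = 3, len = 10;
    int size_delta = to_trim * entry_size;                              // 12 bytes
    int new_start_index = to_trim * (entry_size / kPointerSizeSketch);  // slot 3
    int new_length = len - to_trim;                                     // 7
    // former_start[3] receives the map word, former_start[4] the new length;
    // the 12 skipped bytes become a filler so heap iteration stays valid.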
@@ -427,19 +427,14 @@ static inline MaybeObject* EnsureJSArrayWithWritableFastElements(
Map* map = elms->map();
if (map == heap->fixed_array_map()) {
if (args == NULL || array->HasFastObjectElements()) return elms;
- if (array->HasFastDoubleElements()) {
- ASSERT(elms == heap->empty_fixed_array());
- MaybeObject* maybe_transition =
- array->TransitionElementsKind(FAST_ELEMENTS);
- if (maybe_transition->IsFailure()) return maybe_transition;
- return elms;
- }
} else if (map == heap->fixed_cow_array_map()) {
MaybeObject* maybe_writable_result = array->EnsureWritableFastElements();
if (args == NULL || array->HasFastObjectElements() ||
- maybe_writable_result->IsFailure()) {
+ !maybe_writable_result->To(&elms)) {
return maybe_writable_result;
}
+ } else if (map == heap->fixed_double_array_map()) {
+ if (args == NULL) return elms;
} else {
return NULL;
}
@@ -449,13 +444,28 @@ static inline MaybeObject* EnsureJSArrayWithWritableFastElements(
int args_length = args->length();
if (first_added_arg >= args_length) return array->elements();
- MaybeObject* maybe_array = array->EnsureCanContainElements(
- args,
- first_added_arg,
- args_length - first_added_arg,
- DONT_ALLOW_DOUBLE_ELEMENTS);
- if (maybe_array->IsFailure()) return maybe_array;
- return array->elements();
+ ElementsKind origin_kind = array->map()->elements_kind();
+ ASSERT(!IsFastObjectElementsKind(origin_kind));
+ ElementsKind target_kind = origin_kind;
+ int arg_count = args->length() - first_added_arg;
+ Object** arguments = args->arguments() - first_added_arg - (arg_count - 1);
+ for (int i = 0; i < arg_count; i++) {
+ Object* arg = arguments[i];
+ if (arg->IsHeapObject()) {
+ if (arg->IsHeapNumber()) {
+ target_kind = FAST_DOUBLE_ELEMENTS;
+ } else {
+ target_kind = FAST_ELEMENTS;
+ break;
+ }
+ }
+ }
+ if (target_kind != origin_kind) {
+ MaybeObject* maybe_failure = array->TransitionElementsKind(target_kind);
+ if (maybe_failure->IsFailure()) return maybe_failure;
+ return array->elements();
+ }
+ return elms;
}
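
The loop above widens the target elements kind just enough for the incoming arguments: Smis fit any fast kind, a HeapNumber pushes toward double elements, and any other heap object forces generic object elements, at which point scanning can stop. A stand-alone restatement:

    enum ScanKind { SCAN_SMI, SCAN_DOUBLE, SCAN_OBJECT };
    struct ScanArg { bool is_heap_object; bool is_heap_number; };

    ScanKind TargetKind(ScanKind origin, const ScanArg* args, int count) {
      ScanKind target = origin;
      for (int i = 0; i < count; i++) {
        if (!args[i].is_heap_object) continue;            // Smi: no widening
        if (args[i].is_heap_number) target = SCAN_DOUBLE;
        else return SCAN_OBJECT;                          // most general; stop
      }
      return target;
    }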
@@ -499,137 +509,191 @@ MUST_USE_RESULT static MaybeObject* CallJsBuiltin(
BUILTIN(ArrayPush) {
Heap* heap = isolate->heap();
Object* receiver = *args.receiver();
- Object* elms_obj;
- { MaybeObject* maybe_elms_obj =
- EnsureJSArrayWithWritableFastElements(heap, receiver, &args, 1);
- if (maybe_elms_obj == NULL) {
- return CallJsBuiltin(isolate, "ArrayPush", args);
- }
- if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj;
+ FixedArrayBase* elms_obj;
+ MaybeObject* maybe_elms_obj =
+ EnsureJSArrayWithWritableFastElements(heap, receiver, &args, 1);
+ if (maybe_elms_obj == NULL) {
+ return CallJsBuiltin(isolate, "ArrayPush", args);
}
- FixedArray* elms = FixedArray::cast(elms_obj);
+ if (!maybe_elms_obj->To(&elms_obj)) return maybe_elms_obj;
+
JSArray* array = JSArray::cast(receiver);
+ ASSERT(!array->map()->is_observed());
- if (FLAG_harmony_observation && array->map()->is_observed()) {
- return CallJsBuiltin(isolate, "ArrayPush", args);
- }
+ ElementsKind kind = array->GetElementsKind();
- int len = Smi::cast(array->length())->value();
- int to_add = args.length() - 1;
- if (to_add == 0) {
- return Smi::FromInt(len);
- }
- // Currently fixed arrays cannot grow too big, so
- // we should never hit this case.
- ASSERT(to_add <= (Smi::kMaxValue - len));
+ if (IsFastSmiOrObjectElementsKind(kind)) {
+ FixedArray* elms = FixedArray::cast(elms_obj);
- int new_length = len + to_add;
+ int len = Smi::cast(array->length())->value();
+ int to_add = args.length() - 1;
+ if (to_add == 0) {
+ return Smi::FromInt(len);
+ }
+ // Currently fixed arrays cannot grow too big, so
+ // we should never hit this case.
+ ASSERT(to_add <= (Smi::kMaxValue - len));
- if (new_length > elms->length()) {
- // New backing storage is needed.
- int capacity = new_length + (new_length >> 1) + 16;
- Object* obj;
- { MaybeObject* maybe_obj = heap->AllocateUninitializedFixedArray(capacity);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
+ int new_length = len + to_add;
+
+ if (new_length > elms->length()) {
+ // New backing storage is needed.
+ int capacity = new_length + (new_length >> 1) + 16;
+ FixedArray* new_elms;
+ MaybeObject* maybe_obj = heap->AllocateUninitializedFixedArray(capacity);
+ if (!maybe_obj->To(&new_elms)) return maybe_obj;
+
+ ElementsAccessor* accessor = array->GetElementsAccessor();
+ MaybeObject* maybe_failure = accessor->CopyElements(
+ NULL, 0, kind, new_elms, 0,
+ ElementsAccessor::kCopyToEndAndInitializeToHole, elms_obj);
+ ASSERT(!maybe_failure->IsFailure());
+ USE(maybe_failure);
+
+ elms = new_elms;
}
- FixedArray* new_elms = FixedArray::cast(obj);
- ElementsKind kind = array->GetElementsKind();
- CopyObjectToObjectElements(elms, kind, 0, new_elms, kind, 0, len);
- FillWithHoles(heap, new_elms, new_length, capacity);
+ // Add the provided values.
+ AssertNoAllocation no_gc;
+ WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
+ for (int index = 0; index < to_add; index++) {
+ elms->set(index + len, args[index + 1], mode);
+ }
- elms = new_elms;
- }
+ if (elms != array->elements()) {
+ array->set_elements(elms);
+ }
- // Add the provided values.
- AssertNoAllocation no_gc;
- WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
- for (int index = 0; index < to_add; index++) {
- elms->set(index + len, args[index + 1], mode);
- }
+ // Set the length.
+ array->set_length(Smi::FromInt(new_length));
+ return Smi::FromInt(new_length);
+ } else {
+ int len = Smi::cast(array->length())->value();
+ int elms_len = elms_obj->length();
- if (elms != array->elements()) {
- array->set_elements(elms);
- }
+ int to_add = args.length() - 1;
+ if (to_add == 0) {
+ return Smi::FromInt(len);
+ }
+ // Currently fixed arrays cannot grow too big, so
+ // we should never hit this case.
+ ASSERT(to_add <= (Smi::kMaxValue - len));
- // Set the length.
- array->set_length(Smi::FromInt(new_length));
- return Smi::FromInt(new_length);
+ int new_length = len + to_add;
+
+ FixedDoubleArray* new_elms;
+
+ if (new_length > elms_len) {
+ // New backing storage is needed.
+ int capacity = new_length + (new_length >> 1) + 16;
+ MaybeObject* maybe_obj =
+ heap->AllocateUninitializedFixedDoubleArray(capacity);
+ if (!maybe_obj->To(&new_elms)) return maybe_obj;
+
+ ElementsAccessor* accessor = array->GetElementsAccessor();
+ MaybeObject* maybe_failure = accessor->CopyElements(
+ NULL, 0, kind, new_elms, 0,
+ ElementsAccessor::kCopyToEndAndInitializeToHole, elms_obj);
+ ASSERT(!maybe_failure->IsFailure());
+ USE(maybe_failure);
+ } else {
+ // to_add is > 0 and new_length <= elms_len, so elms_obj cannot be the
+ // empty_fixed_array.
+ new_elms = FixedDoubleArray::cast(elms_obj);
+ }
+
+ // Add the provided values.
+ AssertNoAllocation no_gc;
+ int index;
+ for (index = 0; index < to_add; index++) {
+ Object* arg = args[index + 1];
+ new_elms->set(index + len, arg->Number());
+ }
+
+ if (new_elms != array->elements()) {
+ array->set_elements(new_elms);
+ }
+
+ // Set the length.
+ array->set_length(Smi::FromInt(new_length));
+ return Smi::FromInt(new_length);
+ }
}
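
Both push branches grow the backing store by the same policy, roughly 1.5x plus a constant, which keeps amortized push O(1) while padding small arrays:

    int GrowCapacity(int new_length) {
      return new_length + (new_length >> 1) + 16;
    }
    // e.g. GrowCapacity(100) == 100 + 50 + 16 == 166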
BUILTIN(ArrayPop) {
Heap* heap = isolate->heap();
Object* receiver = *args.receiver();
- Object* elms_obj;
- { MaybeObject* maybe_elms_obj =
- EnsureJSArrayWithWritableFastElements(heap, receiver, NULL, 0);
- if (maybe_elms_obj == NULL) return CallJsBuiltin(isolate, "ArrayPop", args);
- if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj;
- }
- FixedArray* elms = FixedArray::cast(elms_obj);
- JSArray* array = JSArray::cast(receiver);
+ FixedArrayBase* elms_obj;
+ MaybeObject* maybe_elms =
+ EnsureJSArrayWithWritableFastElements(heap, receiver, NULL, 0);
+ if (maybe_elms == NULL) return CallJsBuiltin(isolate, "ArrayPop", args);
+ if (!maybe_elms->To(&elms_obj)) return maybe_elms;
- if (FLAG_harmony_observation && array->map()->is_observed()) {
- return CallJsBuiltin(isolate, "ArrayPop", args);
- }
+ JSArray* array = JSArray::cast(receiver);
+ ASSERT(!array->map()->is_observed());
int len = Smi::cast(array->length())->value();
if (len == 0) return heap->undefined_value();
- // Get top element
- Object* top = elms->get(len - 1);
-
- // Set the length.
- array->set_length(Smi::FromInt(len - 1));
-
- if (!top->IsTheHole()) {
- // Delete the top element.
- elms->set_the_hole(len - 1);
- return top;
+ ElementsAccessor* accessor = array->GetElementsAccessor();
+ int new_length = len - 1;
+ MaybeObject* maybe_result;
+ if (accessor->HasElement(array, array, new_length, elms_obj)) {
+ maybe_result = accessor->Get(array, array, new_length, elms_obj);
+ } else {
+ maybe_result = array->GetPrototype()->GetElement(len - 1);
}
-
- return array->GetPrototype()->GetElement(len - 1);
+ if (maybe_result->IsFailure()) return maybe_result;
+ MaybeObject* maybe_failure =
+ accessor->SetLength(array, Smi::FromInt(new_length));
+ if (maybe_failure->IsFailure()) return maybe_failure;
+ return maybe_result;
}
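
The rewritten pop reads the last element through the elements accessor, falls back to the prototype chain when the slot is a hole, and only then shrinks the array. The same shape in miniature, with std::optional standing in for hole/undefined handling:

    #include <optional>
    #include <vector>

    std::optional<int> PopSketch(std::vector<std::optional<int> >* elems,
                                 std::optional<int> prototype_value) {
      if (elems->empty()) return std::nullopt;            // undefined
      std::optional<int> result = elems->back().has_value()
          ? elems->back() : prototype_value;              // hole -> prototype
      elems->pop_back();                                  // SetLength(len - 1)
      return result;
    }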
BUILTIN(ArrayShift) {
Heap* heap = isolate->heap();
Object* receiver = *args.receiver();
- Object* elms_obj;
- { MaybeObject* maybe_elms_obj =
- EnsureJSArrayWithWritableFastElements(heap, receiver, NULL, 0);
- if (maybe_elms_obj == NULL)
- return CallJsBuiltin(isolate, "ArrayShift", args);
- if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj;
- }
+ FixedArrayBase* elms_obj;
+ MaybeObject* maybe_elms_obj =
+ EnsureJSArrayWithWritableFastElements(heap, receiver, NULL, 0);
+ if (maybe_elms_obj == NULL)
+ return CallJsBuiltin(isolate, "ArrayShift", args);
+ if (!maybe_elms_obj->To(&elms_obj)) return maybe_elms_obj;
+
if (!IsJSArrayFastElementMovingAllowed(heap, JSArray::cast(receiver))) {
return CallJsBuiltin(isolate, "ArrayShift", args);
}
- FixedArray* elms = FixedArray::cast(elms_obj);
JSArray* array = JSArray::cast(receiver);
- ASSERT(array->HasFastSmiOrObjectElements());
-
- if (FLAG_harmony_observation && array->map()->is_observed()) {
- return CallJsBuiltin(isolate, "ArrayShift", args);
- }
+ ASSERT(!array->map()->is_observed());
int len = Smi::cast(array->length())->value();
if (len == 0) return heap->undefined_value();
// Get first element
- Object* first = elms->get(0);
+ ElementsAccessor* accessor = array->GetElementsAccessor();
+ Object* first;
+ MaybeObject* maybe_first = accessor->Get(receiver, array, 0, elms_obj);
+ if (!maybe_first->To(&first)) return maybe_first;
if (first->IsTheHole()) {
first = heap->undefined_value();
}
- if (!heap->lo_space()->Contains(elms)) {
- array->set_elements(LeftTrimFixedArray(heap, elms, 1));
+ if (!heap->lo_space()->Contains(elms_obj)) {
+ array->set_elements(LeftTrimFixedArray(heap, elms_obj, 1));
} else {
// Shift the elements.
- AssertNoAllocation no_gc;
- MoveElements(heap, &no_gc, elms, 0, elms, 1, len - 1);
- elms->set(len - 1, heap->the_hole_value());
+ if (elms_obj->IsFixedArray()) {
+ FixedArray* elms = FixedArray::cast(elms_obj);
+ AssertNoAllocation no_gc;
+ heap->MoveElements(elms, 0, 1, len - 1);
+ elms->set(len - 1, heap->the_hole_value());
+ } else {
+ FixedDoubleArray* elms = FixedDoubleArray::cast(elms_obj);
+ MoveDoubleElements(elms, 0, elms, 1, len - 1);
+ elms->set_the_hole(len - 1);
+ }
}
// Set the length.
@@ -642,23 +706,22 @@ BUILTIN(ArrayShift) {
BUILTIN(ArrayUnshift) {
Heap* heap = isolate->heap();
Object* receiver = *args.receiver();
- Object* elms_obj;
- { MaybeObject* maybe_elms_obj =
- EnsureJSArrayWithWritableFastElements(heap, receiver, NULL, 0);
- if (maybe_elms_obj == NULL)
- return CallJsBuiltin(isolate, "ArrayUnshift", args);
- if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj;
- }
+ FixedArrayBase* elms_obj;
+ MaybeObject* maybe_elms_obj =
+ EnsureJSArrayWithWritableFastElements(heap, receiver, NULL, 0);
+ if (maybe_elms_obj == NULL)
+ return CallJsBuiltin(isolate, "ArrayUnshift", args);
+ if (!maybe_elms_obj->To(&elms_obj)) return maybe_elms_obj;
+
if (!IsJSArrayFastElementMovingAllowed(heap, JSArray::cast(receiver))) {
return CallJsBuiltin(isolate, "ArrayUnshift", args);
}
- FixedArray* elms = FixedArray::cast(elms_obj);
JSArray* array = JSArray::cast(receiver);
- ASSERT(array->HasFastSmiOrObjectElements());
-
- if (FLAG_harmony_observation && array->map()->is_observed()) {
+ ASSERT(!array->map()->is_observed());
+ if (!array->HasFastSmiOrObjectElements()) {
return CallJsBuiltin(isolate, "ArrayUnshift", args);
}
+ FixedArray* elms = FixedArray::cast(elms_obj);
int len = Smi::cast(array->length())->value();
int to_add = args.length() - 1;
@@ -675,19 +738,23 @@ BUILTIN(ArrayUnshift) {
if (new_length > elms->length()) {
// New backing storage is needed.
int capacity = new_length + (new_length >> 1) + 16;
- Object* obj;
- { MaybeObject* maybe_obj = heap->AllocateUninitializedFixedArray(capacity);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- FixedArray* new_elms = FixedArray::cast(obj);
+ FixedArray* new_elms;
+ MaybeObject* maybe_elms = heap->AllocateUninitializedFixedArray(capacity);
+ if (!maybe_elms->To(&new_elms)) return maybe_elms;
+
ElementsKind kind = array->GetElementsKind();
- CopyObjectToObjectElements(elms, kind, 0, new_elms, kind, to_add, len);
- FillWithHoles(heap, new_elms, new_length, capacity);
+ ElementsAccessor* accessor = array->GetElementsAccessor();
+ MaybeObject* maybe_failure = accessor->CopyElements(
+ NULL, 0, kind, new_elms, to_add,
+ ElementsAccessor::kCopyToEndAndInitializeToHole, elms);
+ ASSERT(!maybe_failure->IsFailure());
+ USE(maybe_failure);
+
elms = new_elms;
array->set_elements(elms);
} else {
AssertNoAllocation no_gc;
- MoveElements(heap, &no_gc, elms, to_add, elms, 0, len);
+ heap->MoveElements(elms, to_add, 0, len);
}
// Add the provided values.
@@ -706,16 +773,20 @@ BUILTIN(ArrayUnshift) {
BUILTIN(ArraySlice) {
Heap* heap = isolate->heap();
Object* receiver = *args.receiver();
- FixedArray* elms;
+ FixedArrayBase* elms;
int len = -1;
if (receiver->IsJSArray()) {
JSArray* array = JSArray::cast(receiver);
- if (!array->HasFastSmiOrObjectElements() ||
- !IsJSArrayFastElementMovingAllowed(heap, array)) {
+ if (!IsJSArrayFastElementMovingAllowed(heap, array)) {
+ return CallJsBuiltin(isolate, "ArraySlice", args);
+ }
+
+ if (array->HasFastElements()) {
+ elms = array->elements();
+ } else {
return CallJsBuiltin(isolate, "ArraySlice", args);
}
- elms = FixedArray::cast(array->elements());
len = Smi::cast(array->length())->value();
} else {
// Array.slice(arguments, ...) is quite a common idiom (notably more
@@ -724,15 +795,19 @@ BUILTIN(ArraySlice) {
isolate->context()->native_context()->arguments_boilerplate()->map();
bool is_arguments_object_with_fast_elements =
- receiver->IsJSObject()
- && JSObject::cast(receiver)->map() == arguments_map
- && JSObject::cast(receiver)->HasFastSmiOrObjectElements();
+ receiver->IsJSObject() &&
+ JSObject::cast(receiver)->map() == arguments_map;
if (!is_arguments_object_with_fast_elements) {
return CallJsBuiltin(isolate, "ArraySlice", args);
}
- elms = FixedArray::cast(JSObject::cast(receiver)->elements());
- Object* len_obj = JSObject::cast(receiver)
- ->InObjectPropertyAt(Heap::kArgumentsLengthIndex);
+ JSObject* object = JSObject::cast(receiver);
+
+ if (object->HasFastElements()) {
+ elms = object->elements();
+ } else {
+ return CallJsBuiltin(isolate, "ArraySlice", args);
+ }
+ Object* len_obj = object->InObjectPropertyAt(Heap::kArgumentsLengthIndex);
if (!len_obj->IsSmi()) {
return CallJsBuiltin(isolate, "ArraySlice", args);
}
@@ -740,12 +815,10 @@ BUILTIN(ArraySlice) {
if (len > elms->length()) {
return CallJsBuiltin(isolate, "ArraySlice", args);
}
- for (int i = 0; i < len; i++) {
- if (elms->get(i) == heap->the_hole_value()) {
- return CallJsBuiltin(isolate, "ArraySlice", args);
- }
- }
}
+
+ JSObject* object = JSObject::cast(receiver);
+
ASSERT(len >= 0);
int n_arguments = args.length() - 1;
@@ -758,6 +831,12 @@ BUILTIN(ArraySlice) {
Object* arg1 = args[1];
if (arg1->IsSmi()) {
relative_start = Smi::cast(arg1)->value();
+ } else if (arg1->IsHeapNumber()) {
+ double start = HeapNumber::cast(arg1)->value();
+ if (start < kMinInt || start > kMaxInt) {
+ return CallJsBuiltin(isolate, "ArraySlice", args);
+ }
+ relative_start = static_cast<int>(start);
} else if (!arg1->IsUndefined()) {
return CallJsBuiltin(isolate, "ArraySlice", args);
}
@@ -765,6 +844,12 @@ BUILTIN(ArraySlice) {
Object* arg2 = args[2];
if (arg2->IsSmi()) {
relative_end = Smi::cast(arg2)->value();
+ } else if (arg2->IsHeapNumber()) {
+ double end = HeapNumber::cast(arg2)->value();
+ if (end < kMinInt || end > kMaxInt) {
+ return CallJsBuiltin(isolate, "ArraySlice", args);
+ }
+ relative_end = static_cast<int>(end);
} else if (!arg2->IsUndefined()) {
return CallJsBuiltin(isolate, "ArraySlice", args);
}
@@ -779,21 +864,40 @@ BUILTIN(ArraySlice) {
int final = (relative_end < 0) ? Max(len + relative_end, 0)
: Min(relative_end, len);
- ElementsKind elements_kind = JSObject::cast(receiver)->GetElementsKind();
-
// Calculate the length of result array.
int result_len = Max(final - k, 0);
- MaybeObject* maybe_array =
- heap->AllocateJSArrayAndStorage(elements_kind,
- result_len,
- result_len);
+ ElementsKind kind = object->GetElementsKind();
+ if (IsHoleyElementsKind(kind)) {
+ bool packed = true;
+ ElementsAccessor* accessor = ElementsAccessor::ForKind(kind);
+ for (int i = k; i < final; i++) {
+ if (!accessor->HasElement(object, object, i, elms)) {
+ packed = false;
+ break;
+ }
+ }
+ if (packed) {
+ kind = GetPackedElementsKind(kind);
+ } else if (!receiver->IsJSArray()) {
+ return CallJsBuiltin(isolate, "ArraySlice", args);
+ }
+ }
+
JSArray* result_array;
+ MaybeObject* maybe_array = heap->AllocateJSArrayAndStorage(kind,
+ result_len,
+ result_len);
+
+ AssertNoAllocation no_gc;
+ if (result_len == 0) return maybe_array;
if (!maybe_array->To(&result_array)) return maybe_array;
- CopyObjectToObjectElements(elms, elements_kind, k,
- FixedArray::cast(result_array->elements()),
- elements_kind, 0, result_len);
+ ElementsAccessor* accessor = object->GetElementsAccessor();
+ MaybeObject* maybe_failure = accessor->CopyElements(
+ NULL, k, kind, result_array->elements(), 0, result_len, elms);
+ ASSERT(!maybe_failure->IsFailure());
+ USE(maybe_failure);
return result_array;
}
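
The start/end handling above is the ES5 slice index clamping, now also accepting HeapNumbers that fit in an int. Worked case for len = 5: slice(-3, 10) gives k = max(5 - 3, 0) = 2 and final = min(10, 5) = 5, so result_len = 3. As code:

    #include <algorithm>

    void ClampSlice(int len, int relative_start, int relative_end,
                    int* k, int* final_index) {
      *k = (relative_start < 0) ? std::max(len + relative_start, 0)
                                : std::min(relative_start, len);
      *final_index = (relative_end < 0) ? std::max(len + relative_end, 0)
                                        : std::min(relative_end, len);
    }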
@@ -802,23 +906,19 @@ BUILTIN(ArraySlice) {
BUILTIN(ArraySplice) {
Heap* heap = isolate->heap();
Object* receiver = *args.receiver();
- Object* elms_obj;
- { MaybeObject* maybe_elms_obj =
- EnsureJSArrayWithWritableFastElements(heap, receiver, &args, 3);
- if (maybe_elms_obj == NULL)
- return CallJsBuiltin(isolate, "ArraySplice", args);
- if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj;
- }
- if (!IsJSArrayFastElementMovingAllowed(heap, JSArray::cast(receiver))) {
+ FixedArrayBase* elms_obj;
+ MaybeObject* maybe_elms =
+ EnsureJSArrayWithWritableFastElements(heap, receiver, &args, 3);
+ if (maybe_elms == NULL) {
return CallJsBuiltin(isolate, "ArraySplice", args);
}
- FixedArray* elms = FixedArray::cast(elms_obj);
- JSArray* array = JSArray::cast(receiver);
- ASSERT(array->HasFastSmiOrObjectElements());
+ if (!maybe_elms->To(&elms_obj)) return maybe_elms;
- if (FLAG_harmony_observation && array->map()->is_observed()) {
+ if (!IsJSArrayFastElementMovingAllowed(heap, JSArray::cast(receiver))) {
return CallJsBuiltin(isolate, "ArraySplice", args);
}
+ JSArray* array = JSArray::cast(receiver);
+ ASSERT(!array->map()->is_observed());
int len = Smi::cast(array->length())->value();
@@ -829,6 +929,12 @@ BUILTIN(ArraySplice) {
Object* arg1 = args[1];
if (arg1->IsSmi()) {
relative_start = Smi::cast(arg1)->value();
+ } else if (arg1->IsHeapNumber()) {
+ double start = HeapNumber::cast(arg1)->value();
+ if (start < kMinInt || start > kMaxInt) {
+ return CallJsBuiltin(isolate, "ArraySplice", args);
+ }
+ relative_start = static_cast<int>(start);
} else if (!arg1->IsUndefined()) {
return CallJsBuiltin(isolate, "ArraySplice", args);
}
@@ -858,51 +964,83 @@ BUILTIN(ArraySplice) {
actual_delete_count = Min(Max(value, 0), len - actual_start);
}
+ ElementsKind elements_kind = array->GetElementsKind();
+
+ int item_count = (n_arguments > 1) ? (n_arguments - 2) : 0;
+ int new_length = len - actual_delete_count + item_count;
+
+ // For double mode we do not support changing the length.
+ if (new_length > len && IsFastDoubleElementsKind(elements_kind)) {
+ return CallJsBuiltin(isolate, "ArraySplice", args);
+ }
+
+ if (new_length == 0) {
+ MaybeObject* maybe_array = heap->AllocateJSArrayWithElements(
+ elms_obj, elements_kind, actual_delete_count);
+ if (maybe_array->IsFailure()) return maybe_array;
+ array->set_elements(heap->empty_fixed_array());
+ array->set_length(Smi::FromInt(0));
+ return maybe_array;
+ }
+
JSArray* result_array = NULL;
- ElementsKind elements_kind =
- JSObject::cast(receiver)->GetElementsKind();
MaybeObject* maybe_array =
heap->AllocateJSArrayAndStorage(elements_kind,
actual_delete_count,
actual_delete_count);
if (!maybe_array->To(&result_array)) return maybe_array;
- {
- // Fill newly created array.
- CopyObjectToObjectElements(elms, elements_kind, actual_start,
- FixedArray::cast(result_array->elements()),
- elements_kind, 0, actual_delete_count);
+ if (actual_delete_count > 0) {
+ AssertNoAllocation no_gc;
+ ElementsAccessor* accessor = array->GetElementsAccessor();
+ MaybeObject* maybe_failure = accessor->CopyElements(
+ NULL, actual_start, elements_kind, result_array->elements(),
+ 0, actual_delete_count, elms_obj);
+ // Cannot fail since the origin and target array are of the same elements
+ // kind.
+ ASSERT(!maybe_failure->IsFailure());
+ USE(maybe_failure);
}
- int item_count = (n_arguments > 1) ? (n_arguments - 2) : 0;
- int new_length = len - actual_delete_count + item_count;
-
bool elms_changed = false;
if (item_count < actual_delete_count) {
// Shrink the array.
- const bool trim_array = !heap->lo_space()->Contains(elms) &&
+ const bool trim_array = !heap->lo_space()->Contains(elms_obj) &&
((actual_start + item_count) <
(len - actual_delete_count - actual_start));
if (trim_array) {
const int delta = actual_delete_count - item_count;
- {
+ if (elms_obj->IsFixedDoubleArray()) {
+ FixedDoubleArray* elms = FixedDoubleArray::cast(elms_obj);
+ MoveDoubleElements(elms, delta, elms, 0, actual_start);
+ } else {
+ FixedArray* elms = FixedArray::cast(elms_obj);
AssertNoAllocation no_gc;
- MoveElements(heap, &no_gc, elms, delta, elms, 0, actual_start);
+ heap->MoveElements(elms, delta, 0, actual_start);
}
- elms = LeftTrimFixedArray(heap, elms, delta);
+ elms_obj = LeftTrimFixedArray(heap, elms_obj, delta);
elms_changed = true;
} else {
- AssertNoAllocation no_gc;
- MoveElements(heap, &no_gc,
- elms, actual_start + item_count,
- elms, actual_start + actual_delete_count,
- (len - actual_delete_count - actual_start));
- FillWithHoles(heap, elms, new_length, len);
+ if (elms_obj->IsFixedDoubleArray()) {
+ FixedDoubleArray* elms = FixedDoubleArray::cast(elms_obj);
+ MoveDoubleElements(elms, actual_start + item_count,
+ elms, actual_start + actual_delete_count,
+ (len - actual_delete_count - actual_start));
+ FillWithHoles(elms, new_length, len);
+ } else {
+ FixedArray* elms = FixedArray::cast(elms_obj);
+ AssertNoAllocation no_gc;
+ heap->MoveElements(elms, actual_start + item_count,
+ actual_start + actual_delete_count,
+ (len - actual_delete_count - actual_start));
+ FillWithHoles(heap, elms, new_length, len);
+ }
}
} else if (item_count > actual_delete_count) {
+ FixedArray* elms = FixedArray::cast(elms_obj);
// Currently fixed arrays cannot grow too big, so
// we should never hit this case.
ASSERT((item_count - actual_delete_count) <= (Smi::kMaxValue - len));
@@ -911,48 +1049,60 @@ BUILTIN(ArraySplice) {
if (new_length > elms->length()) {
// New backing storage is needed.
int capacity = new_length + (new_length >> 1) + 16;
- Object* obj;
- { MaybeObject* maybe_obj =
- heap->AllocateUninitializedFixedArray(capacity);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
- FixedArray* new_elms = FixedArray::cast(obj);
+ FixedArray* new_elms;
+ MaybeObject* maybe_obj = heap->AllocateUninitializedFixedArray(capacity);
+ if (!maybe_obj->To(&new_elms)) return maybe_obj;
- {
+ AssertNoAllocation no_gc;
+
+ ElementsKind kind = array->GetElementsKind();
+ ElementsAccessor* accessor = array->GetElementsAccessor();
+ if (actual_start > 0) {
// Copy the part before actual_start as is.
- ElementsKind kind = array->GetElementsKind();
- CopyObjectToObjectElements(elms, kind, 0,
- new_elms, kind, 0, actual_start);
- const int to_copy = len - actual_delete_count - actual_start;
- CopyObjectToObjectElements(elms, kind,
- actual_start + actual_delete_count,
- new_elms, kind,
- actual_start + item_count, to_copy);
+ MaybeObject* maybe_failure = accessor->CopyElements(
+ NULL, 0, kind, new_elms, 0, actual_start, elms);
+ ASSERT(!maybe_failure->IsFailure());
+ USE(maybe_failure);
}
-
- FillWithHoles(heap, new_elms, new_length, capacity);
-
- elms = new_elms;
+ MaybeObject* maybe_failure = accessor->CopyElements(
+ NULL, actual_start + actual_delete_count, kind, new_elms,
+ actual_start + item_count,
+ ElementsAccessor::kCopyToEndAndInitializeToHole, elms);
+ ASSERT(!maybe_failure->IsFailure());
+ USE(maybe_failure);
+
+ elms_obj = new_elms;
elms_changed = true;
} else {
AssertNoAllocation no_gc;
- MoveElements(heap, &no_gc,
- elms, actual_start + item_count,
- elms, actual_start + actual_delete_count,
- (len - actual_delete_count - actual_start));
+ heap->MoveElements(elms, actual_start + item_count,
+ actual_start + actual_delete_count,
+ (len - actual_delete_count - actual_start));
}
}
- AssertNoAllocation no_gc;
- WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
- for (int k = actual_start; k < actual_start + item_count; k++) {
- elms->set(k, args[3 + k - actual_start], mode);
+ if (IsFastDoubleElementsKind(elements_kind)) {
+ FixedDoubleArray* elms = FixedDoubleArray::cast(elms_obj);
+ for (int k = actual_start; k < actual_start + item_count; k++) {
+ Object* arg = args[3 + k - actual_start];
+ if (arg->IsSmi()) {
+ elms->set(k, Smi::cast(arg)->value());
+ } else {
+ elms->set(k, HeapNumber::cast(arg)->value());
+ }
+ }
+ } else {
+ FixedArray* elms = FixedArray::cast(elms_obj);
+ AssertNoAllocation no_gc;
+ WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
+ for (int k = actual_start; k < actual_start + item_count; k++) {
+ elms->set(k, args[3 + k - actual_start], mode);
+ }
}
if (elms_changed) {
- array->set_elements(elms);
+ array->set_elements(elms_obj);
}
-
// Set the length.
array->set_length(Smi::FromInt(new_length));
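
The hunk above writes the spliced-in arguments straight into the backing store; for double-backed arrays each argument is unboxed first. A minimal standalone sketch of that unboxing decision (hand-rolled Value type, not V8's object model):

    #include <cstdint>
    #include <vector>

    struct Value { bool is_smi; int32_t smi; double boxed; };

    // Mirrors the loop above: smis carry their payload inline, heap
    // numbers carry an already-boxed double that is copied out verbatim.
    static void StoreArgs(std::vector<double>* elms, size_t start,
                          const std::vector<Value>& args) {
      for (size_t k = 0; k < args.size(); ++k) {
        (*elms)[start + k] = args[k].is_smi
            ? static_cast<double>(args[k].smi)
            : args[k].boxed;
      }
    }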
@@ -974,14 +1124,15 @@ BUILTIN(ArrayConcat) {
int n_arguments = args.length();
int result_len = 0;
ElementsKind elements_kind = GetInitialFastElementsKind();
+ bool has_double = false;
+ bool is_holey = false;
for (int i = 0; i < n_arguments; i++) {
Object* arg = args[i];
if (!arg->IsJSArray() ||
- !JSArray::cast(arg)->HasFastSmiOrObjectElements() ||
+ !JSArray::cast(arg)->HasFastElements() ||
JSArray::cast(arg)->GetPrototype() != array_proto) {
return CallJsBuiltin(isolate, "ArrayConcat", args);
}
-
int len = Smi::cast(JSArray::cast(arg)->length())->value();
// We shouldn't overflow when adding another len.
@@ -991,47 +1142,52 @@ BUILTIN(ArrayConcat) {
result_len += len;
ASSERT(result_len >= 0);
- if (result_len > FixedArray::kMaxLength) {
+ if (result_len > FixedDoubleArray::kMaxLength) {
return CallJsBuiltin(isolate, "ArrayConcat", args);
}
- if (!JSArray::cast(arg)->HasFastSmiElements()) {
- if (IsFastSmiElementsKind(elements_kind)) {
- if (IsFastHoleyElementsKind(elements_kind)) {
- elements_kind = FAST_HOLEY_ELEMENTS;
- } else {
- elements_kind = FAST_ELEMENTS;
- }
- }
- }
-
- if (JSArray::cast(arg)->HasFastHoleyElements()) {
- elements_kind = GetHoleyElementsKind(elements_kind);
+ ElementsKind arg_kind = JSArray::cast(arg)->map()->elements_kind();
+ has_double = has_double || IsFastDoubleElementsKind(arg_kind);
+ is_holey = is_holey || IsFastHoleyElementsKind(arg_kind);
+ if (IsMoreGeneralElementsKindTransition(elements_kind, arg_kind)) {
+ elements_kind = arg_kind;
}
}
- // Allocate result.
+ if (is_holey) elements_kind = GetHoleyElementsKind(elements_kind);
+
+ // If a double array is concatenated into a fast elements array, the fast
+ // elements array needs to be initialized to contain proper holes, since
+ // boxing doubles may cause incremental marking.
+ ArrayStorageAllocationMode mode =
+ has_double && IsFastObjectElementsKind(elements_kind)
+ ? INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE : DONT_INITIALIZE_ARRAY_ELEMENTS;
JSArray* result_array;
+ // Allocate result.
MaybeObject* maybe_array =
heap->AllocateJSArrayAndStorage(elements_kind,
result_len,
- result_len);
+ result_len,
+ mode);
if (!maybe_array->To(&result_array)) return maybe_array;
if (result_len == 0) return result_array;
- // Copy data.
- int start_pos = 0;
- FixedArray* result_elms(FixedArray::cast(result_array->elements()));
+ int j = 0;
+ FixedArrayBase* storage = result_array->elements();
+ ElementsAccessor* accessor = ElementsAccessor::ForKind(elements_kind);
for (int i = 0; i < n_arguments; i++) {
JSArray* array = JSArray::cast(args[i]);
int len = Smi::cast(array->length())->value();
- FixedArray* elms = FixedArray::cast(array->elements());
- CopyObjectToObjectElements(elms, elements_kind, 0,
- result_elms, elements_kind,
- start_pos, len);
- start_pos += len;
+ ElementsKind from_kind = array->GetElementsKind();
+ if (len > 0) {
+ MaybeObject* maybe_failure =
+ accessor->CopyElements(array, 0, from_kind, storage, j, len);
+ if (maybe_failure->IsFailure()) return maybe_failure;
+ j += len;
+ }
}
- ASSERT(start_pos == result_len);
+
+ ASSERT(j == result_len);
return result_array;
}
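
ArrayConcat now folds every argument's elements kind into the most general one and makes the result holey if any input is holey. A sketch of that computation under an assumed, simplified kind ordering (V8's real ElementsKind lattice and IsMoreGeneralElementsKindTransition are richer):

    enum Kind { SMI, DOUBLE, OBJECT };  // assumed ordering, most specific first

    struct ArrayDesc { Kind kind; bool holey; };

    static ArrayDesc ResultKind(const ArrayDesc* args, int n) {
      ArrayDesc r = { SMI, false };
      for (int i = 0; i < n; ++i) {
        if (args[i].kind > r.kind) r.kind = args[i].kind;  // widen packed kind
        r.holey = r.holey || args[i].holey;                // holes are sticky
      }
      return r;
    }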
@@ -1042,7 +1198,7 @@ BUILTIN(ArrayConcat) {
BUILTIN(StrictModePoisonPill) {
- HandleScope scope;
+ HandleScope scope(isolate);
return isolate->Throw(*isolate->factory()->NewTypeError(
"strict_poison_pill", HandleVector<Object>(NULL, 0)));
}
@@ -1051,12 +1207,28 @@ BUILTIN(StrictModePoisonPill) {
//
+// Searches the hidden prototype chain of the given object for the first
+// object that is an instance of the given type. If no such object can
+// be found then Heap::null_value() is returned.
+static inline Object* FindHidden(Heap* heap,
+ Object* object,
+ FunctionTemplateInfo* type) {
+ if (object->IsInstanceOf(type)) return object;
+ Object* proto = object->GetPrototype(heap->isolate());
+ if (proto->IsJSObject() &&
+ JSObject::cast(proto)->map()->is_hidden_prototype()) {
+ return FindHidden(heap, proto, type);
+ }
+ return heap->null_value();
+}
+
+
// Returns the holder JSObject if the function can legally be called
// with this receiver. Returns Heap::null_value() if the call is
// illegal. Any arguments that don't fit the expected type are
-// overwritten with undefined. Arguments that do fit the expected
-// type is overwritten with the object in the prototype chain that
-// actually has that type.
+// overwritten with undefined. Note that holder and the arguments are
+// implicitly rewritten with the first object in the hidden prototype
+// chain that actually has the expected type.
static inline Object* TypeCheck(Heap* heap,
int argc,
Object** argv,
@@ -1069,15 +1241,10 @@ static inline Object* TypeCheck(Heap* heap,
SignatureInfo* sig = SignatureInfo::cast(sig_obj);
// If necessary, check the receiver
Object* recv_type = sig->receiver();
-
Object* holder = recv;
if (!recv_type->IsUndefined()) {
- for (; holder != heap->null_value(); holder = holder->GetPrototype()) {
- if (holder->IsInstanceOf(FunctionTemplateInfo::cast(recv_type))) {
- break;
- }
- }
- if (holder == heap->null_value()) return holder;
+ holder = FindHidden(heap, holder, FunctionTemplateInfo::cast(recv_type));
+ if (holder == heap->null_value()) return heap->null_value();
}
Object* args_obj = sig->args();
// If there is no argument signature we're done
@@ -1090,13 +1257,9 @@ static inline Object* TypeCheck(Heap* heap,
if (argtype->IsUndefined()) continue;
Object** arg = &argv[-1 - i];
Object* current = *arg;
- for (; current != heap->null_value(); current = current->GetPrototype()) {
- if (current->IsInstanceOf(FunctionTemplateInfo::cast(argtype))) {
- *arg = current;
- break;
- }
- }
- if (current == heap->null_value()) *arg = heap->undefined_value();
+ current = FindHidden(heap, current, FunctionTemplateInfo::cast(argtype));
+ if (current == heap->null_value()) current = heap->undefined_value();
+ *arg = current;
}
return holder;
}
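
FindHidden follows only prototype links whose map is flagged as a hidden prototype, so the search stops at the first ordinary prototype rather than walking the whole chain as the old loops did. A sketch of just the recursion shape (hand-rolled types; booleans stand in for IsInstanceOf and is_hidden_prototype):

    #include <cstddef>

    struct Obj {
      bool matches_type;      // stands in for IsInstanceOf(type)
      bool hidden_prototype;  // stands in for map()->is_hidden_prototype()
      Obj* proto;             // NULL terminates the chain
    };

    static Obj* FindHiddenSketch(Obj* o) {
      if (o->matches_type) return o;
      if (o->proto != NULL && o->proto->hidden_prototype) {
        return FindHiddenSketch(o->proto);  // only hidden links are followed
      }
      return NULL;  // stands in for heap->null_value()
    }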
@@ -1267,26 +1430,6 @@ BUILTIN(HandleApiCallAsConstructor) {
}
-static void Generate_LoadIC_ArrayLength(MacroAssembler* masm) {
- LoadIC::GenerateArrayLength(masm);
-}
-
-
-static void Generate_LoadIC_StringLength(MacroAssembler* masm) {
- LoadIC::GenerateStringLength(masm, false);
-}
-
-
-static void Generate_LoadIC_StringWrapperLength(MacroAssembler* masm) {
- LoadIC::GenerateStringLength(masm, true);
-}
-
-
-static void Generate_LoadIC_FunctionPrototype(MacroAssembler* masm) {
- LoadIC::GenerateFunctionPrototype(masm);
-}
-
-
static void Generate_LoadIC_Initialize(MacroAssembler* masm) {
LoadIC::GenerateInitialize(masm);
}
@@ -1328,12 +1471,12 @@ static void Generate_KeyedLoadIC_Slow(MacroAssembler* masm) {
static void Generate_KeyedLoadIC_Miss(MacroAssembler* masm) {
- KeyedLoadIC::GenerateMiss(masm, false);
+ KeyedLoadIC::GenerateMiss(masm, MISS);
}
static void Generate_KeyedLoadIC_MissForceGeneric(MacroAssembler* masm) {
- KeyedLoadIC::GenerateMiss(masm, true);
+ KeyedLoadIC::GenerateMiss(masm, MISS_FORCE_GENERIC);
}
@@ -1394,16 +1537,6 @@ static void Generate_StoreIC_Megamorphic_Strict(MacroAssembler* masm) {
}
-static void Generate_StoreIC_ArrayLength(MacroAssembler* masm) {
- StoreIC::GenerateArrayLength(masm);
-}
-
-
-static void Generate_StoreIC_ArrayLength_Strict(MacroAssembler* masm) {
- StoreIC::GenerateArrayLength(masm);
-}
-
-
static void Generate_StoreIC_GlobalProxy(MacroAssembler* masm) {
StoreIC::GenerateGlobalProxy(masm, kNonStrictMode);
}
@@ -1430,12 +1563,12 @@ static void Generate_KeyedStoreIC_Generic_Strict(MacroAssembler* masm) {
static void Generate_KeyedStoreIC_Miss(MacroAssembler* masm) {
- KeyedStoreIC::GenerateMiss(masm, false);
+ KeyedStoreIC::GenerateMiss(masm, MISS);
}
static void Generate_KeyedStoreIC_MissForceGeneric(MacroAssembler* masm) {
- KeyedStoreIC::GenerateMiss(masm, true);
+ KeyedStoreIC::GenerateMiss(masm, MISS_FORCE_GENERIC);
}
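
Both miss generators now take a named mode instead of a bare bool, so call sites document themselves. A sketch of the idiom; the real enum is declared elsewhere in this patch and its name and member order may differ:

    enum ICMissMode { MISS, MISS_FORCE_GENERIC };

    // Before: GenerateMiss(masm, true);               // true... of what?
    // After:  GenerateMiss(masm, MISS_FORCE_GENERIC); // self-describing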
diff --git a/src/3rdparty/v8/src/builtins.h b/src/3rdparty/v8/src/builtins.h
index a2f752e..15abeb1 100644
--- a/src/3rdparty/v8/src/builtins.h
+++ b/src/3rdparty/v8/src/builtins.h
@@ -107,6 +107,8 @@ enum BuiltinExtraArguments {
Code::kNoExtraICState) \
V(NotifyLazyDeoptimized, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
+ V(NotifyStubFailure, BUILTIN, UNINITIALIZED, \
+ Code::kNoExtraICState) \
V(NotifyOSR, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
\
@@ -131,15 +133,7 @@ enum BuiltinExtraArguments {
V(LoadIC_PreMonomorphic, LOAD_IC, PREMONOMORPHIC, \
Code::kNoExtraICState) \
V(LoadIC_Normal, LOAD_IC, MONOMORPHIC, \
- Code::kNoExtraICState) \
- V(LoadIC_ArrayLength, LOAD_IC, MONOMORPHIC, \
- Code::kNoExtraICState) \
- V(LoadIC_StringLength, LOAD_IC, MONOMORPHIC, \
- Code::kNoExtraICState) \
- V(LoadIC_StringWrapperLength, LOAD_IC, MONOMORPHIC, \
- Code::kNoExtraICState) \
- V(LoadIC_FunctionPrototype, LOAD_IC, MONOMORPHIC, \
- Code::kNoExtraICState) \
+ Code::IC_FRAGMENT) \
V(LoadIC_Megamorphic, LOAD_IC, MEGAMORPHIC, \
Code::kNoExtraICState) \
V(LoadIC_Getter_ForDeopt, LOAD_IC, MONOMORPHIC, \
@@ -149,48 +143,44 @@ enum BuiltinExtraArguments {
Code::kNoExtraICState) \
V(KeyedLoadIC_PreMonomorphic, KEYED_LOAD_IC, PREMONOMORPHIC, \
Code::kNoExtraICState) \
- V(KeyedLoadIC_Generic, KEYED_LOAD_IC, MEGAMORPHIC, \
+ V(KeyedLoadIC_Generic, KEYED_LOAD_IC, GENERIC, \
Code::kNoExtraICState) \
V(KeyedLoadIC_String, KEYED_LOAD_IC, MEGAMORPHIC, \
Code::kNoExtraICState) \
- V(KeyedLoadIC_IndexedInterceptor, KEYED_LOAD_IC, MEGAMORPHIC, \
+ V(KeyedLoadIC_IndexedInterceptor, KEYED_LOAD_IC, MONOMORPHIC, \
Code::kNoExtraICState) \
- V(KeyedLoadIC_NonStrictArguments, KEYED_LOAD_IC, MEGAMORPHIC, \
+ V(KeyedLoadIC_NonStrictArguments, KEYED_LOAD_IC, MONOMORPHIC, \
Code::kNoExtraICState) \
\
V(StoreIC_Initialize, STORE_IC, UNINITIALIZED, \
Code::kNoExtraICState) \
- V(StoreIC_ArrayLength, STORE_IC, MONOMORPHIC, \
- Code::kNoExtraICState) \
V(StoreIC_Normal, STORE_IC, MONOMORPHIC, \
Code::kNoExtraICState) \
V(StoreIC_Megamorphic, STORE_IC, MEGAMORPHIC, \
Code::kNoExtraICState) \
- V(StoreIC_GlobalProxy, STORE_IC, MEGAMORPHIC, \
+ V(StoreIC_GlobalProxy, STORE_IC, GENERIC, \
Code::kNoExtraICState) \
V(StoreIC_Initialize_Strict, STORE_IC, UNINITIALIZED, \
kStrictMode) \
- V(StoreIC_ArrayLength_Strict, STORE_IC, MONOMORPHIC, \
- kStrictMode) \
V(StoreIC_Normal_Strict, STORE_IC, MONOMORPHIC, \
kStrictMode) \
V(StoreIC_Megamorphic_Strict, STORE_IC, MEGAMORPHIC, \
kStrictMode) \
- V(StoreIC_GlobalProxy_Strict, STORE_IC, MEGAMORPHIC, \
+ V(StoreIC_GlobalProxy_Strict, STORE_IC, GENERIC, \
kStrictMode) \
V(StoreIC_Setter_ForDeopt, STORE_IC, MONOMORPHIC, \
kStrictMode) \
\
V(KeyedStoreIC_Initialize, KEYED_STORE_IC, UNINITIALIZED, \
Code::kNoExtraICState) \
- V(KeyedStoreIC_Generic, KEYED_STORE_IC, MEGAMORPHIC, \
+ V(KeyedStoreIC_Generic, KEYED_STORE_IC, GENERIC, \
Code::kNoExtraICState) \
\
V(KeyedStoreIC_Initialize_Strict, KEYED_STORE_IC, UNINITIALIZED, \
kStrictMode) \
- V(KeyedStoreIC_Generic_Strict, KEYED_STORE_IC, MEGAMORPHIC, \
+ V(KeyedStoreIC_Generic_Strict, KEYED_STORE_IC, GENERIC, \
kStrictMode) \
- V(KeyedStoreIC_NonStrictArguments, KEYED_STORE_IC, MEGAMORPHIC, \
+ V(KeyedStoreIC_NonStrictArguments, KEYED_STORE_IC, MONOMORPHIC, \
Code::kNoExtraICState) \
V(TransitionElementsSmiToDouble, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
@@ -219,31 +209,31 @@ enum BuiltinExtraArguments {
#ifdef ENABLE_DEBUGGER_SUPPORT
// Define list of builtins used by the debugger implemented in assembly.
-#define BUILTIN_LIST_DEBUG_A(V) \
- V(Return_DebugBreak, BUILTIN, DEBUG_BREAK, \
- Code::kNoExtraICState) \
- V(CallFunctionStub_DebugBreak, BUILTIN, DEBUG_BREAK, \
- Code::kNoExtraICState) \
- V(CallFunctionStub_Recording_DebugBreak, BUILTIN, DEBUG_BREAK, \
- Code::kNoExtraICState) \
- V(CallConstructStub_DebugBreak, BUILTIN, DEBUG_BREAK, \
- Code::kNoExtraICState) \
- V(CallConstructStub_Recording_DebugBreak, BUILTIN, DEBUG_BREAK, \
- Code::kNoExtraICState) \
- V(LoadIC_DebugBreak, LOAD_IC, DEBUG_BREAK, \
- Code::kNoExtraICState) \
- V(KeyedLoadIC_DebugBreak, KEYED_LOAD_IC, DEBUG_BREAK, \
- Code::kNoExtraICState) \
- V(StoreIC_DebugBreak, STORE_IC, DEBUG_BREAK, \
- Code::kNoExtraICState) \
- V(KeyedStoreIC_DebugBreak, KEYED_STORE_IC, DEBUG_BREAK, \
- Code::kNoExtraICState) \
- V(Slot_DebugBreak, BUILTIN, DEBUG_BREAK, \
- Code::kNoExtraICState) \
- V(PlainReturn_LiveEdit, BUILTIN, DEBUG_BREAK, \
- Code::kNoExtraICState) \
- V(FrameDropper_LiveEdit, BUILTIN, DEBUG_BREAK, \
- Code::kNoExtraICState)
+#define BUILTIN_LIST_DEBUG_A(V) \
+ V(Return_DebugBreak, BUILTIN, DEBUG_STUB, \
+ DEBUG_BREAK) \
+ V(CallFunctionStub_DebugBreak, BUILTIN, DEBUG_STUB, \
+ DEBUG_BREAK) \
+ V(CallFunctionStub_Recording_DebugBreak, BUILTIN, DEBUG_STUB, \
+ DEBUG_BREAK) \
+ V(CallConstructStub_DebugBreak, BUILTIN, DEBUG_STUB, \
+ DEBUG_BREAK) \
+ V(CallConstructStub_Recording_DebugBreak, BUILTIN, DEBUG_STUB, \
+ DEBUG_BREAK) \
+ V(LoadIC_DebugBreak, LOAD_IC, DEBUG_STUB, \
+ DEBUG_BREAK) \
+ V(KeyedLoadIC_DebugBreak, KEYED_LOAD_IC, DEBUG_STUB, \
+ DEBUG_BREAK) \
+ V(StoreIC_DebugBreak, STORE_IC, DEBUG_STUB, \
+ DEBUG_BREAK) \
+ V(KeyedStoreIC_DebugBreak, KEYED_STORE_IC, DEBUG_STUB, \
+ DEBUG_BREAK) \
+ V(Slot_DebugBreak, BUILTIN, DEBUG_STUB, \
+ DEBUG_BREAK) \
+ V(PlainReturn_LiveEdit, BUILTIN, DEBUG_STUB, \
+ DEBUG_BREAK) \
+ V(FrameDropper_LiveEdit, BUILTIN, DEBUG_STUB, \
+ DEBUG_BREAK)
#else
#define BUILTIN_LIST_DEBUG_A(V)
#endif
@@ -282,6 +272,7 @@ enum BuiltinExtraArguments {
V(APPLY_PREPARE, 1) \
V(APPLY_OVERFLOW, 1)
+MaybeObject* ArrayConstructor_StubFailure(Arguments args, Isolate* isolate);
class BuiltinFunctionTable;
class ObjectVisitor;
@@ -386,6 +377,7 @@ class Builtins {
static void Generate_NotifyDeoptimized(MacroAssembler* masm);
static void Generate_NotifyLazyDeoptimized(MacroAssembler* masm);
static void Generate_NotifyOSR(MacroAssembler* masm);
+ static void Generate_NotifyStubFailure(MacroAssembler* masm);
static void Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm);
static void Generate_FunctionCall(MacroAssembler* masm);
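
The BUILTIN_LIST macros edited above are X-macros: each entry is replayed through a caller-supplied V(...), so one list stamps out declarations, name tables, and generator bindings in lockstep. A standalone sketch of the pattern with toy names:

    #define TOY_BUILTIN_LIST(V) \
      V(Foo)                    \
      V(Bar)

    // One expansion produces the declarations...
    #define DECLARE(name) void Generate_##name();
    TOY_BUILTIN_LIST(DECLARE)
    #undef DECLARE

    // ...and another produces a parallel name table: {"Foo", "Bar"}.
    static const char* kToyNames[] = {
    #define NAME(name) #name,
      TOY_BUILTIN_LIST(NAME)
    #undef NAME
    };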
diff --git a/src/3rdparty/v8/src/checks.cc b/src/3rdparty/v8/src/checks.cc
index 320fd6b..a6405ec 100644
--- a/src/3rdparty/v8/src/checks.cc
+++ b/src/3rdparty/v8/src/checks.cc
@@ -46,7 +46,8 @@ extern "C" void V8_Fatal(const char* file, int line, const char* format, ...) {
va_start(arguments, format);
i::OS::VPrintError(format, arguments);
va_end(arguments);
- i::OS::PrintError("\n#\n\n");
+ i::OS::PrintError("\n#\n");
+ i::OS::DumpBacktrace();
}
// First two times we may try to print a stack dump.
if (fatal_error_handler_nesting_depth < 3) {
diff --git a/src/3rdparty/v8/src/circular-queue.cc b/src/3rdparty/v8/src/circular-queue.cc
index 928c3f0..2818ce9 100644
--- a/src/3rdparty/v8/src/circular-queue.cc
+++ b/src/3rdparty/v8/src/circular-queue.cc
@@ -33,18 +33,16 @@ namespace v8 {
namespace internal {
-SamplingCircularQueue::SamplingCircularQueue(int record_size_in_bytes,
- int desired_chunk_size_in_bytes,
- int buffer_size_in_chunks)
+SamplingCircularQueue::SamplingCircularQueue(
+ int record_size_in_bytes,
+ int desired_chunk_size_in_bytes,
+ int buffer_size_in_chunks,
+ bool keep_producer_consumer_distance)
: record_size_(record_size_in_bytes / sizeof(Cell)),
chunk_size_in_bytes_(desired_chunk_size_in_bytes / record_size_in_bytes *
record_size_in_bytes),
chunk_size_(chunk_size_in_bytes_ / sizeof(Cell)),
buffer_size_(chunk_size_ * buffer_size_in_chunks),
- // The distance ensures that producer and consumer never step on
- // each other's chunks and helps eviction of produced data from
- // the CPU cache (having that chunk size is bigger than the cache.)
- producer_consumer_distance_(2 * chunk_size_),
buffer_(NewArray<Cell>(buffer_size_ + 1)) {
ASSERT(buffer_size_in_chunks > 2);
// Clean up the whole buffer to avoid encountering a random kEnd
@@ -74,7 +72,13 @@ SamplingCircularQueue::SamplingCircularQueue(int record_size_in_bytes,
ASSERT(reinterpret_cast<byte*>(consumer_pos_ + 1) <=
positions_ + positions_size);
consumer_pos_->dequeue_chunk_pos = buffer_;
- consumer_pos_->dequeue_chunk_poll_pos = buffer_ + producer_consumer_distance_;
+ consumer_pos_->dequeue_chunk_poll_pos = buffer_;
+ // The distance ensures that producer and consumer never step on
+ // each other's chunks and helps eviction of produced data from
+ // the CPU cache (given that the chunk size is bigger than the cache).
+ if (keep_producer_consumer_distance) {
+ consumer_pos_->dequeue_chunk_poll_pos += 2 * chunk_size_;
+ }
consumer_pos_->dequeue_pos = NULL;
}
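
The constructor change makes the two-chunk producer/consumer gap opt-in instead of unconditional: the consumer's poll cursor either starts level with its read cursor or trails the producer by two chunks. A sketch with plain ints standing in for the real Cell* cursors:

    static int InitialPollPos(int chunk_size, bool keep_distance) {
      int poll_pos = 0;  // dequeue_chunk_poll_pos starts at the buffer base
      if (keep_distance) {
        poll_pos += 2 * chunk_size;  // trail the producer by two chunks
      }
      return poll_pos;
    }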
diff --git a/src/3rdparty/v8/src/circular-queue.h b/src/3rdparty/v8/src/circular-queue.h
index 73afc68..a8eb524 100644
--- a/src/3rdparty/v8/src/circular-queue.h
+++ b/src/3rdparty/v8/src/circular-queue.h
@@ -47,7 +47,8 @@ class SamplingCircularQueue {
// Executed on the application thread.
SamplingCircularQueue(int record_size_in_bytes,
int desired_chunk_size_in_bytes,
- int buffer_size_in_chunks);
+ int buffer_size_in_chunks,
+ bool keep_producer_consumer_distance = true);
~SamplingCircularQueue();
// Enqueue returns a pointer to a memory location for storing the next
@@ -88,7 +89,6 @@ class SamplingCircularQueue {
const int chunk_size_in_bytes_;
const int chunk_size_;
const int buffer_size_;
- const int producer_consumer_distance_;
Cell* buffer_;
byte* positions_;
ProducerPosition* producer_pos_;
diff --git a/src/3rdparty/v8/src/code-stubs-hydrogen.cc b/src/3rdparty/v8/src/code-stubs-hydrogen.cc
new file mode 100644
index 0000000..491e255
--- /dev/null
+++ b/src/3rdparty/v8/src/code-stubs-hydrogen.cc
@@ -0,0 +1,366 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "code-stubs.h"
+#include "hydrogen.h"
+#include "lithium.h"
+
+namespace v8 {
+namespace internal {
+
+
+static LChunk* OptimizeGraph(HGraph* graph) {
+ Isolate* isolate = graph->isolate();
+ AssertNoAllocation no_gc;
+ NoHandleAllocation no_handles(isolate);
+ NoHandleDereference no_deref(isolate);
+
+ ASSERT(graph != NULL);
+ SmartArrayPointer<char> bailout_reason;
+ if (!graph->Optimize(&bailout_reason)) {
+ FATAL(bailout_reason.is_empty() ? "unknown" : *bailout_reason);
+ }
+ LChunk* chunk = LChunk::NewChunk(graph);
+ if (chunk == NULL) {
+ FATAL(graph->info()->bailout_reason());
+ }
+ return chunk;
+}
+
+
+class CodeStubGraphBuilderBase : public HGraphBuilder {
+ public:
+ CodeStubGraphBuilderBase(Isolate* isolate, HydrogenCodeStub* stub)
+ : HGraphBuilder(&info_), info_(stub, isolate), context_(NULL) {
+ int major_key = stub->MajorKey();
+ descriptor_ = info_.isolate()->code_stub_interface_descriptor(major_key);
+ if (descriptor_->register_param_count_ < 0) {
+ stub->InitializeInterfaceDescriptor(info_.isolate(), descriptor_);
+ }
+ parameters_.Reset(new HParameter*[descriptor_->register_param_count_]);
+ }
+ virtual bool BuildGraph();
+
+ protected:
+ virtual void BuildCodeStub() = 0;
+ HParameter* GetParameter(int parameter) { return parameters_[parameter]; }
+ CompilationInfo* info() { return &info_; }
+ HydrogenCodeStub* stub() { return info_.code_stub(); }
+ HContext* context() { return context_; }
+ Isolate* isolate() { return info_.isolate(); }
+
+ private:
+ SmartArrayPointer<HParameter*> parameters_;
+ CompilationInfoWithZone info_;
+ CodeStubInterfaceDescriptor* descriptor_;
+ HContext* context_;
+};
+
+
+bool CodeStubGraphBuilderBase::BuildGraph() {
+ if (FLAG_trace_hydrogen) {
+ const char* name = CodeStub::MajorName(stub()->MajorKey(), false);
+ PrintF("-----------------------------------------------------------\n");
+ PrintF("Compiling stub %s using hydrogen\n", name);
+ HTracer::Instance()->TraceCompilation(&info_);
+ }
+
+ Zone* zone = this->zone();
+ HEnvironment* start_environment =
+ new(zone) HEnvironment(zone, descriptor_->register_param_count_);
+ HBasicBlock* next_block = CreateBasicBlock(start_environment);
+
+ current_block()->Goto(next_block);
+ next_block->SetJoinId(BailoutId::StubEntry());
+ set_current_block(next_block);
+
+ HConstant* undefined_constant = new(zone) HConstant(
+ isolate()->factory()->undefined_value(), Representation::Tagged());
+ AddInstruction(undefined_constant);
+ graph()->set_undefined_constant(undefined_constant);
+
+ int param_count = descriptor_->register_param_count_;
+ for (int i = 0; i < param_count; ++i) {
+ HParameter* param =
+ new(zone) HParameter(i, HParameter::REGISTER_PARAMETER);
+ AddInstruction(param);
+ start_environment->Bind(i, param);
+ parameters_[i] = param;
+ }
+
+ context_ = new(zone) HContext();
+ AddInstruction(context_);
+ start_environment->Bind(param_count, context_);
+
+ AddSimulate(BailoutId::StubEntry());
+
+ BuildCodeStub();
+
+ return true;
+}
+
+template <class Stub>
+class CodeStubGraphBuilder: public CodeStubGraphBuilderBase {
+ public:
+ explicit CodeStubGraphBuilder(Stub* stub)
+ : CodeStubGraphBuilderBase(Isolate::Current(), stub) {}
+
+ protected:
+ virtual void BuildCodeStub();
+ Stub* casted_stub() { return static_cast<Stub*>(stub()); }
+};
+
+
+template <>
+void CodeStubGraphBuilder<FastCloneShallowObjectStub>::BuildCodeStub() {
+ Zone* zone = this->zone();
+ Factory* factory = isolate()->factory();
+
+ HInstruction* boilerplate =
+ AddInstruction(new(zone) HLoadKeyed(GetParameter(0),
+ GetParameter(1),
+ NULL,
+ FAST_ELEMENTS));
+
+ CheckBuilder builder(this, BailoutId::StubEntry());
+ builder.CheckNotUndefined(boilerplate);
+
+ int size = JSObject::kHeaderSize + casted_stub()->length() * kPointerSize;
+ HValue* boilerplate_size =
+ AddInstruction(new(zone) HInstanceSize(boilerplate));
+ HValue* size_in_words =
+ AddInstruction(new(zone) HConstant(size >> kPointerSizeLog2,
+ Representation::Integer32()));
+ builder.CheckIntegerEq(boilerplate_size, size_in_words);
+
+ HValue* size_in_bytes =
+ AddInstruction(new(zone) HConstant(size, Representation::Integer32()));
+ HInstruction* object =
+ AddInstruction(new(zone) HAllocate(context(),
+ size_in_bytes,
+ HType::JSObject(),
+ HAllocate::CAN_ALLOCATE_IN_NEW_SPACE));
+
+ for (int i = 0; i < size; i += kPointerSize) {
+ HInstruction* value =
+ AddInstruction(new(zone) HLoadNamedField(boilerplate, true, i));
+ AddInstruction(new(zone) HStoreNamedField(object,
+ factory->empty_string(),
+ value,
+ true, i));
+ AddSimulate(BailoutId::StubEntry());
+ }
+
+ builder.End();
+
+ HReturn* ret = new(zone) HReturn(object, context());
+ current_block()->Finish(ret);
+}
+
+
+Handle<Code> FastCloneShallowObjectStub::GenerateCode() {
+ CodeStubGraphBuilder<FastCloneShallowObjectStub> builder(this);
+ LChunk* chunk = OptimizeGraph(builder.CreateGraph());
+ return chunk->Codegen(Code::COMPILED_STUB);
+}
+
+
+template <>
+void CodeStubGraphBuilder<KeyedLoadFastElementStub>::BuildCodeStub() {
+ Zone* zone = this->zone();
+
+ HInstruction* load = BuildUncheckedMonomorphicElementAccess(
+ GetParameter(0), GetParameter(1), NULL, NULL,
+ casted_stub()->is_js_array(), casted_stub()->elements_kind(),
+ false, Representation::Tagged());
+ AddInstruction(load);
+
+ HReturn* ret = new(zone) HReturn(load, context());
+ current_block()->Finish(ret);
+}
+
+
+Handle<Code> KeyedLoadFastElementStub::GenerateCode() {
+ CodeStubGraphBuilder<KeyedLoadFastElementStub> builder(this);
+ LChunk* chunk = OptimizeGraph(builder.CreateGraph());
+ return chunk->Codegen(Code::COMPILED_STUB);
+}
+
+
+template <>
+void CodeStubGraphBuilder<TransitionElementsKindStub>::BuildCodeStub() {
+ Zone* zone = this->zone();
+
+ HValue* js_array = GetParameter(0);
+ HValue* map = GetParameter(1);
+
+ info()->MarkAsSavesCallerDoubles();
+
+ AddInstruction(new(zone) HTrapAllocationMemento(js_array));
+
+ HInstruction* array_length =
+ AddInstruction(new(zone) HJSArrayLength(js_array,
+ js_array,
+ HType::Smi()));
+
+ Heap* heap = isolate()->heap();
+ const int kMinFreeNewSpaceAfterGC =
+ ((heap->InitialSemiSpaceSize() - sizeof(FixedArrayBase)) / 2) /
+ kDoubleSize;
+
+ HConstant* max_alloc_size =
+ new(zone) HConstant(kMinFreeNewSpaceAfterGC, Representation::Integer32());
+ AddInstruction(max_alloc_size);
+ // Since we're forcing Integer32 representation for this HBoundsCheck,
+ // there's no need to Smi-check the index.
+ AddInstruction(
+ new(zone) HBoundsCheck(array_length, max_alloc_size,
+ DONT_ALLOW_SMI_KEY, Representation::Integer32()));
+
+ IfBuilder if_builder(this, BailoutId::StubEntry());
+
+ if_builder.BeginTrue(array_length, graph()->GetConstant0(), Token::EQ);
+
+ // Nothing to do, just change the map.
+
+ if_builder.BeginFalse();
+
+ HInstruction* elements =
+ AddInstruction(new(zone) HLoadElements(js_array, js_array));
+
+ HInstruction* elements_length =
+ AddInstruction(new(zone) HFixedArrayBaseLength(elements));
+
+ ElementsKind to_kind = casted_stub()->to_kind();
+ HValue* new_elements =
+ BuildAllocateElements(context(), to_kind, elements_length);
+
+ // Fast elements kinds need to be initialized in case the statements
+ // below cause a garbage collection.
+ Factory* factory = isolate()->factory();
+
+ ASSERT(!IsFastSmiElementsKind(to_kind));
+ double nan_double = FixedDoubleArray::hole_nan_as_double();
+ HValue* hole = IsFastObjectElementsKind(to_kind)
+ ? AddInstruction(new(zone) HConstant(factory->the_hole_value(),
+ Representation::Tagged()))
+ : AddInstruction(new(zone) HConstant(nan_double,
+ Representation::Double()));
+
+ LoopBuilder builder(this, context(), LoopBuilder::kPostIncrement,
+ BailoutId::StubEntry());
+
+ HValue* zero = graph()->GetConstant0();
+ HValue* start = IsFastElementsKind(to_kind) ? zero : array_length;
+ HValue* key = builder.BeginBody(start, elements_length, Token::LT);
+
+ AddInstruction(new(zone) HStoreKeyed(new_elements, key, hole, to_kind));
+ AddSimulate(BailoutId::StubEntry(), REMOVABLE_SIMULATE);
+
+ builder.EndBody();
+
+ BuildCopyElements(context(), elements,
+ casted_stub()->from_kind(), new_elements,
+ to_kind, array_length);
+
+ AddInstruction(new(zone) HStoreNamedField(js_array,
+ factory->elements_field_string(),
+ new_elements, true,
+ JSArray::kElementsOffset));
+ AddSimulate(BailoutId::StubEntry());
+
+ if_builder.End();
+
+ AddInstruction(new(zone) HStoreNamedField(js_array, factory->length_string(),
+ map, true, JSArray::kMapOffset));
+ AddSimulate(BailoutId::StubEntry());
+
+ HReturn* ret = new(zone) HReturn(js_array, context());
+ current_block()->Finish(ret);
+}
+
+
+template <>
+void CodeStubGraphBuilder<ArrayNoArgumentConstructorStub>::BuildCodeStub() {
+ HInstruction* deopt = new(zone()) HSoftDeoptimize();
+ AddInstruction(deopt);
+ current_block()->MarkAsDeoptimizing();
+ HReturn* ret = new(zone()) HReturn(GetParameter(0), context());
+ current_block()->Finish(ret);
+}
+
+
+Handle<Code> ArrayNoArgumentConstructorStub::GenerateCode() {
+ CodeStubGraphBuilder<ArrayNoArgumentConstructorStub> builder(this);
+ LChunk* chunk = OptimizeGraph(builder.CreateGraph());
+ return chunk->Codegen(Code::COMPILED_STUB);
+}
+
+
+template <>
+void CodeStubGraphBuilder<ArraySingleArgumentConstructorStub>::BuildCodeStub() {
+ HInstruction* deopt = new(zone()) HSoftDeoptimize();
+ AddInstruction(deopt);
+ current_block()->MarkAsDeoptimizing();
+ HReturn* ret = new(zone()) HReturn(GetParameter(0), context());
+ current_block()->Finish(ret);
+}
+
+
+Handle<Code> TransitionElementsKindStub::GenerateCode() {
+ CodeStubGraphBuilder<TransitionElementsKindStub> builder(this);
+ LChunk* chunk = OptimizeGraph(builder.CreateGraph());
+ return chunk->Codegen(Code::COMPILED_STUB);
+}
+
+
+Handle<Code> ArraySingleArgumentConstructorStub::GenerateCode() {
+ CodeStubGraphBuilder<ArraySingleArgumentConstructorStub> builder(this);
+ LChunk* chunk = OptimizeGraph(builder.CreateGraph());
+ return chunk->Codegen(Code::COMPILED_STUB);
+}
+
+
+template <>
+void CodeStubGraphBuilder<ArrayNArgumentsConstructorStub>::BuildCodeStub() {
+ HInstruction* deopt = new(zone()) HSoftDeoptimize();
+ AddInstruction(deopt);
+ current_block()->MarkAsDeoptimizing();
+ HReturn* ret = new(zone()) HReturn(GetParameter(0), context());
+ current_block()->Finish(ret);
+}
+
+
+Handle<Code> ArrayNArgumentsConstructorStub::GenerateCode() {
+ CodeStubGraphBuilder<ArrayNArgumentsConstructorStub> builder(this);
+ LChunk* chunk = OptimizeGraph(builder.CreateGraph());
+ return chunk->Codegen(Code::COMPILED_STUB);
+}
+
+} } // namespace v8::internal
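
The new file compiles stubs through Hydrogen rather than hand-written assembly: CodeStubGraphBuilderBase sets up the graph, parameters, and context, and each stub supplies its body as an explicit specialization of CodeStubGraphBuilder<Stub>::BuildCodeStub(). A standalone sketch of that template pattern (toy types; the real pipeline goes on to optimize the graph and emit a Handle<Code>):

    struct GraphBuilderBase {
      void BuildGraph() { BuildCodeStub(); }  // shared prologue/epilogue
      virtual ~GraphBuilderBase() {}
     protected:
      virtual void BuildCodeStub() = 0;
    };

    template <class Stub>
    struct GraphBuilder : GraphBuilderBase {
     protected:
      virtual void BuildCodeStub();  // defined per stub, below
    };

    struct ToyStub {};

    template <>
    void GraphBuilder<ToyStub>::BuildCodeStub() {
      // emit this stub's instructions here
    }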
diff --git a/src/3rdparty/v8/src/code-stubs.cc b/src/3rdparty/v8/src/code-stubs.cc
index 7a72059..4a401cd 100644
--- a/src/3rdparty/v8/src/code-stubs.cc
+++ b/src/3rdparty/v8/src/code-stubs.cc
@@ -37,31 +37,17 @@
namespace v8 {
namespace internal {
-bool CodeStub::FindCodeInCache(Code** code_out) {
- Heap* heap = Isolate::Current()->heap();
- int index = heap->code_stubs()->FindEntry(GetKey());
+bool CodeStub::FindCodeInCache(Code** code_out, Isolate* isolate) {
+ UnseededNumberDictionary* stubs = isolate->heap()->code_stubs();
+ int index = stubs->FindEntry(GetKey());
if (index != UnseededNumberDictionary::kNotFound) {
- *code_out = Code::cast(heap->code_stubs()->ValueAt(index));
+ *code_out = Code::cast(stubs->ValueAt(index));
return true;
}
return false;
}
-void CodeStub::GenerateCode(MacroAssembler* masm) {
- // Update the static counter each time a new code stub is generated.
- masm->isolate()->counters()->code_stubs()->Increment();
-
- // Nested stubs are not allowed for leaves.
- AllowStubCallsScope allow_scope(masm, false);
-
- // Generate the code for the stub.
- masm->set_generating_stub(true);
- NoCurrentFrameScope scope(masm);
- Generate(masm);
-}
-
-
SmartArrayPointer<const char> CodeStub::GetName() {
char buffer[100];
NoAllocationStringAllocator allocator(buffer,
@@ -72,8 +58,7 @@ SmartArrayPointer<const char> CodeStub::GetName() {
}
-void CodeStub::RecordCodeGeneration(Code* code, MacroAssembler* masm) {
- Isolate* isolate = masm->isolate();
+void CodeStub::RecordCodeGeneration(Code* code, Isolate* isolate) {
SmartArrayPointer<const char> name = GetName();
PROFILE(isolate, CodeCreateEvent(Logger::STUB_TAG, code, *name));
GDBJIT(AddCode(GDBJITInterface::STUB, *name, code));
@@ -87,14 +72,49 @@ int CodeStub::GetCodeKind() {
}
-Handle<Code> CodeStub::GetCode() {
+Handle<Code> PlatformCodeStub::GenerateCode() {
Isolate* isolate = Isolate::Current();
Factory* factory = isolate->factory();
+
+ // Generate the new code.
+ MacroAssembler masm(isolate, NULL, 256);
+
+ {
+ // Update the static counter each time a new code stub is generated.
+ isolate->counters()->code_stubs()->Increment();
+
+ // Nested stubs are not allowed for leaves.
+ AllowStubCallsScope allow_scope(&masm, false);
+
+ // Generate the code for the stub.
+ masm.set_generating_stub(true);
+ NoCurrentFrameScope scope(&masm);
+ Generate(&masm);
+ }
+
+ // Create the code object.
+ CodeDesc desc;
+ masm.GetCode(&desc);
+
+ // Copy the generated code into a heap object.
+ Code::Flags flags = Code::ComputeFlags(
+ static_cast<Code::Kind>(GetCodeKind()),
+ GetICState(),
+ GetExtraICState(),
+ GetStubType());
+ Handle<Code> new_object = factory->NewCode(
+ desc, flags, masm.CodeObject(), NeedsImmovableCode());
+ return new_object;
+}
+
+
+Handle<Code> CodeStub::GetCode(Isolate* isolate) {
+ Factory* factory = isolate->factory();
Heap* heap = isolate->heap();
Code* code;
if (UseSpecialCache()
- ? FindCodeInSpecialCache(&code)
- : FindCodeInCache(&code)) {
+ ? FindCodeInSpecialCache(&code, isolate)
+ : FindCodeInCache(&code, isolate)) {
ASSERT(IsPregenerated() == code->is_pregenerated());
return Handle<Code>(code);
}
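
GetCode now takes the isolate explicitly instead of fetching Isolate::Current() internally; the lookup order (special cache, then the heap's code_stubs dictionary, then generation) is unchanged. A hedged usage sketch under the new signature, using a stub type that appears later in this patch:

    ToNumberStub stub;
    Handle<Code> code = stub.GetCode(isolate);  // generated once, cached after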
@@ -102,23 +122,10 @@ Handle<Code> CodeStub::GetCode() {
{
HandleScope scope(isolate);
- // Generate the new code.
- MacroAssembler masm(isolate, NULL, 256);
- GenerateCode(&masm);
-
- // Create the code object.
- CodeDesc desc;
- masm.GetCode(&desc);
-
- // Copy the generated code into a heap object.
- Code::Flags flags = Code::ComputeFlags(
- static_cast<Code::Kind>(GetCodeKind()),
- GetICState());
- Handle<Code> new_object = factory->NewCode(
- desc, flags, masm.CodeObject(), NeedsImmovableCode());
+ Handle<Code> new_object = GenerateCode();
new_object->set_major_key(MajorKey());
FinishCode(new_object);
- RecordCodeGeneration(*new_object, &masm);
+ RecordCodeGeneration(*new_object, isolate);
#ifdef ENABLE_DISASSEMBLER
if (FLAG_print_code_stubs) {
@@ -169,20 +176,135 @@ void CodeStub::PrintName(StringStream* stream) {
}
+void BinaryOpStub::Generate(MacroAssembler* masm) {
+ // Explicitly allow generation of nested stubs. It is safe here because
+ // generation code does not use any raw pointers.
+ AllowStubCallsScope allow_stub_calls(masm, true);
+
+ BinaryOpIC::TypeInfo operands_type = Max(left_type_, right_type_);
+ if (left_type_ == BinaryOpIC::ODDBALL && right_type_ == BinaryOpIC::ODDBALL) {
+ // The OddballStub handles a number and an oddball, not two oddballs.
+ operands_type = BinaryOpIC::GENERIC;
+ }
+ switch (operands_type) {
+ case BinaryOpIC::UNINITIALIZED:
+ GenerateTypeTransition(masm);
+ break;
+ case BinaryOpIC::SMI:
+ GenerateSmiStub(masm);
+ break;
+ case BinaryOpIC::INT32:
+ GenerateInt32Stub(masm);
+ break;
+ case BinaryOpIC::NUMBER:
+ GenerateNumberStub(masm);
+ break;
+ case BinaryOpIC::ODDBALL:
+ GenerateOddballStub(masm);
+ break;
+ case BinaryOpIC::STRING:
+ GenerateStringStub(masm);
+ break;
+ case BinaryOpIC::GENERIC:
+ GenerateGeneric(masm);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
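
The dispatch keys off Max(left_type_, right_type_), which only works because the type states are ordered from most specific to most generic; the ODDBALL/ODDBALL corner is bumped to GENERIC by hand. A sketch of that join with an assumed ordering (the real enum is BinaryOpIC::TypeInfo):

    enum TypeInfo {  // assumed ordering, most specific first
      UNINITIALIZED, SMI, INT32, NUMBER, ODDBALL, STRING, GENERIC
    };

    static TypeInfo Join(TypeInfo left, TypeInfo right) {
      TypeInfo t = left > right ? left : right;  // Max() over the ordering
      if (left == ODDBALL && right == ODDBALL) {
        t = GENERIC;  // the oddball stub handles number+oddball, not two oddballs
      }
      return t;
    }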
+
+
+#define __ ACCESS_MASM(masm)
+
+
+void BinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) {
+ switch (op_) {
+ case Token::ADD:
+ __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
+ break;
+ case Token::SUB:
+ __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
+ break;
+ case Token::MUL:
+ __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
+ break;
+ case Token::DIV:
+ __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
+ break;
+ case Token::MOD:
+ __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
+ break;
+ case Token::BIT_OR:
+ __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
+ break;
+ case Token::BIT_AND:
+ __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
+ break;
+ case Token::BIT_XOR:
+ __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
+ break;
+ case Token::SAR:
+ __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
+ break;
+ case Token::SHR:
+ __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
+ break;
+ case Token::SHL:
+ __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+#undef __
+
+
+void BinaryOpStub::PrintName(StringStream* stream) {
+ const char* op_name = Token::Name(op_);
+ const char* overwrite_name;
+ switch (mode_) {
+ case NO_OVERWRITE: overwrite_name = "Alloc"; break;
+ case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
+ case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
+ default: overwrite_name = "UnknownOverwrite"; break;
+ }
+ stream->Add("BinaryOpStub_%s_%s_%s+%s",
+ op_name,
+ overwrite_name,
+ BinaryOpIC::GetName(left_type_),
+ BinaryOpIC::GetName(right_type_));
+}
+
+
+void BinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
+ ASSERT(left_type_ == BinaryOpIC::STRING || right_type_ == BinaryOpIC::STRING);
+ ASSERT(op_ == Token::ADD);
+ if (left_type_ == BinaryOpIC::STRING && right_type_ == BinaryOpIC::STRING) {
+ GenerateBothStringStub(masm);
+ return;
+ }
+ // Try to add arguments as strings; otherwise, transition to the generic
+ // BinaryOpIC type.
+ GenerateAddStrings(masm);
+ GenerateTypeTransition(masm);
+}
+
+
void ICCompareStub::AddToSpecialCache(Handle<Code> new_object) {
ASSERT(*known_map_ != NULL);
Isolate* isolate = new_object->GetIsolate();
Factory* factory = isolate->factory();
return Map::UpdateCodeCache(known_map_,
strict() ?
- factory->strict_compare_ic_symbol() :
- factory->compare_ic_symbol(),
+ factory->strict_compare_ic_string() :
+ factory->compare_ic_string(),
new_object);
}
-bool ICCompareStub::FindCodeInSpecialCache(Code** code_out) {
- Isolate* isolate = known_map_->GetIsolate();
+bool ICCompareStub::FindCodeInSpecialCache(Code** code_out, Isolate* isolate) {
Factory* factory = isolate->factory();
Code::Flags flags = Code::ComputeFlags(
static_cast<Code::Kind>(GetCodeKind()),
@@ -191,12 +313,18 @@ bool ICCompareStub::FindCodeInSpecialCache(Code** code_out) {
Handle<Object> probe(
known_map_->FindInCodeCache(
strict() ?
- *factory->strict_compare_ic_symbol() :
- *factory->compare_ic_symbol(),
- flags));
+ *factory->strict_compare_ic_string() :
+ *factory->compare_ic_string(),
+ flags),
+ isolate);
if (probe->IsCode()) {
*code_out = Code::cast(*probe);
- ASSERT(op_ == (*code_out)->compare_operation() + Token::EQ);
+#ifdef DEBUG
+ Token::Value cached_op;
+ ICCompareStub::DecodeMinorKey((*code_out)->stub_info(), NULL, NULL, NULL,
+ &cached_op);
+ ASSERT(op_ == cached_op);
+#endif
return true;
}
return false;
@@ -204,7 +332,33 @@ bool ICCompareStub::FindCodeInSpecialCache(Code** code_out) {
int ICCompareStub::MinorKey() {
- return OpField::encode(op_ - Token::EQ) | StateField::encode(state_);
+ return OpField::encode(op_ - Token::EQ) |
+ LeftStateField::encode(left_) |
+ RightStateField::encode(right_) |
+ HandlerStateField::encode(state_);
+}
+
+
+void ICCompareStub::DecodeMinorKey(int minor_key,
+ CompareIC::State* left_state,
+ CompareIC::State* right_state,
+ CompareIC::State* handler_state,
+ Token::Value* op) {
+ if (left_state) {
+ *left_state =
+ static_cast<CompareIC::State>(LeftStateField::decode(minor_key));
+ }
+ if (right_state) {
+ *right_state =
+ static_cast<CompareIC::State>(RightStateField::decode(minor_key));
+ }
+ if (handler_state) {
+ *handler_state =
+ static_cast<CompareIC::State>(HandlerStateField::decode(minor_key));
+ }
+ if (op) {
+ *op = static_cast<Token::Value>(OpField::decode(minor_key) + Token::EQ);
+ }
}
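
MinorKey packs four fields into one integer with BitField and DecodeMinorKey unpacks them again. A standalone sketch of the BitField idiom with assumed field widths (V8's real template adds static checks):

    #include <stdint.h>

    template <class T, int shift, int size>
    struct BitField {
      static const uint32_t kMask = ((1u << size) - 1) << shift;
      static uint32_t encode(T value) {
        return static_cast<uint32_t>(value) << shift;
      }
      static T decode(uint32_t field) {
        return static_cast<T>((field & kMask) >> shift);
      }
    };

    typedef BitField<int, 0, 3> OpField;         // widths assumed for
    typedef BitField<int, 3, 4> LeftStateField;  // this sketch only

    // Round trip: key = OpField::encode(2) | LeftStateField::encode(5);
    // then OpField::decode(key) == 2 and LeftStateField::decode(key) == 5.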
@@ -213,27 +367,31 @@ void ICCompareStub::Generate(MacroAssembler* masm) {
case CompareIC::UNINITIALIZED:
GenerateMiss(masm);
break;
- case CompareIC::SMIS:
+ case CompareIC::SMI:
GenerateSmis(masm);
break;
- case CompareIC::HEAP_NUMBERS:
- GenerateHeapNumbers(masm);
+ case CompareIC::NUMBER:
+ GenerateNumbers(masm);
break;
- case CompareIC::STRINGS:
+ case CompareIC::STRING:
GenerateStrings(masm);
break;
- case CompareIC::SYMBOLS:
- GenerateSymbols(masm);
+ case CompareIC::INTERNALIZED_STRING:
+ GenerateInternalizedStrings(masm);
break;
- case CompareIC::OBJECTS:
+ case CompareIC::UNIQUE_NAME:
+ GenerateUniqueNames(masm);
+ break;
+ case CompareIC::OBJECT:
GenerateObjects(masm);
break;
- case CompareIC::KNOWN_OBJECTS:
+ case CompareIC::KNOWN_OBJECT:
ASSERT(*known_map_ != NULL);
GenerateKnownObjects(masm);
break;
- default:
- UNREACHABLE();
+ case CompareIC::GENERIC:
+ GenerateGeneric(masm);
+ break;
}
}
@@ -269,36 +427,8 @@ void JSEntryStub::FinishCode(Handle<Code> code) {
}
-void KeyedLoadElementStub::Generate(MacroAssembler* masm) {
- switch (elements_kind_) {
- case FAST_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- KeyedLoadStubCompiler::GenerateLoadFastElement(masm);
- break;
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(masm);
- break;
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
- case EXTERNAL_PIXEL_ELEMENTS:
- KeyedLoadStubCompiler::GenerateLoadExternalArray(masm, elements_kind_);
- break;
- case DICTIONARY_ELEMENTS:
- KeyedLoadStubCompiler::GenerateLoadDictionaryElement(masm);
- break;
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNREACHABLE();
- break;
- }
+void KeyedLoadDictionaryElementStub::Generate(MacroAssembler* masm) {
+ KeyedLoadStubCompiler::GenerateLoadDictionaryElement(masm);
}
@@ -446,15 +576,16 @@ bool ToBooleanStub::Types::CanBeUndetectable() const {
void ElementsTransitionAndStoreStub::Generate(MacroAssembler* masm) {
Label fail;
+ AllocationSiteMode mode = AllocationSiteInfo::GetMode(from_, to_);
ASSERT(!IsFastHoleyElementsKind(from_) || IsFastHoleyElementsKind(to_));
if (!FLAG_trace_elements_transitions) {
if (IsFastSmiOrObjectElementsKind(to_)) {
if (IsFastSmiOrObjectElementsKind(from_)) {
ElementsTransitionGenerator::
- GenerateMapChangeElementsTransition(masm);
+ GenerateMapChangeElementsTransition(masm, mode, &fail);
} else if (IsFastDoubleElementsKind(from_)) {
ASSERT(!IsFastSmiElementsKind(to_));
- ElementsTransitionGenerator::GenerateDoubleToObject(masm, &fail);
+ ElementsTransitionGenerator::GenerateDoubleToObject(masm, mode, &fail);
} else {
UNREACHABLE();
}
@@ -464,14 +595,14 @@ void ElementsTransitionAndStoreStub::Generate(MacroAssembler* masm) {
grow_mode_);
} else if (IsFastSmiElementsKind(from_) &&
IsFastDoubleElementsKind(to_)) {
- ElementsTransitionGenerator::GenerateSmiToDouble(masm, &fail);
+ ElementsTransitionGenerator::GenerateSmiToDouble(masm, mode, &fail);
KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(masm,
is_jsarray_,
grow_mode_);
} else if (IsFastDoubleElementsKind(from_)) {
ASSERT(to_ == FAST_HOLEY_DOUBLE_ELEMENTS);
ElementsTransitionGenerator::
- GenerateMapChangeElementsTransition(masm);
+ GenerateMapChangeElementsTransition(masm, mode, &fail);
} else {
UNREACHABLE();
}
@@ -481,6 +612,14 @@ void ElementsTransitionAndStoreStub::Generate(MacroAssembler* masm) {
}
+void StubFailureTrampolineStub::GenerateAheadOfTime(Isolate* isolate) {
+ int i = 0;
+ for (; i <= StubFailureTrampolineStub::kMaxExtraExpressionStackCount; ++i) {
+ StubFailureTrampolineStub(i).GetCode(isolate);
+ }
+}
+
+
FunctionEntryHook ProfileEntryHookStub::entry_hook_ = NULL;
diff --git a/src/3rdparty/v8/src/code-stubs.h b/src/3rdparty/v8/src/code-stubs.h
index 8288f4d..1f187c0 100644
--- a/src/3rdparty/v8/src/code-stubs.h
+++ b/src/3rdparty/v8/src/code-stubs.h
@@ -47,6 +47,10 @@ namespace internal {
V(Compare) \
V(CompareIC) \
V(MathPow) \
+ V(ArrayLength) \
+ V(StringLength) \
+ V(FunctionPrototype) \
+ V(StoreArrayLength) \
V(RecordWrite) \
V(StoreBufferOverflow) \
V(RegExpExec) \
@@ -69,12 +73,19 @@ namespace internal {
V(CEntry) \
V(JSEntry) \
V(KeyedLoadElement) \
+ V(ArrayNoArgumentConstructor) \
+ V(ArraySingleArgumentConstructor) \
+ V(ArrayNArgumentsConstructor) \
V(KeyedStoreElement) \
V(DebuggerStatement) \
V(StringDictionaryLookup) \
V(ElementsTransitionAndStore) \
+ V(TransitionElementsKind) \
V(StoreArrayLiteralElement) \
- V(ProfileEntryHook)
+ V(StubFailureTrampoline) \
+ V(ProfileEntryHook) \
+ /* IC Handler stubs */ \
+ V(LoadField)
// List of code stubs only used on ARM platforms.
#ifdef V8_TARGET_ARCH_ARM
@@ -120,7 +131,7 @@ class CodeStub BASE_EMBEDDED {
};
// Retrieve the code for the stub. Generate the code if needed.
- Handle<Code> GetCode();
+ Handle<Code> GetCode(Isolate* isolate);
static Major MajorKeyFromKey(uint32_t key) {
return static_cast<Major>(MajorKeyBits::decode(key));
@@ -141,15 +152,15 @@ class CodeStub BASE_EMBEDDED {
bool CompilingCallsToThisStubIsGCSafe() {
bool is_pregenerated = IsPregenerated();
Code* code = NULL;
- CHECK(!is_pregenerated || FindCodeInCache(&code));
+ CHECK(!is_pregenerated || FindCodeInCache(&code, Isolate::Current()));
return is_pregenerated;
}
// See comment above, where Instanceof is defined.
virtual bool IsPregenerated() { return false; }
- static void GenerateStubsAheadOfTime();
- static void GenerateFPStubs();
+ static void GenerateStubsAheadOfTime(Isolate* isolate);
+ static void GenerateFPStubs(Isolate* isolate);
// Some stubs put untagged junk on the stack that cannot be scanned by the
// GC. This means that we must be statically sure that no GC can occur while
@@ -160,22 +171,37 @@ class CodeStub BASE_EMBEDDED {
virtual bool SometimesSetsUpAFrame() { return true; }
// Lookup the code in the (possibly custom) cache.
- bool FindCodeInCache(Code** code_out);
+ bool FindCodeInCache(Code** code_out, Isolate* isolate);
+
+ // Returns information for computing the number key.
+ virtual Major MajorKey() = 0;
+ virtual int MinorKey() = 0;
protected:
static bool CanUseFPRegisters();
- private:
- // Nonvirtual wrapper around the stub-specific Generate function. Call
- // this function to set up the macro assembler and generate the code.
- void GenerateCode(MacroAssembler* masm);
-
// Generates the assembler code for the stub.
- virtual void Generate(MacroAssembler* masm) = 0;
+ virtual Handle<Code> GenerateCode() = 0;
+
+ // BinaryOpStub needs to override this.
+ virtual InlineCacheState GetICState() {
+ return UNINITIALIZED;
+ }
+ virtual Code::ExtraICState GetExtraICState() {
+ return Code::kNoExtraICState;
+ }
+ virtual Code::StubType GetStubType() {
+ return Code::NORMAL;
+ }
+
+ // Returns whether the code generated for this stub needs to be allocated as
+ // a fixed (non-moveable) code object.
+ virtual bool NeedsImmovableCode() { return false; }
+ private:
// Perform bookkeeping required after code generation when stub code is
// initially generated.
- void RecordCodeGeneration(Code* code, MacroAssembler* masm);
+ void RecordCodeGeneration(Code* code, Isolate* isolate);
// Finish the code object after it has been generated.
virtual void FinishCode(Handle<Code> code) { }
@@ -184,25 +210,18 @@ class CodeStub BASE_EMBEDDED {
// registering stub in the stub cache.
virtual void Activate(Code* code) { }
- // Returns information for computing the number key.
- virtual Major MajorKey() = 0;
- virtual int MinorKey() = 0;
-
// BinaryOpStub needs to override this.
virtual int GetCodeKind();
- // BinaryOpStub needs to override this.
- virtual InlineCacheState GetICState() {
- return UNINITIALIZED;
- }
-
// Add the code to a specialized cache, specific to an individual
// stub type. Please note, this method must add the code object to a
// roots object, otherwise we will remove the code during GC.
virtual void AddToSpecialCache(Handle<Code> new_object) { }
// Find code in a specialized cache, work is delegated to the specific stub.
- virtual bool FindCodeInSpecialCache(Code** code_out) { return false; }
+ virtual bool FindCodeInSpecialCache(Code** code_out, Isolate* isolate) {
+ return false;
+ }
// If a stub uses a special cache override this.
virtual bool UseSpecialCache() { return false; }
@@ -211,10 +230,6 @@ class CodeStub BASE_EMBEDDED {
SmartArrayPointer<const char> GetName();
virtual void PrintName(StringStream* stream);
- // Returns whether the code generated for this stub needs to be allocated as
- // a fixed (non-moveable) code object.
- virtual bool NeedsImmovableCode() { return false; }
-
// Computes the key based on major and minor.
uint32_t GetKey() {
ASSERT(static_cast<int>(MajorKey()) < NUMBER_OF_IDS);
@@ -230,6 +245,50 @@ class CodeStub BASE_EMBEDDED {
};
+class PlatformCodeStub : public CodeStub {
+ public:
+ // Retrieve the code for the stub. Generate the code if needed.
+ virtual Handle<Code> GenerateCode();
+
+ virtual int GetCodeKind() { return Code::STUB; }
+
+ protected:
+ // Generates the assembler code for the stub.
+ virtual void Generate(MacroAssembler* masm) = 0;
+};
+
+
+struct CodeStubInterfaceDescriptor {
+ CodeStubInterfaceDescriptor()
+ : register_param_count_(-1),
+ stack_parameter_count_(NULL),
+ extra_expression_stack_count_(0),
+ register_params_(NULL) { }
+ int register_param_count_;
+ const Register* stack_parameter_count_;
+ int extra_expression_stack_count_;
+ Register* register_params_;
+ Address deoptimization_handler_;
+};
+
+
+class HydrogenCodeStub : public CodeStub {
+ public:
+ // Retrieve the code for the stub. Generate the code if needed.
+ virtual Handle<Code> GenerateCode() = 0;
+
+ virtual int GetCodeKind() { return Code::COMPILED_STUB; }
+
+ CodeStubInterfaceDescriptor* GetInterfaceDescriptor(Isolate* isolate) {
+ return isolate->code_stub_interface_descriptor(MajorKey());
+ }
+
+ virtual void InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) = 0;
+};
+
+
// Helper interface to prepare to/restore after making runtime calls.
class RuntimeCallHelper {
public:
@@ -287,7 +346,7 @@ class NopRuntimeCallHelper : public RuntimeCallHelper {
};
-class StackCheckStub : public CodeStub {
+class StackCheckStub : public PlatformCodeStub {
public:
StackCheckStub() { }
@@ -299,7 +358,7 @@ class StackCheckStub : public CodeStub {
};
-class InterruptStub : public CodeStub {
+class InterruptStub : public PlatformCodeStub {
public:
InterruptStub() { }
@@ -311,7 +370,7 @@ class InterruptStub : public CodeStub {
};
-class ToNumberStub: public CodeStub {
+class ToNumberStub: public PlatformCodeStub {
public:
ToNumberStub() { }
@@ -323,7 +382,7 @@ class ToNumberStub: public CodeStub {
};
-class FastNewClosureStub : public CodeStub {
+class FastNewClosureStub : public PlatformCodeStub {
public:
explicit FastNewClosureStub(LanguageMode language_mode)
: language_mode_(language_mode) { }
@@ -339,7 +398,7 @@ class FastNewClosureStub : public CodeStub {
};
-class FastNewContextStub : public CodeStub {
+class FastNewContextStub : public PlatformCodeStub {
public:
static const int kMaximumSlots = 64;
@@ -357,7 +416,7 @@ class FastNewContextStub : public CodeStub {
};
-class FastNewBlockContextStub : public CodeStub {
+class FastNewBlockContextStub : public PlatformCodeStub {
public:
static const int kMaximumSlots = 64;
@@ -375,20 +434,25 @@ class FastNewBlockContextStub : public CodeStub {
};
-class FastCloneShallowArrayStub : public CodeStub {
+class FastCloneShallowArrayStub : public PlatformCodeStub {
public:
// Maximum length of copied elements array.
static const int kMaximumClonedLength = 8;
-
enum Mode {
CLONE_ELEMENTS,
CLONE_DOUBLE_ELEMENTS,
COPY_ON_WRITE_ELEMENTS,
- CLONE_ANY_ELEMENTS
+ CLONE_ANY_ELEMENTS,
+ LAST_CLONE_MODE = CLONE_ANY_ELEMENTS
};
- FastCloneShallowArrayStub(Mode mode, int length)
+ static const int kFastCloneModeCount = LAST_CLONE_MODE + 1;
+
+ FastCloneShallowArrayStub(Mode mode,
+ AllocationSiteMode allocation_site_mode,
+ int length)
: mode_(mode),
+ allocation_site_mode_(allocation_site_mode),
length_((mode == COPY_ON_WRITE_ELEMENTS) ? 0 : length) {
ASSERT_GE(length_, 0);
ASSERT_LE(length_, kMaximumClonedLength);
@@ -398,17 +462,26 @@ class FastCloneShallowArrayStub : public CodeStub {
private:
Mode mode_;
+ AllocationSiteMode allocation_site_mode_;
int length_;
+ class AllocationSiteModeBits: public BitField<AllocationSiteMode, 0, 1> {};
+ class ModeBits: public BitField<Mode, 1, 4> {};
+ class LengthBits: public BitField<int, 5, 4> {};
+ // Ensure data fits within available bits.
+ STATIC_ASSERT(LAST_ALLOCATION_SITE_MODE == 1);
+ STATIC_ASSERT(kFastCloneModeCount < 16);
+ STATIC_ASSERT(kMaximumClonedLength < 16);
Major MajorKey() { return FastCloneShallowArray; }
int MinorKey() {
- ASSERT(mode_ == 0 || mode_ == 1 || mode_ == 2 || mode_ == 3);
- return length_ * 4 + mode_;
+ return AllocationSiteModeBits::encode(allocation_site_mode_)
+ | ModeBits::encode(mode_)
+ | LengthBits::encode(length_);
}
};
-class FastCloneShallowObjectStub : public CodeStub {
+class FastCloneShallowObjectStub : public HydrogenCodeStub {
public:
// Maximum number of properties in copied object.
static const int kMaximumClonedProperties = 6;
@@ -418,17 +491,25 @@ class FastCloneShallowObjectStub : public CodeStub {
ASSERT_LE(length_, kMaximumClonedProperties);
}
- void Generate(MacroAssembler* masm);
+ int length() const { return length_; }
+
+ virtual Handle<Code> GenerateCode();
+
+ virtual void InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor);
private:
int length_;
Major MajorKey() { return FastCloneShallowObject; }
int MinorKey() { return length_; }
+
+ DISALLOW_COPY_AND_ASSIGN(FastCloneShallowObjectStub);
};
-class InstanceofStub: public CodeStub {
+class InstanceofStub: public PlatformCodeStub {
public:
enum Flags {
kNoFlags = 0,
@@ -466,7 +547,7 @@ class InstanceofStub: public CodeStub {
};
-class MathPowStub: public CodeStub {
+class MathPowStub: public PlatformCodeStub {
public:
enum ExponentType { INTEGER, DOUBLE, TAGGED, ON_STACK};
@@ -482,150 +563,328 @@ class MathPowStub: public CodeStub {
};
-class ICCompareStub: public CodeStub {
+class ICStub: public PlatformCodeStub {
public:
- ICCompareStub(Token::Value op, CompareIC::State state)
- : op_(op), state_(state) {
- ASSERT(Token::IsCompareOp(op));
+ explicit ICStub(Code::Kind kind) : kind_(kind) { }
+ virtual int GetCodeKind() { return kind_; }
+ virtual InlineCacheState GetICState() { return MONOMORPHIC; }
+
+ bool Describes(Code* code) {
+ return GetMajorKey(code) == MajorKey() && code->stub_info() == MinorKey();
+ }
+
+ protected:
+ class KindBits: public BitField<Code::Kind, 0, 4> {};
+ virtual void FinishCode(Handle<Code> code) {
+ code->set_stub_info(MinorKey());
+ }
+ Code::Kind kind() { return kind_; }
+
+ virtual int MinorKey() {
+ return KindBits::encode(kind_);
}
+ private:
+ Code::Kind kind_;
+};
+
+
+class ArrayLengthStub: public ICStub {
+ public:
+ explicit ArrayLengthStub(Code::Kind kind) : ICStub(kind) { }
virtual void Generate(MacroAssembler* masm);
- void set_known_map(Handle<Map> map) { known_map_ = map; }
+ private:
+ virtual CodeStub::Major MajorKey() { return ArrayLength; }
+};
+
+
+class FunctionPrototypeStub: public ICStub {
+ public:
+ explicit FunctionPrototypeStub(Code::Kind kind) : ICStub(kind) { }
+ virtual void Generate(MacroAssembler* masm);
private:
- class OpField: public BitField<int, 0, 3> { };
- class StateField: public BitField<int, 3, 5> { };
+ virtual CodeStub::Major MajorKey() { return FunctionPrototype; }
+};
- virtual void FinishCode(Handle<Code> code) {
- code->set_compare_state(state_);
- code->set_compare_operation(op_ - Token::EQ);
+
+class StringLengthStub: public ICStub {
+ public:
+ StringLengthStub(Code::Kind kind, bool support_wrapper)
+ : ICStub(kind), support_wrapper_(support_wrapper) { }
+ virtual void Generate(MacroAssembler* masm);
+
+ private:
+ STATIC_ASSERT(KindBits::kSize == 4);
+ class WrapperModeBits: public BitField<bool, 4, 1> {};
+ virtual CodeStub::Major MajorKey() { return StringLength; }
+ virtual int MinorKey() {
+ return KindBits::encode(kind()) | WrapperModeBits::encode(support_wrapper_);
}
- virtual CodeStub::Major MajorKey() { return CompareIC; }
- virtual int MinorKey();
+ bool support_wrapper_;
+};
- virtual int GetCodeKind() { return Code::COMPARE_IC; }
- void GenerateSmis(MacroAssembler* masm);
- void GenerateHeapNumbers(MacroAssembler* masm);
- void GenerateSymbols(MacroAssembler* masm);
- void GenerateStrings(MacroAssembler* masm);
- void GenerateObjects(MacroAssembler* masm);
- void GenerateMiss(MacroAssembler* masm);
- void GenerateKnownObjects(MacroAssembler* masm);
+class StoreICStub: public ICStub {
+ public:
+ StoreICStub(Code::Kind kind, StrictModeFlag strict_mode)
+ : ICStub(kind), strict_mode_(strict_mode) { }
- bool strict() const { return op_ == Token::EQ_STRICT; }
- Condition GetCondition() const { return CompareIC::ComputeCondition(op_); }
+ protected:
+ virtual Code::ExtraICState GetExtraICState() {
+ return strict_mode_;
+ }
- virtual void AddToSpecialCache(Handle<Code> new_object);
- virtual bool FindCodeInSpecialCache(Code** code_out);
- virtual bool UseSpecialCache() { return state_ == CompareIC::KNOWN_OBJECTS; }
+ private:
+ STATIC_ASSERT(KindBits::kSize == 4);
+ class StrictModeBits: public BitField<bool, 4, 1> {};
+ virtual int MinorKey() {
+ return KindBits::encode(kind()) | StrictModeBits::encode(strict_mode_);
+ }
- Token::Value op_;
- CompareIC::State state_;
- Handle<Map> known_map_;
+ StrictModeFlag strict_mode_;
};
-// Flags that control the compare stub code generation.
-enum CompareFlags {
- NO_COMPARE_FLAGS = 0,
- NO_SMI_COMPARE_IN_STUB = 1 << 0,
- NO_NUMBER_COMPARE_IN_STUB = 1 << 1,
- CANT_BOTH_BE_NAN = 1 << 2
+class StoreArrayLengthStub: public StoreICStub {
+ public:
+ explicit StoreArrayLengthStub(Code::Kind kind, StrictModeFlag strict_mode)
+ : StoreICStub(kind, strict_mode) { }
+ virtual void Generate(MacroAssembler* masm);
+
+ private:
+ virtual CodeStub::Major MajorKey() { return StoreArrayLength; }
};
-enum NaNInformation {
- kBothCouldBeNaN,
- kCantBothBeNaN
+class HandlerStub: public ICStub {
+ public:
+ explicit HandlerStub(Code::Kind kind) : ICStub(kind) { }
+
+ protected:
+ virtual Code::ExtraICState GetExtraICState() {
+ return Code::HANDLER_FRAGMENT;
+ }
};
-class CompareStub: public CodeStub {
+class LoadFieldStub: public HandlerStub {
public:
- CompareStub(Condition cc,
- bool strict,
- CompareFlags flags,
- Register lhs,
- Register rhs) :
- cc_(cc),
- strict_(strict),
- never_nan_nan_((flags & CANT_BOTH_BE_NAN) != 0),
- include_number_compare_((flags & NO_NUMBER_COMPARE_IN_STUB) == 0),
- include_smi_compare_((flags & NO_SMI_COMPARE_IN_STUB) == 0),
- lhs_(lhs),
- rhs_(rhs) { }
-
- CompareStub(Condition cc,
- bool strict,
- CompareFlags flags) :
- cc_(cc),
- strict_(strict),
- never_nan_nan_((flags & CANT_BOTH_BE_NAN) != 0),
- include_number_compare_((flags & NO_NUMBER_COMPARE_IN_STUB) == 0),
- include_smi_compare_((flags & NO_SMI_COMPARE_IN_STUB) == 0),
- lhs_(no_reg),
- rhs_(no_reg) { }
+ LoadFieldStub(Register reg, bool inobject, int index)
+ : HandlerStub(Code::LOAD_IC),
+ reg_(reg),
+ inobject_(inobject),
+ index_(index) { }
+ virtual void Generate(MacroAssembler* masm);
- void Generate(MacroAssembler* masm);
+ protected:
+ virtual Code::StubType GetStubType() { return Code::FIELD; }
private:
- Condition cc_;
- bool strict_;
- // Only used for 'equal' comparisons. Tells the stub that we already know
- // that at least one side of the comparison is not NaN. This allows the
- // stub to use object identity in the positive case. We ignore it when
- // generating the minor key for other comparisons to avoid creating more
- // stubs.
- bool never_nan_nan_;
- // Do generate the number comparison code in the stub. Stubs without number
- // comparison code is used when the number comparison has been inlined, and
- // the stub will be called if one of the operands is not a number.
- bool include_number_compare_;
-
- // Generate the comparison code for two smi operands in the stub.
- bool include_smi_compare_;
-
- // Register holding the left hand side of the comparison if the stub gives
- // a choice, no_reg otherwise.
-
- Register lhs_;
- // Register holding the right hand side of the comparison if the stub gives
- // a choice, no_reg otherwise.
- Register rhs_;
-
- // Encoding of the minor key in 16 bits.
- class StrictField: public BitField<bool, 0, 1> {};
- class NeverNanNanField: public BitField<bool, 1, 1> {};
- class IncludeNumberCompareField: public BitField<bool, 2, 1> {};
- class IncludeSmiCompareField: public BitField<bool, 3, 1> {};
- class RegisterField: public BitField<bool, 4, 1> {};
- class ConditionField: public BitField<int, 5, 11> {};
-
- Major MajorKey() { return Compare; }
+ STATIC_ASSERT(KindBits::kSize == 4);
+ class RegisterBits: public BitField<int, 4, 6> {};
+ class InobjectBits: public BitField<bool, 10, 1> {};
+ class IndexBits: public BitField<int, 11, 11> {};
+ virtual CodeStub::Major MajorKey() { return LoadField; }
+ virtual int MinorKey() {
+ return KindBits::encode(kind())
+ | RegisterBits::encode(reg_.code())
+ | InobjectBits::encode(inobject_)
+ | IndexBits::encode(index_);
+ }
- int MinorKey();
+ Register reg_;
+ bool inobject_;
+ int index_;
+};
- virtual int GetCodeKind() { return Code::COMPARE_IC; }
- virtual void FinishCode(Handle<Code> code) {
- code->set_compare_state(CompareIC::GENERIC);
+
+class BinaryOpStub: public PlatformCodeStub {
+ public:
+ BinaryOpStub(Token::Value op, OverwriteMode mode)
+ : op_(op),
+ mode_(mode),
+ platform_specific_bit_(false),
+ left_type_(BinaryOpIC::UNINITIALIZED),
+ right_type_(BinaryOpIC::UNINITIALIZED),
+ result_type_(BinaryOpIC::UNINITIALIZED) {
+ Initialize();
+ ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
}
- // Branch to the label if the given object isn't a symbol.
- void BranchIfNonSymbol(MacroAssembler* masm,
- Label* label,
- Register object,
- Register scratch);
+ BinaryOpStub(
+ int key,
+ BinaryOpIC::TypeInfo left_type,
+ BinaryOpIC::TypeInfo right_type,
+ BinaryOpIC::TypeInfo result_type = BinaryOpIC::UNINITIALIZED)
+ : op_(OpBits::decode(key)),
+ mode_(ModeBits::decode(key)),
+ platform_specific_bit_(PlatformSpecificBits::decode(key)),
+ left_type_(left_type),
+ right_type_(right_type),
+ result_type_(result_type) { }
+
+ static void decode_types_from_minor_key(int minor_key,
+ BinaryOpIC::TypeInfo* left_type,
+ BinaryOpIC::TypeInfo* right_type,
+ BinaryOpIC::TypeInfo* result_type) {
+ *left_type =
+ static_cast<BinaryOpIC::TypeInfo>(LeftTypeBits::decode(minor_key));
+ *right_type =
+ static_cast<BinaryOpIC::TypeInfo>(RightTypeBits::decode(minor_key));
+ *result_type =
+ static_cast<BinaryOpIC::TypeInfo>(ResultTypeBits::decode(minor_key));
+ }
+
+ static Token::Value decode_op_from_minor_key(int minor_key) {
+ return static_cast<Token::Value>(OpBits::decode(minor_key));
+ }
+
+ enum SmiCodeGenerateHeapNumberResults {
+ ALLOW_HEAPNUMBER_RESULTS,
+ NO_HEAPNUMBER_RESULTS
+ };
+
+ private:
+ Token::Value op_;
+ OverwriteMode mode_;
+ bool platform_specific_bit_; // Indicates SSE3 on IA32, VFP2 on ARM.
+
+ // Operand type information determined at runtime.
+ BinaryOpIC::TypeInfo left_type_;
+ BinaryOpIC::TypeInfo right_type_;
+ BinaryOpIC::TypeInfo result_type_;
- // Unfortunately you have to run without snapshots to see most of these
- // names in the profile since most compare stubs end up in the snapshot.
virtual void PrintName(StringStream* stream);
+
+ // Minor key encoding in 19 bits TTTRRRLLLSOOOOOOOMM.
+ class ModeBits: public BitField<OverwriteMode, 0, 2> {};
+ class OpBits: public BitField<Token::Value, 2, 7> {};
+ class PlatformSpecificBits: public BitField<bool, 9, 1> {};
+ class LeftTypeBits: public BitField<BinaryOpIC::TypeInfo, 10, 3> {};
+ class RightTypeBits: public BitField<BinaryOpIC::TypeInfo, 13, 3> {};
+ class ResultTypeBits: public BitField<BinaryOpIC::TypeInfo, 16, 3> {};
+
+ Major MajorKey() { return BinaryOp; }
+ int MinorKey() {
+ return OpBits::encode(op_)
+ | ModeBits::encode(mode_)
+ | PlatformSpecificBits::encode(platform_specific_bit_)
+ | LeftTypeBits::encode(left_type_)
+ | RightTypeBits::encode(right_type_)
+ | ResultTypeBits::encode(result_type_);
+ }
+
+
+ // Platform-independent implementation.
+ void Generate(MacroAssembler* masm);
+ void GenerateCallRuntime(MacroAssembler* masm);
+
+ // Platform-independent signature, platform-specific implementation.
+ void Initialize();
+ void GenerateAddStrings(MacroAssembler* masm);
+ void GenerateBothStringStub(MacroAssembler* masm);
+ void GenerateGeneric(MacroAssembler* masm);
+ void GenerateGenericStub(MacroAssembler* masm);
+ void GenerateNumberStub(MacroAssembler* masm);
+ void GenerateInt32Stub(MacroAssembler* masm);
+ void GenerateLoadArguments(MacroAssembler* masm);
+ void GenerateOddballStub(MacroAssembler* masm);
+ void GenerateRegisterArgsPush(MacroAssembler* masm);
+ void GenerateReturn(MacroAssembler* masm);
+ void GenerateSmiStub(MacroAssembler* masm);
+ void GenerateStringStub(MacroAssembler* masm);
+ void GenerateTypeTransition(MacroAssembler* masm);
+ void GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm);
+ void GenerateUninitializedStub(MacroAssembler* masm);
+
+ // Entirely platform-specific methods are defined as static helper
+ // functions in the <arch>/code-stubs-<arch>.cc files.
+
+ virtual int GetCodeKind() { return Code::BINARY_OP_IC; }
+
+ virtual InlineCacheState GetICState() {
+ return BinaryOpIC::ToState(Max(left_type_, right_type_));
+ }
+
+ virtual void FinishCode(Handle<Code> code) {
+ code->set_stub_info(MinorKey());
+ }
+
+ friend class CodeGenerator;
+};
+
+
+class ICCompareStub: public PlatformCodeStub {
+ public:
+ ICCompareStub(Token::Value op,
+ CompareIC::State left,
+ CompareIC::State right,
+ CompareIC::State handler)
+ : op_(op),
+ left_(left),
+ right_(right),
+ state_(handler) {
+ ASSERT(Token::IsCompareOp(op));
+ }
+
+ virtual void Generate(MacroAssembler* masm);
+
+ void set_known_map(Handle<Map> map) { known_map_ = map; }
+
+ static void DecodeMinorKey(int minor_key,
+ CompareIC::State* left_state,
+ CompareIC::State* right_state,
+ CompareIC::State* handler_state,
+ Token::Value* op);
+
+ static CompareIC::State CompareState(int minor_key) {
+ return static_cast<CompareIC::State>(HandlerStateField::decode(minor_key));
+ }
+
+ private:
+ class OpField: public BitField<int, 0, 3> { };
+ class LeftStateField: public BitField<int, 3, 4> { };
+ class RightStateField: public BitField<int, 7, 4> { };
+ class HandlerStateField: public BitField<int, 11, 4> { };
+
+ virtual void FinishCode(Handle<Code> code) {
+ code->set_stub_info(MinorKey());
+ }
+
+ virtual CodeStub::Major MajorKey() { return CompareIC; }
+ virtual int MinorKey();
+
+ virtual int GetCodeKind() { return Code::COMPARE_IC; }
+
+ void GenerateSmis(MacroAssembler* masm);
+ void GenerateNumbers(MacroAssembler* masm);
+ void GenerateInternalizedStrings(MacroAssembler* masm);
+ void GenerateStrings(MacroAssembler* masm);
+ void GenerateUniqueNames(MacroAssembler* masm);
+ void GenerateObjects(MacroAssembler* masm);
+ void GenerateMiss(MacroAssembler* masm);
+ void GenerateKnownObjects(MacroAssembler* masm);
+ void GenerateGeneric(MacroAssembler* masm);
+
+ bool strict() const { return op_ == Token::EQ_STRICT; }
+ Condition GetCondition() const { return CompareIC::ComputeCondition(op_); }
+
+ virtual void AddToSpecialCache(Handle<Code> new_object);
+ virtual bool FindCodeInSpecialCache(Code** code_out, Isolate* isolate);
+ virtual bool UseSpecialCache() { return state_ == CompareIC::KNOWN_OBJECT; }
+
+ Token::Value op_;
+ CompareIC::State left_;
+ CompareIC::State right_;
+ CompareIC::State state_;
+ Handle<Map> known_map_;
};
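
To make the minor-key packing above concrete, here is a minimal standalone sketch of how ICCompareStub's four bit fields share one integer key. The BitField template only mirrors the shape of V8's; the encoded values are illustrative placeholders, not the real Token or CompareIC::State constants.

    #include <cassert>
    #include <cstdint>

    template <class T, int shift, int size>
    struct BitField {
      static uint32_t mask() { return ((1u << size) - 1u) << shift; }
      static uint32_t encode(T v) { return static_cast<uint32_t>(v) << shift; }
      static T decode(uint32_t f) { return static_cast<T>((f & mask()) >> shift); }
    };

    typedef BitField<int, 0, 3> OpField;             // compare op, bits 0-2
    typedef BitField<int, 3, 4> LeftStateField;      // left state, bits 3-6
    typedef BitField<int, 7, 4> RightStateField;     // right state, bits 7-10
    typedef BitField<int, 11, 4> HandlerStateField;  // handler state, bits 11-14

    int main() {
      uint32_t minor_key = OpField::encode(2)           // placeholder values
                         | LeftStateField::encode(1)
                         | RightStateField::encode(1)
                         | HandlerStateField::encode(3);
      assert(OpField::decode(minor_key) == 2);          // fields round-trip
      assert(HandlerStateField::decode(minor_key) == 3);
      return 0;
    }
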
-class CEntryStub : public CodeStub {
+class CEntryStub : public PlatformCodeStub {
public:
explicit CEntryStub(int result_size,
SaveFPRegsMode save_doubles = kDontSaveFPRegs)
@@ -638,7 +897,7 @@ class CEntryStub : public CodeStub {
// their code generation. On machines that always have gp registers (x64) we
// can generate both variants ahead of time.
virtual bool IsPregenerated();
- static void GenerateAheadOfTime();
+ static void GenerateAheadOfTime(Isolate* isolate);
private:
void GenerateCore(MacroAssembler* masm,
@@ -659,7 +918,7 @@ class CEntryStub : public CodeStub {
};
-class JSEntryStub : public CodeStub {
+class JSEntryStub : public PlatformCodeStub {
public:
JSEntryStub() { }
@@ -693,7 +952,7 @@ class JSConstructEntryStub : public JSEntryStub {
};
-class ArgumentsAccessStub: public CodeStub {
+class ArgumentsAccessStub: public PlatformCodeStub {
public:
enum Type {
READ_ELEMENT,
@@ -720,7 +979,7 @@ class ArgumentsAccessStub: public CodeStub {
};
-class RegExpExecStub: public CodeStub {
+class RegExpExecStub: public PlatformCodeStub {
public:
RegExpExecStub() { }
@@ -732,7 +991,7 @@ class RegExpExecStub: public CodeStub {
};
-class RegExpConstructResultStub: public CodeStub {
+class RegExpConstructResultStub: public PlatformCodeStub {
public:
RegExpConstructResultStub() { }
@@ -744,7 +1003,7 @@ class RegExpConstructResultStub: public CodeStub {
};
-class CallFunctionStub: public CodeStub {
+class CallFunctionStub: public PlatformCodeStub {
public:
CallFunctionStub(int argc, CallFunctionFlags flags)
: argc_(argc), flags_(flags) { }
@@ -785,7 +1044,7 @@ class CallFunctionStub: public CodeStub {
};
-class CallConstructStub: public CodeStub {
+class CallConstructStub: public PlatformCodeStub {
public:
explicit CallConstructStub(CallFunctionFlags flags) : flags_(flags) {}
@@ -860,6 +1119,13 @@ class StringCharCodeAtGenerator {
void GenerateSlow(MacroAssembler* masm,
const RuntimeCallHelper& call_helper);
+ // Skip handling the slow case and jump directly to bailout.
+ void SkipSlow(MacroAssembler* masm, Label* bailout) {
+ masm->bind(&index_not_smi_);
+ masm->bind(&call_runtime_);
+ masm->jmp(bailout);
+ }
+
private:
Register object_;
Register index_;
@@ -900,6 +1166,12 @@ class StringCharFromCodeGenerator {
void GenerateSlow(MacroAssembler* masm,
const RuntimeCallHelper& call_helper);
+ // Skip handling the slow case and jump directly to bailout.
+ void SkipSlow(MacroAssembler* masm, Label* bailout) {
+ masm->bind(&slow_case_);
+ masm->jmp(bailout);
+ }
+
private:
Register code_;
Register result_;
@@ -942,13 +1214,25 @@ class StringCharAtGenerator {
// Generates the fast case code. On the fallthrough path |result|
// register contains the result.
- void GenerateFast(MacroAssembler* masm);
+ void GenerateFast(MacroAssembler* masm) {
+ char_code_at_generator_.GenerateFast(masm);
+ char_from_code_generator_.GenerateFast(masm);
+ }
// Generates the slow case code. Must not be naturally
// reachable. Expected to be put after a ret instruction (e.g., in
// deferred code). Always jumps back to the fast case.
void GenerateSlow(MacroAssembler* masm,
- const RuntimeCallHelper& call_helper);
+ const RuntimeCallHelper& call_helper) {
+ char_code_at_generator_.GenerateSlow(masm, call_helper);
+ char_from_code_generator_.GenerateSlow(masm, call_helper);
+ }
+
+ // Skip handling the slow case and jump directly to bailout.
+ void SkipSlow(MacroAssembler* masm, Label* bailout) {
+ char_code_at_generator_.SkipSlow(masm, bailout);
+ char_from_code_generator_.SkipSlow(masm, bailout);
+ }
private:
StringCharCodeAtGenerator char_code_at_generator_;
@@ -976,25 +1260,145 @@ class AllowStubCallsScope {
};
-class KeyedLoadElementStub : public CodeStub {
+class KeyedLoadDictionaryElementStub : public PlatformCodeStub {
public:
- explicit KeyedLoadElementStub(ElementsKind elements_kind)
- : elements_kind_(elements_kind)
- { }
+ KeyedLoadDictionaryElementStub() {}
Major MajorKey() { return KeyedLoadElement; }
- int MinorKey() { return elements_kind_; }
+ int MinorKey() { return DICTIONARY_ELEMENTS; }
void Generate(MacroAssembler* masm);
private:
- ElementsKind elements_kind_;
+ DISALLOW_COPY_AND_ASSIGN(KeyedLoadDictionaryElementStub);
+};
+
+
+class KeyedLoadFastElementStub : public HydrogenCodeStub {
+ public:
+ KeyedLoadFastElementStub(bool is_js_array, ElementsKind elements_kind) {
+ bit_field_ = ElementsKindBits::encode(elements_kind) |
+ IsJSArrayBits::encode(is_js_array);
+ }
+
+ bool is_js_array() const {
+ return IsJSArrayBits::decode(bit_field_);
+ }
+
+ ElementsKind elements_kind() const {
+ return ElementsKindBits::decode(bit_field_);
+ }
+
+ virtual Handle<Code> GenerateCode();
+
+ virtual void InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor);
+
+ private:
+ class IsJSArrayBits: public BitField<bool, 8, 1> {};
+ class ElementsKindBits: public BitField<ElementsKind, 0, 8> {};
+ uint32_t bit_field_;
+
+ Major MajorKey() { return KeyedLoadElement; }
+ int MinorKey() { return bit_field_; }
+
+ DISALLOW_COPY_AND_ASSIGN(KeyedLoadFastElementStub);
+};
+
+
+class TransitionElementsKindStub : public HydrogenCodeStub {
+ public:
+ TransitionElementsKindStub(ElementsKind from_kind,
+ ElementsKind to_kind) {
+ bit_field_ = FromKindBits::encode(from_kind) |
+ ToKindBits::encode(to_kind);
+ }
+
+ ElementsKind from_kind() const {
+ return FromKindBits::decode(bit_field_);
+ }
+
+ ElementsKind to_kind() const {
+ return ToKindBits::decode(bit_field_);
+ }
+
+ virtual Handle<Code> GenerateCode();
+
+ virtual void InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor);
+
+ private:
+ class FromKindBits: public BitField<ElementsKind, 8, 8> {};
+ class ToKindBits: public BitField<ElementsKind, 0, 8> {};
+ uint32_t bit_field_;
+
+ Major MajorKey() { return TransitionElementsKind; }
+ int MinorKey() { return bit_field_; }
+
+ DISALLOW_COPY_AND_ASSIGN(TransitionElementsKindStub);
+};
+
+
+class ArrayNoArgumentConstructorStub : public HydrogenCodeStub {
+ public:
+ ArrayNoArgumentConstructorStub() {
+ }
+
+ Major MajorKey() { return ArrayNoArgumentConstructor; }
+ int MinorKey() { return 0; }
+
+ virtual Handle<Code> GenerateCode();
+
+ virtual void InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(ArrayNoArgumentConstructorStub);
+};
+
+
+class ArraySingleArgumentConstructorStub : public HydrogenCodeStub {
+ public:
+ ArraySingleArgumentConstructorStub() {
+ }
+
+ Major MajorKey() { return ArraySingleArgumentConstructor; }
+ int MinorKey() { return 0; }
+
+ virtual Handle<Code> GenerateCode();
- DISALLOW_COPY_AND_ASSIGN(KeyedLoadElementStub);
+ virtual void InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(ArraySingleArgumentConstructorStub);
};
-class KeyedStoreElementStub : public CodeStub {
+class ArrayNArgumentsConstructorStub : public HydrogenCodeStub {
+ public:
+ ArrayNArgumentsConstructorStub() {
+ }
+
+ Major MajorKey() { return ArrayNArgumentsConstructor; }
+ int MinorKey() { return 0; }
+
+ virtual Handle<Code> GenerateCode();
+
+ virtual void InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(ArrayNArgumentsConstructorStub);
+};
+
+
+class KeyedStoreElementStub : public PlatformCodeStub {
public:
KeyedStoreElementStub(bool is_js_array,
ElementsKind elements_kind,
@@ -1029,7 +1433,7 @@ class KeyedStoreElementStub : public CodeStub {
};
-class ToBooleanStub: public CodeStub {
+class ToBooleanStub: public PlatformCodeStub {
public:
enum Type {
UNDEFINED,
@@ -1053,6 +1457,9 @@ class ToBooleanStub: public CodeStub {
bool IsEmpty() const { return set_.IsEmpty(); }
bool Contains(Type type) const { return set_.Contains(type); }
+ bool ContainsAnyOf(Types types) const {
+ return set_.ContainsAnyOf(types.set_);
+ }
void Add(Type type) { set_.Add(type); }
byte ToByte() const { return set_.ToIntegral(); }
void Print(StringStream* stream) const;
@@ -1096,7 +1503,7 @@ class ToBooleanStub: public CodeStub {
};
-class ElementsTransitionAndStoreStub : public CodeStub {
+class ElementsTransitionAndStoreStub : public PlatformCodeStub {
public:
ElementsTransitionAndStoreStub(ElementsKind from,
ElementsKind to,
@@ -1137,7 +1544,7 @@ class ElementsTransitionAndStoreStub : public CodeStub {
};
-class StoreArrayLiteralElementStub : public CodeStub {
+class StoreArrayLiteralElementStub : public PlatformCodeStub {
public:
StoreArrayLiteralElementStub()
: fp_registers_(CanUseFPRegisters()) { }
@@ -1156,7 +1563,30 @@ class StoreArrayLiteralElementStub : public CodeStub {
};
-class ProfileEntryHookStub : public CodeStub {
+class StubFailureTrampolineStub : public PlatformCodeStub {
+ public:
+ static const int kMaxExtraExpressionStackCount = 1;
+
+ explicit StubFailureTrampolineStub(int extra_expression_stack_count)
+ : extra_expression_stack_count_(extra_expression_stack_count) {}
+
+ virtual bool IsPregenerated() { return true; }
+
+ static void GenerateAheadOfTime(Isolate* isolate);
+
+ private:
+ Major MajorKey() { return StubFailureTrampoline; }
+ int MinorKey() { return extra_expression_stack_count_; }
+
+ void Generate(MacroAssembler* masm);
+
+ int extra_expression_stack_count_;
+
+ DISALLOW_COPY_AND_ASSIGN(StubFailureTrampolineStub);
+};
+
+
+class ProfileEntryHookStub : public PlatformCodeStub {
public:
explicit ProfileEntryHookStub() {}
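
The recurring change in this header is that every stub is rebased from the old CodeStub onto one of two new parents. A toy model of that split, with all types as illustrative stand-ins rather than the real V8 declarations:

    #include <cstdio>

    struct MacroAssembler {};  // stand-in for V8's assembler

    class CodeStub {
     public:
      virtual ~CodeStub() {}
      virtual int MajorKey() const = 0;
      virtual int MinorKey() const = 0;
    };

    // Hand-written stubs emit machine code directly through the assembler.
    class PlatformCodeStub : public CodeStub {
     public:
      virtual void Generate(MacroAssembler* masm) = 0;
    };

    // Hydrogen stubs instead build a graph that the optimizing pipeline
    // compiles, configured through an interface descriptor.
    class HydrogenCodeStub : public CodeStub {
     public:
      virtual void GenerateCode() = 0;
    };

    class ToyPlatformStub : public PlatformCodeStub {
     public:
      virtual int MajorKey() const { return 1; }
      virtual int MinorKey() const { return 0; }
      virtual void Generate(MacroAssembler*) { std::printf("emit code\n"); }
    };

    int main() {
      MacroAssembler masm;
      ToyPlatformStub stub;
      stub.Generate(&masm);  // platform stubs run their generator directly
      return 0;
    }
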
diff --git a/src/3rdparty/v8/src/codegen.cc b/src/3rdparty/v8/src/codegen.cc
index 0163580..508e221 100644
--- a/src/3rdparty/v8/src/codegen.cc
+++ b/src/3rdparty/v8/src/codegen.cc
@@ -76,16 +76,22 @@ void CodeGenerator::MakeCodePrologue(CompilationInfo* info) {
if (FLAG_trace_codegen || print_source || print_ast) {
PrintF("*** Generate code for %s function: ", ftype);
- info->function()->name()->ShortPrint();
+ if (info->IsStub()) {
+ const char* name =
+ CodeStub::MajorName(info->code_stub()->MajorKey(), true);
+ PrintF("%s", name == NULL ? "<unknown>" : name);
+ } else {
+ info->function()->name()->ShortPrint();
+ }
PrintF(" ***\n");
}
- if (print_source) {
+ if (!info->IsStub() && print_source) {
PrintF("--- Source from AST ---\n%s\n",
PrettyPrinter().PrintProgram(info->function()));
}
- if (print_ast) {
+ if (!info->IsStub() && print_ast) {
PrintF("--- AST ---\n%s\n",
AstPrinter().PrintProgram(info->function()));
}
@@ -107,6 +113,7 @@ Handle<Code> CodeGenerator::MakeCodeEpilogue(MacroAssembler* masm,
if (!code.is_null()) {
isolate->counters()->total_compiled_code_size()->Increment(
code->instruction_size());
+ code->set_prologue_offset(info->prologue_offset());
}
return code;
}
@@ -116,23 +123,29 @@ void CodeGenerator::PrintCode(Handle<Code> code, CompilationInfo* info) {
#ifdef ENABLE_DISASSEMBLER
bool print_code = Isolate::Current()->bootstrapper()->IsActive()
? FLAG_print_builtin_code
- : (FLAG_print_code || (info->IsOptimizing() && FLAG_print_opt_code));
+ : (FLAG_print_code ||
+ (info->IsStub() && FLAG_print_code_stubs) ||
+ (info->IsOptimizing() && FLAG_print_opt_code));
if (print_code) {
// Print the source code if available.
FunctionLiteral* function = info->function();
- Handle<Script> script = info->script();
- if (!script->IsUndefined() && !script->source()->IsUndefined()) {
- PrintF("--- Raw source ---\n");
- StringInputBuffer stream(String::cast(script->source()));
- stream.Seek(function->start_position());
- // fun->end_position() points to the last character in the stream. We
- // need to compensate by adding one to calculate the length.
- int source_len =
- function->end_position() - function->start_position() + 1;
- for (int i = 0; i < source_len; i++) {
- if (stream.has_more()) PrintF("%c", stream.GetNext());
+ if (code->kind() != Code::COMPILED_STUB) {
+ Handle<Script> script = info->script();
+ if (!script->IsUndefined() && !script->source()->IsUndefined()) {
+ PrintF("--- Raw source ---\n");
+ ConsStringIteratorOp op;
+ StringCharacterStream stream(String::cast(script->source()),
+ &op,
+ function->start_position());
+ // fun->end_position() points to the last character in the stream. We
+ // need to compensate by adding one to calculate the length.
+ int source_len =
+ function->end_position() - function->start_position() + 1;
+ for (int i = 0; i < source_len; i++) {
+ if (stream.HasMore()) PrintF("%c", stream.GetNext());
+ }
+ PrintF("\n\n");
}
- PrintF("\n\n");
}
if (info->IsOptimizing()) {
if (FLAG_print_unopt_code) {
@@ -144,7 +157,12 @@ void CodeGenerator::PrintCode(Handle<Code> code, CompilationInfo* info) {
} else {
PrintF("--- Code ---\n");
}
- code->Disassemble(*function->debug_name()->ToCString());
+ if (info->IsStub()) {
+ CodeStub::Major major_key = info->code_stub()->MajorKey();
+ code->Disassemble(CodeStub::MajorName(major_key, false));
+ } else {
+ code->Disassemble(*function->debug_name()->ToCString());
+ }
}
#endif // ENABLE_DISASSEMBLER
}
@@ -158,7 +176,7 @@ bool CodeGenerator::ShouldGenerateLog(Expression* type) {
}
Handle<String> name = Handle<String>::cast(type->AsLiteral()->handle());
if (FLAG_log_regexp) {
- if (name->IsEqualTo(CStrVector("regexp")))
+ if (name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("regexp")))
return true;
}
return false;
diff --git a/src/3rdparty/v8/src/codegen.h b/src/3rdparty/v8/src/codegen.h
index 08a777f..09907c4 100644
--- a/src/3rdparty/v8/src/codegen.h
+++ b/src/3rdparty/v8/src/codegen.h
@@ -90,19 +90,41 @@ namespace internal {
typedef double (*UnaryMathFunction)(double x);
UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type);
+UnaryMathFunction CreateExpFunction();
UnaryMathFunction CreateSqrtFunction();
class ElementsTransitionGenerator : public AllStatic {
public:
- static void GenerateMapChangeElementsTransition(MacroAssembler* masm);
- static void GenerateSmiToDouble(MacroAssembler* masm, Label* fail);
- static void GenerateDoubleToObject(MacroAssembler* masm, Label* fail);
+ // If |mode| is set to DONT_TRACK_ALLOCATION_SITE,
+ // |allocation_site_info_found| may be NULL.
+ static void GenerateMapChangeElementsTransition(MacroAssembler* masm,
+ AllocationSiteMode mode,
+ Label* allocation_site_info_found);
+ static void GenerateSmiToDouble(MacroAssembler* masm,
+ AllocationSiteMode mode,
+ Label* fail);
+ static void GenerateDoubleToObject(MacroAssembler* masm,
+ AllocationSiteMode mode,
+ Label* fail);
private:
DISALLOW_COPY_AND_ASSIGN(ElementsTransitionGenerator);
};
+
+class SeqStringSetCharGenerator : public AllStatic {
+ public:
+ static void Generate(MacroAssembler* masm,
+ String::Encoding encoding,
+ Register string,
+ Register index,
+ Register value);
+ private:
+ DISALLOW_COPY_AND_ASSIGN(SeqStringSetCharGenerator);
+};
+
+
} } // namespace v8::internal
#endif // V8_CODEGEN_H_
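
CreateExpFunction() above follows the existing UnaryMathFunction pattern: callers receive a plain double->double function pointer and never care whether it points at generated machine code. A sketch of that calling convention, using a libm-backed fallback purely for illustration:

    #include <cmath>
    #include <cstdio>

    typedef double (*UnaryMathFunction)(double x);

    static double MathExp(double x) { return std::exp(x); }

    // Stand-in for CreateExpFunction(): the real one emits specialized
    // machine code and returns a pointer to it.
    static UnaryMathFunction CreateExpFunctionSketch() { return &MathExp; }

    int main() {
      UnaryMathFunction fast_exp = CreateExpFunctionSketch();
      std::printf("exp(1) = %f\n", fast_exp(1.0));  // 2.718282
      return 0;
    }
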
diff --git a/src/3rdparty/v8/src/compiler.cc b/src/3rdparty/v8/src/compiler.cc
index b1f40b6..1e53cfe 100644
--- a/src/3rdparty/v8/src/compiler.cc
+++ b/src/3rdparty/v8/src/compiler.cc
@@ -52,57 +52,72 @@ namespace internal {
CompilationInfo::CompilationInfo(Handle<Script> script, Zone* zone)
- : isolate_(script->GetIsolate()),
- flags_(LanguageModeField::encode(CLASSIC_MODE)),
- function_(NULL),
- scope_(NULL),
- global_scope_(NULL),
+ : flags_(LanguageModeField::encode(CLASSIC_MODE)),
script_(script),
- extension_(NULL),
- pre_parse_data_(NULL),
- osr_ast_id_(BailoutId::None()),
- zone_(zone),
- deferred_handles_(NULL) {
- Initialize(BASE);
+ osr_ast_id_(BailoutId::None()) {
+ Initialize(script->GetIsolate(), BASE, zone);
}
CompilationInfo::CompilationInfo(Handle<SharedFunctionInfo> shared_info,
Zone* zone)
- : isolate_(shared_info->GetIsolate()),
- flags_(LanguageModeField::encode(CLASSIC_MODE) |
- IsLazy::encode(true)),
- function_(NULL),
- scope_(NULL),
- global_scope_(NULL),
+ : flags_(LanguageModeField::encode(CLASSIC_MODE) | IsLazy::encode(true)),
shared_info_(shared_info),
script_(Handle<Script>(Script::cast(shared_info->script()))),
- extension_(NULL),
- pre_parse_data_(NULL),
- osr_ast_id_(BailoutId::None()),
- zone_(zone),
- deferred_handles_(NULL) {
- Initialize(BASE);
+ osr_ast_id_(BailoutId::None()) {
+ Initialize(script_->GetIsolate(), BASE, zone);
}
CompilationInfo::CompilationInfo(Handle<JSFunction> closure, Zone* zone)
- : isolate_(closure->GetIsolate()),
- flags_(LanguageModeField::encode(CLASSIC_MODE) |
- IsLazy::encode(true)),
- function_(NULL),
- scope_(NULL),
- global_scope_(NULL),
+ : flags_(LanguageModeField::encode(CLASSIC_MODE) | IsLazy::encode(true)),
closure_(closure),
shared_info_(Handle<SharedFunctionInfo>(closure->shared())),
script_(Handle<Script>(Script::cast(shared_info_->script()))),
- extension_(NULL),
- pre_parse_data_(NULL),
context_(closure->context()),
- osr_ast_id_(BailoutId::None()),
- zone_(zone),
- deferred_handles_(NULL) {
- Initialize(BASE);
+ osr_ast_id_(BailoutId::None()) {
+ Initialize(script_->GetIsolate(), BASE, zone);
+}
+
+
+CompilationInfo::CompilationInfo(HydrogenCodeStub* stub,
+ Isolate* isolate, Zone* zone)
+ : flags_(LanguageModeField::encode(CLASSIC_MODE) |
+ IsLazy::encode(true)),
+ osr_ast_id_(BailoutId::None()) {
+ Initialize(isolate, STUB, zone);
+ code_stub_ = stub;
+}
+
+
+void CompilationInfo::Initialize(Isolate* isolate, Mode mode, Zone* zone) {
+ isolate_ = isolate;
+ function_ = NULL;
+ scope_ = NULL;
+ global_scope_ = NULL;
+ extension_ = NULL;
+ pre_parse_data_ = NULL;
+ zone_ = zone;
+ deferred_handles_ = NULL;
+ code_stub_ = NULL;
+ prologue_offset_ = kPrologueOffsetNotSet;
+ opt_count_ = shared_info().is_null() ? 0 : shared_info()->opt_count();
+ if (mode == STUB) {
+ mode_ = STUB;
+ return;
+ }
+ mode_ = V8::UseCrankshaft() ? mode : NONOPT;
+ if (script_->type()->value() == Script::TYPE_NATIVE) {
+ MarkAsNative();
+ }
+ if (!shared_info_.is_null()) {
+ ASSERT(language_mode() == CLASSIC_MODE);
+ SetLanguageMode(shared_info_->language_mode());
+ }
+ if (!shared_info_.is_null() && shared_info_->qml_mode()) {
+ MarkAsQmlMode();
+ }
+ set_bailout_reason("unknown");
}
@@ -111,6 +126,33 @@ CompilationInfo::~CompilationInfo() {
}
+int CompilationInfo::num_parameters() const {
+ if (IsStub()) {
+ return 0;
+ } else {
+ return scope()->num_parameters();
+ }
+}
+
+
+int CompilationInfo::num_heap_slots() const {
+ if (IsStub()) {
+ return 0;
+ } else {
+ return scope()->num_heap_slots();
+ }
+}
+
+
+Code::Flags CompilationInfo::flags() const {
+ if (IsStub()) {
+ return Code::ComputeFlags(Code::COMPILED_STUB);
+ } else {
+ return Code::ComputeFlags(Code::OPTIMIZED_FUNCTION);
+ }
+}
+
+
// Disable optimization for the rest of the compilation pipeline.
void CompilationInfo::DisableOptimization() {
bool is_optimizable_closure =
@@ -194,6 +236,11 @@ void OptimizingCompiler::RecordOptimizationStats() {
code_size,
compilation_time);
}
+ if (FLAG_hydrogen_stats) {
+ HStatistics::Instance()->IncrementSubtotals(time_taken_to_create_graph_,
+ time_taken_to_optimize_,
+ time_taken_to_codegen_);
+ }
}
@@ -242,7 +289,7 @@ OptimizingCompiler::Status OptimizingCompiler::CreateGraph() {
// the optimizing compiler.
const int kMaxOptCount =
FLAG_deopt_every_n_times == 0 ? FLAG_max_opt_count : 1000;
- if (info()->shared_info()->opt_count() > kMaxOptCount) {
+ if (info()->opt_count() > kMaxOptCount) {
info()->set_bailout_reason("optimized too many times");
return AbortOptimization();
}
@@ -273,8 +320,8 @@ OptimizingCompiler::Status OptimizingCompiler::CreateGraph() {
if (*FLAG_hydrogen_filter != '\0') {
Vector<const char> filter = CStrVector(FLAG_hydrogen_filter);
if ((filter[0] == '-'
- && name->IsEqualTo(filter.SubVector(1, filter.length())))
- || (filter[0] != '-' && !name->IsEqualTo(filter))) {
+ && name->IsUtf8EqualTo(filter.SubVector(1, filter.length())))
+ || (filter[0] != '-' && !name->IsUtf8EqualTo(filter))) {
info()->SetCode(code);
return SetLastStatus(BAILED_OUT);
}
@@ -284,7 +331,6 @@ OptimizingCompiler::Status OptimizingCompiler::CreateGraph() {
// doesn't have deoptimization support. Alternatively, we may decide to
// run the full code generator to get a baseline for the compile-time
// performance of the hydrogen-based compiler.
- Timer t(this, &time_taken_to_create_graph_);
bool should_recompile = !info()->shared_info()->has_deoptimization_support();
if (should_recompile || FLAG_hydrogen_stats) {
HPhase phase(HPhase::kFullCodeGen);
@@ -317,14 +363,15 @@ OptimizingCompiler::Status OptimizingCompiler::CreateGraph() {
if (FLAG_trace_hydrogen) {
PrintF("-----------------------------------------------------------\n");
PrintF("Compiling method %s using hydrogen\n", *name->ToCString());
- HTracer::Instance()->TraceCompilation(info()->function());
+ HTracer::Instance()->TraceCompilation(info());
}
Handle<Context> native_context(
info()->closure()->context()->native_context());
oracle_ = new(info()->zone()) TypeFeedbackOracle(
code, native_context, info()->isolate(), info()->zone());
- graph_builder_ = new(info()->zone()) HGraphBuilder(info(), oracle_);
- HPhase phase(HPhase::kTotal);
+ graph_builder_ = new(info()->zone()) HOptimizedGraphBuilder(info(), oracle_);
+
+ Timer t(this, &time_taken_to_create_graph_);
graph_ = graph_builder_->CreateGraph();
if (info()->isolate()->has_pending_exception()) {
@@ -350,7 +397,8 @@ OptimizingCompiler::Status OptimizingCompiler::CreateGraph() {
OptimizingCompiler::Status OptimizingCompiler::OptimizeGraph() {
AssertNoAllocation no_gc;
- NoHandleAllocation no_handles;
+ NoHandleAllocation no_handles(isolate());
+ NoHandleDereference no_deref(isolate());
ASSERT(last_status() == SUCCEEDED);
Timer t(this, &time_taken_to_optimize_);
@@ -371,15 +419,17 @@ OptimizingCompiler::Status OptimizingCompiler::OptimizeGraph() {
OptimizingCompiler::Status OptimizingCompiler::GenerateAndInstallCode() {
ASSERT(last_status() == SUCCEEDED);
- Timer timer(this, &time_taken_to_codegen_);
- ASSERT(chunk_ != NULL);
- ASSERT(graph_ != NULL);
- Handle<Code> optimized_code = chunk_->Codegen();
- if (optimized_code.is_null()) {
- info()->set_bailout_reason("code generation failed");
- return AbortOptimization();
+ { // Scope for timer.
+ Timer timer(this, &time_taken_to_codegen_);
+ ASSERT(chunk_ != NULL);
+ ASSERT(graph_ != NULL);
+ Handle<Code> optimized_code = chunk_->Codegen(Code::OPTIMIZED_FUNCTION);
+ if (optimized_code.is_null()) {
+ info()->set_bailout_reason("code generation failed");
+ return AbortOptimization();
+ }
+ info()->SetCode(optimized_code);
}
- info()->SetCode(optimized_code);
RecordOptimizationStats();
return SetLastStatus(SUCCEEDED);
}
@@ -390,6 +440,8 @@ static bool GenerateCode(CompilationInfo* info) {
!info->IsCompilingForDebugging() &&
info->IsOptimizing();
if (is_optimizing) {
+ Logger::TimerEventScope timer(
+ info->isolate(), Logger::TimerEventScope::v8_recompile_synchronous);
return MakeCrankshaftCode(info);
} else {
if (info->IsOptimizing()) {
@@ -397,6 +449,8 @@ static bool GenerateCode(CompilationInfo* info) {
// BASE or NONOPT.
info->DisableOptimization();
}
+ Logger::TimerEventScope timer(
+ info->isolate(), Logger::TimerEventScope::v8_compile_full_code);
return FullCodeGenerator::MakeCode(info);
}
}
@@ -425,6 +479,13 @@ bool Compiler::MakeCodeForLiveEdit(CompilationInfo* info) {
#endif
+static bool DebuggerWantsEagerCompilation(CompilationInfo* info,
+ bool allow_lazy_without_ctx = false) {
+ return LiveEditFunctionTracker::IsActive(info->isolate()) ||
+ (info->isolate()->DebuggerHasBreakPoints() && !allow_lazy_without_ctx);
+}
+
+
static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) {
Isolate* isolate = info->isolate();
ZoneScope zone_scope(info->zone(), DELETE_ON_EXIT);
@@ -432,7 +493,9 @@ static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) {
ASSERT(!isolate->native_context().is_null());
Handle<Script> script = info->script();
- script->set_context_data((*isolate->native_context())->data());
+ // TODO(svenpanne) Obscure place for this, perhaps move to OnBeforeCompile?
+ FixedArray* array = isolate->native_context()->embedder_data();
+ script->set_context_data(array->get(0));
#ifdef ENABLE_DEBUGGER_SUPPORT
if (info->is_eval()) {
@@ -460,8 +523,9 @@ static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) {
// Only allow non-global compiles for eval.
ASSERT(info->is_eval() || info->is_global());
ParsingFlags flags = kNoParsingFlags;
- if (info->pre_parse_data() != NULL ||
- String::cast(script->source())->length() > FLAG_min_preparse_length) {
+ if ((info->pre_parse_data() != NULL ||
+ String::cast(script->source())->length() > FLAG_min_preparse_length) &&
+ !DebuggerWantsEagerCompilation(info)) {
flags = kAllowLazy;
}
if (!ParserApi::Parse(info, flags)) {
@@ -539,16 +603,17 @@ static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) {
}
-Handle<SharedFunctionInfo> Compiler::Compile(Handle<String> source,
- Handle<Object> script_name,
- int line_offset,
- int column_offset,
- Handle<Context> context,
- v8::Extension* extension,
- ScriptDataImpl* pre_data,
- Handle<Object> script_data,
- NativesFlag natives,
- v8::Script::CompileFlags compile_flags) {
+Handle<SharedFunctionInfo> Compiler::Compile(
+ Handle<String> source,
+ Handle<Object> script_name,
+ int line_offset,
+ int column_offset,
+ Handle<Context> context,
+ v8::Extension* extension,
+ ScriptDataImpl* pre_data,
+ Handle<Object> script_data,
+ NativesFlag natives,
+ v8::Script::CompileFlags compile_flags) {
Isolate* isolate = source->GetIsolate();
int source_length = source->length();
isolate->counters()->total_load_size()->Increment(source_length);
@@ -611,7 +676,6 @@ Handle<SharedFunctionInfo> Compiler::Compile(Handle<String> source,
if (result->ic_age() != HEAP->global_ic_age()) {
result->ResetForNewContext(HEAP->global_ic_age());
}
- result->code()->MakeYoung();
}
if (result.is_null()) isolate->ReportPendingMessages();
@@ -675,7 +739,6 @@ Handle<SharedFunctionInfo> Compiler::CompileEval(Handle<String> source,
if (result->ic_age() != HEAP->global_ic_age()) {
result->ResetForNewContext(HEAP->global_ic_age());
}
- result->code()->MakeYoung();
}
return result;
@@ -694,7 +757,7 @@ static bool InstallFullCode(CompilationInfo* info) {
Handle<ScopeInfo> scope_info =
ScopeInfo::Create(info->scope(), info->zone());
shared->set_scope_info(*scope_info);
- shared->set_code(*code);
+ shared->ReplaceCode(*code);
if (!function.is_null()) {
function->ReplaceCode(*code);
ASSERT(!function->IsOptimized());
@@ -833,7 +896,7 @@ bool Compiler::CompileLazy(CompilationInfo* info) {
if (info->IsOptimizing()) {
Handle<Code> code = info->code();
- ASSERT(shared->scope_info() != ScopeInfo::Empty());
+ ASSERT(shared->scope_info() != ScopeInfo::Empty(isolate));
info->closure()->ReplaceCode(*code);
InsertCodeIntoOptimizedCodeMap(info);
return true;
@@ -853,6 +916,11 @@ void Compiler::RecompileParallel(Handle<JSFunction> closure) {
ASSERT(closure->IsMarkedForParallelRecompilation());
Isolate* isolate = closure->GetIsolate();
+ // Here we prepare compile data for the parallel recompilation thread, but
+ // this still happens synchronously and interrupts execution.
+ Logger::TimerEventScope timer(
+ isolate, Logger::TimerEventScope::v8_recompile_synchronous);
+
if (!isolate->optimizing_compiler_thread()->IsQueueAvailable()) {
if (FLAG_trace_parallel_recompilation) {
PrintF(" ** Compilation queue, will retry opting on next run.\n");
@@ -861,7 +929,7 @@ void Compiler::RecompileParallel(Handle<JSFunction> closure) {
}
SmartPointer<CompilationInfo> info(new CompilationInfoWithZone(closure));
- VMState state(isolate, PARALLEL_COMPILER_PROLOGUE);
+ VMState state(isolate, PARALLEL_COMPILER);
PostponeInterruptsScope postpone(isolate);
Handle<SharedFunctionInfo> shared = info->shared_info();
@@ -872,7 +940,10 @@ void Compiler::RecompileParallel(Handle<JSFunction> closure) {
{
CompilationHandleScope handle_scope(*info);
- if (InstallCodeFromOptimizedCodeMap(*info)) return;
+ if (!FLAG_manual_parallel_recompilation &&
+ InstallCodeFromOptimizedCodeMap(*info)) {
+ return;
+ }
if (ParserApi::Parse(*info, kNoParsingFlags)) {
LanguageMode language_mode = info->function()->language_mode();
@@ -906,6 +977,10 @@ void Compiler::RecompileParallel(Handle<JSFunction> closure) {
void Compiler::InstallOptimizedCode(OptimizingCompiler* optimizing_compiler) {
SmartPointer<CompilationInfo> info(optimizing_compiler->info());
+ Isolate* isolate = info->isolate();
+ VMState state(isolate, PARALLEL_COMPILER);
+ Logger::TimerEventScope timer(
+ isolate, Logger::TimerEventScope::v8_recompile_synchronous);
// If crankshaft succeeded, install the optimized code; otherwise install
// the unoptimized code.
OptimizingCompiler::Status status = optimizing_compiler->last_status();
@@ -922,7 +997,7 @@ void Compiler::InstallOptimizedCode(OptimizingCompiler* optimizing_compiler) {
InstallCodeCommon(*info);
if (status == OptimizingCompiler::SUCCEEDED) {
Handle<Code> code = info->code();
- ASSERT(info->shared_info()->scope_info() != ScopeInfo::Empty());
+ ASSERT(info->shared_info()->scope_info() != ScopeInfo::Empty(isolate));
info->closure()->ReplaceCode(*code);
if (info->shared_info()->SearchOptimizedCodeMap(
info->closure()->context()->native_context()) == -1) {
@@ -943,7 +1018,8 @@ Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(FunctionLiteral* literal,
info.SetScope(literal->scope());
info.SetLanguageMode(literal->scope()->language_mode());
- LiveEditFunctionTracker live_edit_tracker(info.isolate(), literal);
+ Isolate* isolate = info.isolate();
+ LiveEditFunctionTracker live_edit_tracker(isolate, literal);
// Determine if the function can be lazily compiled. This is necessary to
// allow some of our builtin JS files to be lazily compiled. These
// builtins cannot be handled lazily by the parser, since we have to know
@@ -955,14 +1031,13 @@ Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(FunctionLiteral* literal,
// Debug::FindSharedFunctionInfoInScript.
bool allow_lazy_without_ctx = literal->AllowsLazyCompilationWithoutContext();
bool allow_lazy = literal->AllowsLazyCompilation() &&
- !LiveEditFunctionTracker::IsActive(info.isolate()) &&
- (!info.isolate()->DebuggerHasBreakPoints() || allow_lazy_without_ctx);
+ !DebuggerWantsEagerCompilation(&info, allow_lazy_without_ctx);
- Handle<ScopeInfo> scope_info(ScopeInfo::Empty());
+ Handle<ScopeInfo> scope_info(ScopeInfo::Empty(isolate));
// Generate code
if (FLAG_lazy && allow_lazy && !literal->is_parenthesized()) {
- Handle<Code> code = info.isolate()->builtins()->LazyCompile();
+ Handle<Code> code = isolate->builtins()->LazyCompile();
info.SetCode(code);
} else if (GenerateCode(&info)) {
ASSERT(!info.code().is_null());
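
DebuggerWantsEagerCompilation(), introduced above, centralizes the rule used in both MakeFunctionInfo() and BuildFunctionInfo(): lazy parsing and compilation are fine unless live editing is active or breakpoints exist (and no context-free lazy compilation is allowed). A boolean-only sketch of that decision, with the flags standing in for the Isolate and CompilationInfo queries:

    #include <cstdio>

    bool DebuggerWantsEagerCompilation(bool live_edit_active,
                                       bool has_break_points,
                                       bool allow_lazy_without_ctx = false) {
      return live_edit_active ||
             (has_break_points && !allow_lazy_without_ctx);
    }

    bool AllowLazyParsing(bool has_pre_parse_data, int source_length,
                          int min_preparse_length, bool live_edit_active,
                          bool has_break_points) {
      // Mirrors the condition in MakeFunctionInfo(): preparse data or a
      // long source suggests laziness, but an active debugger vetoes it.
      return (has_pre_parse_data || source_length > min_preparse_length) &&
             !DebuggerWantsEagerCompilation(live_edit_active, has_break_points);
    }

    int main() {
      std::printf("%d\n", AllowLazyParsing(false, 2048, 1024, false, false));  // 1
      std::printf("%d\n", AllowLazyParsing(false, 2048, 1024, false, true));   // 0
      return 0;
    }
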
diff --git a/src/3rdparty/v8/src/compiler.h b/src/3rdparty/v8/src/compiler.h
index b119775..6abaafa 100644
--- a/src/3rdparty/v8/src/compiler.h
+++ b/src/3rdparty/v8/src/compiler.h
@@ -35,7 +35,10 @@
namespace v8 {
namespace internal {
+static const int kPrologueOffsetNotSet = -1;
+
class ScriptDataImpl;
+class HydrogenCodeStub;
// CompilationInfo encapsulates some information known at compile time. It
// is constructed based on the resources available at compile-time.
@@ -44,6 +47,7 @@ class CompilationInfo {
CompilationInfo(Handle<Script> script, Zone* zone);
CompilationInfo(Handle<SharedFunctionInfo> shared_info, Zone* zone);
CompilationInfo(Handle<JSFunction> closure, Zone* zone);
+ CompilationInfo(HydrogenCodeStub* stub, Isolate* isolate, Zone* zone);
virtual ~CompilationInfo();
@@ -71,10 +75,15 @@ class CompilationInfo {
Handle<JSFunction> closure() const { return closure_; }
Handle<SharedFunctionInfo> shared_info() const { return shared_info_; }
Handle<Script> script() const { return script_; }
+ HydrogenCodeStub* code_stub() { return code_stub_; }
v8::Extension* extension() const { return extension_; }
ScriptDataImpl* pre_parse_data() const { return pre_parse_data_; }
Handle<Context> context() const { return context_; }
BailoutId osr_ast_id() const { return osr_ast_id_; }
+ int opt_count() const { return opt_count_; }
+ int num_parameters() const;
+ int num_heap_slots() const;
+ Code::Flags flags() const;
void MarkAsEval() {
ASSERT(!is_lazy());
@@ -100,9 +109,39 @@ class CompilationInfo {
void MarkAsNative() {
flags_ |= IsNative::encode(true);
}
+
bool is_native() const {
return IsNative::decode(flags_);
}
+
+ bool is_calling() const {
+ return is_deferred_calling() || is_non_deferred_calling();
+ }
+
+ void MarkAsDeferredCalling() {
+ flags_ |= IsDeferredCalling::encode(true);
+ }
+
+ bool is_deferred_calling() const {
+ return IsDeferredCalling::decode(flags_);
+ }
+
+ void MarkAsNonDeferredCalling() {
+ flags_ |= IsNonDeferredCalling::encode(true);
+ }
+
+ bool is_non_deferred_calling() const {
+ return IsNonDeferredCalling::decode(flags_);
+ }
+
+ void MarkAsSavesCallerDoubles() {
+ flags_ |= SavesCallerDoubles::encode(true);
+ }
+
+ bool saves_caller_doubles() const {
+ return SavesCallerDoubles::decode(flags_);
+ }
+
void SetFunction(FunctionLiteral* literal) {
ASSERT(function_ == NULL);
function_ = literal;
@@ -153,6 +192,7 @@ class CompilationInfo {
// Accessors for the different compilation modes.
bool IsOptimizing() const { return mode_ == OPTIMIZE; }
bool IsOptimizable() const { return mode_ == BASE; }
+ bool IsStub() const { return mode_ == STUB; }
void SetOptimizing(BailoutId osr_ast_id) {
SetMode(OPTIMIZE);
osr_ast_id_ = osr_ast_id;
@@ -190,6 +230,16 @@ class CompilationInfo {
const char* bailout_reason() const { return bailout_reason_; }
void set_bailout_reason(const char* reason) { bailout_reason_ = reason; }
+ int prologue_offset() const {
+ ASSERT_NE(kPrologueOffsetNotSet, prologue_offset_);
+ return prologue_offset_;
+ }
+
+ void set_prologue_offset(int prologue_offset) {
+ ASSERT_EQ(kPrologueOffsetNotSet, prologue_offset_);
+ prologue_offset_ = prologue_offset;
+ }
+
private:
Isolate* isolate_;
@@ -201,24 +251,11 @@ class CompilationInfo {
enum Mode {
BASE,
OPTIMIZE,
- NONOPT
+ NONOPT,
+ STUB
};
- void Initialize(Mode mode) {
- mode_ = V8::UseCrankshaft() ? mode : NONOPT;
- ASSERT(!script_.is_null());
- if (script_->type()->value() == Script::TYPE_NATIVE) {
- MarkAsNative();
- }
- if (!shared_info_.is_null()) {
- ASSERT(language_mode() == CLASSIC_MODE);
- SetLanguageMode(shared_info_->language_mode());
- }
- if (!shared_info_.is_null() && shared_info_->qml_mode()) {
- MarkAsQmlMode();
- }
- set_bailout_reason("unknown");
- }
+ void Initialize(Isolate* isolate, Mode mode, Zone* zone);
void SetMode(Mode mode) {
ASSERT(V8::UseCrankshaft());
@@ -244,8 +281,16 @@ class CompilationInfo {
// If compiling for debugging produce just full code matching the
// initial mode setting.
class IsCompilingForDebugging: public BitField<bool, 8, 1> {};
+ // If the compiled code contains calls that require building a frame
+ class IsCalling: public BitField<bool, 9, 1> {};
+ // If the compiled code contains deferred calls that require building a frame
+ class IsDeferredCalling: public BitField<bool, 10, 1> {};
+ // If the compiled code contains non-deferred calls that require building a frame
+ class IsNonDeferredCalling: public BitField<bool, 11, 1> {};
+ // If the compiled code saves double caller registers that it clobbers.
+ class SavesCallerDoubles: public BitField<bool, 12, 1> {};
// Qml mode
- class IsQmlMode: public BitField<bool, 9, 1> {};
+ class IsQmlMode: public BitField<bool, 13, 1> {};
unsigned flags_;
@@ -257,6 +302,8 @@ class CompilationInfo {
Scope* scope_;
// The global scope provided as a convenience.
Scope* global_scope_;
+ // For compiled stubs, the stub object
+ HydrogenCodeStub* code_stub_;
// The compiled code.
Handle<Code> code_;
@@ -293,6 +340,12 @@ class CompilationInfo {
const char* bailout_reason_;
+ int prologue_offset_;
+
+ // A copy of shared_info()->opt_count() to avoid handle deref
+ // during graph optimization.
+ int opt_count_;
+
DISALLOW_COPY_AND_ASSIGN(CompilationInfo);
};
@@ -301,6 +354,8 @@ class CompilationInfo {
// Zone on construction and deallocates it on exit.
class CompilationInfoWithZone: public CompilationInfo {
public:
+ INLINE(void* operator new(size_t size)) { return Malloced::New(size); }
+
explicit CompilationInfoWithZone(Handle<Script> script)
: CompilationInfo(script, &zone_),
zone_(script->GetIsolate()),
@@ -313,6 +368,10 @@ class CompilationInfoWithZone: public CompilationInfo {
: CompilationInfo(closure, &zone_),
zone_(closure->GetIsolate()),
zone_scope_(&zone_, DELETE_ON_EXIT) {}
+ explicit CompilationInfoWithZone(HydrogenCodeStub* stub, Isolate* isolate)
+ : CompilationInfo(stub, isolate, &zone_),
+ zone_(isolate),
+ zone_scope_(&zone_, DELETE_ON_EXIT) {}
private:
Zone zone_;
@@ -338,7 +397,7 @@ class CompilationHandleScope BASE_EMBEDDED {
class HGraph;
-class HGraphBuilder;
+class HOptimizedGraphBuilder;
class LChunk;
// A helper class that calls the three compilation phases in
@@ -370,6 +429,7 @@ class OptimizingCompiler: public ZoneObject {
Status last_status() const { return last_status_; }
CompilationInfo* info() const { return info_; }
+ Isolate* isolate() const { return info()->isolate(); }
MUST_USE_RESULT Status AbortOptimization() {
info_->AbortOptimization();
@@ -380,7 +440,7 @@ class OptimizingCompiler: public ZoneObject {
private:
CompilationInfo* info_;
TypeFeedbackOracle* oracle_;
- HGraphBuilder* graph_builder_;
+ HOptimizedGraphBuilder* graph_builder_;
HGraph* graph_;
LChunk* chunk_;
int64_t time_taken_to_create_graph_;
@@ -434,16 +494,17 @@ class Compiler : public AllStatic {
// contains NULL.
// Compile a String source within a context.
- static Handle<SharedFunctionInfo> Compile(Handle<String> source,
- Handle<Object> script_name,
- int line_offset,
- int column_offset,
- Handle<Context> context,
- v8::Extension* extension,
- ScriptDataImpl* pre_data,
- Handle<Object> script_data,
- NativesFlag is_natives_code,
- v8::Script::CompileFlags = v8::Script::Default);
+ static Handle<SharedFunctionInfo> Compile(
+ Handle<String> source,
+ Handle<Object> script_name,
+ int line_offset,
+ int column_offset,
+ Handle<Context> context,
+ v8::Extension* extension,
+ ScriptDataImpl* pre_data,
+ Handle<Object> script_data,
+ NativesFlag is_natives_code,
+ v8::Script::CompileFlags = v8::Script::Default);
// Compile a String source within a context for Eval.
static Handle<SharedFunctionInfo> CompileEval(Handle<String> source,
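
The prologue_offset accessors added to CompilationInfo above enforce a set-once protocol: the setter asserts the offset is still unset, the getter asserts it no longer is. A minimal sketch of that invariant, with plain assert standing in for ASSERT_EQ/ASSERT_NE:

    #include <cassert>

    static const int kPrologueOffsetNotSet = -1;

    class CompilationInfoSketch {
     public:
      CompilationInfoSketch() : prologue_offset_(kPrologueOffsetNotSet) {}
      int prologue_offset() const {
        assert(prologue_offset_ != kPrologueOffsetNotSet);  // must be set first
        return prologue_offset_;
      }
      void set_prologue_offset(int offset) {
        assert(prologue_offset_ == kPrologueOffsetNotSet);  // set exactly once
        prologue_offset_ = offset;
      }
     private:
      int prologue_offset_;
    };

    int main() {
      CompilationInfoSketch info;
      info.set_prologue_offset(12);
      return info.prologue_offset() == 12 ? 0 : 1;
    }
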
diff --git a/src/3rdparty/v8/src/contexts.cc b/src/3rdparty/v8/src/contexts.cc
index 662e326..26d8c1a 100644
--- a/src/3rdparty/v8/src/contexts.cc
+++ b/src/3rdparty/v8/src/contexts.cc
@@ -55,6 +55,15 @@ JSBuiltinsObject* Context::builtins() {
}
+Context* Context::global_context() {
+ Context* current = this;
+ while (!current->IsGlobalContext()) {
+ current = current->previous();
+ }
+ return current;
+}
+
+
Context* Context::native_context() {
// Fast case: the global object for this context has been set. In
// that case, the global object has a direct pointer to the global
@@ -191,6 +200,10 @@ Handle<Object> Context::Lookup(Handle<String> name,
? IMMUTABLE_CHECK_INITIALIZED_HARMONY :
IMMUTABLE_IS_INITIALIZED_HARMONY;
break;
+ case MODULE:
+ *attributes = READ_ONLY;
+ *binding_flags = IMMUTABLE_IS_INITIALIZED_HARMONY;
+ break;
case DYNAMIC:
case DYNAMIC_GLOBAL:
case DYNAMIC_LOCAL:
@@ -349,14 +362,11 @@ void Context::ClearOptimizedFunctions() {
Handle<Object> Context::ErrorMessageForCodeGenerationFromStrings() {
- Handle<Object> result(error_message_for_code_gen_from_strings());
- if (result->IsUndefined()) {
- const char* error =
- "Code generation from strings disallowed for this context";
- Isolate* isolate = Isolate::Current();
- result = isolate->factory()->NewStringFromAscii(i::CStrVector(error));
- }
- return result;
+ Handle<Object> result(error_message_for_code_gen_from_strings(),
+ GetIsolate());
+ if (!result->IsUndefined()) return result;
+ return GetIsolate()->factory()->NewStringFromAscii(i::CStrVector(
+ "Code generation from strings disallowed for this context"));
}
@@ -365,7 +375,7 @@ bool Context::IsBootstrappingOrValidParentContext(
Object* object, Context* child) {
// During bootstrapping we allow all objects to pass as
// contexts. This is necessary to fix circular dependencies.
- if (Isolate::Current()->bootstrapper()->IsActive()) return true;
+ if (child->GetIsolate()->bootstrapper()->IsActive()) return true;
if (!object->IsContext()) return false;
Context* context = Context::cast(object);
return context->IsNativeContext() || context->IsGlobalContext() ||
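
Context::global_context() above is a straightforward chain walk: follow previous() links until a context flagged as the global context is reached. A toy sketch of that traversal (the Ctx struct is illustrative, not V8's context layout, and it assumes a global context terminates every chain):

    #include <cassert>
    #include <cstddef>

    struct Ctx {
      Ctx* previous;
      bool is_global;
    };

    Ctx* GlobalContext(Ctx* current) {
      while (!current->is_global) {
        current = current->previous;  // assumes the chain ends in a global context
      }
      return current;
    }

    int main() {
      Ctx global = { NULL, true };
      Ctx function_ctx = { &global, false };
      Ctx block_ctx = { &function_ctx, false };
      assert(GlobalContext(&block_ctx) == &global);
      return 0;
    }
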
diff --git a/src/3rdparty/v8/src/contexts.h b/src/3rdparty/v8/src/contexts.h
index 61e6c66..96473df 100644
--- a/src/3rdparty/v8/src/contexts.h
+++ b/src/3rdparty/v8/src/contexts.h
@@ -152,10 +152,11 @@ enum BindingFlags {
V(CONTEXT_EXTENSION_FUNCTION_INDEX, JSFunction, context_extension_function) \
V(OUT_OF_MEMORY_INDEX, Object, out_of_memory) \
V(MAP_CACHE_INDEX, Object, map_cache) \
- V(CONTEXT_DATA_INDEX, Object, data) \
+ V(EMBEDDER_DATA_INDEX, FixedArray, embedder_data) \
V(ALLOW_CODE_GEN_FROM_STRINGS_INDEX, Object, allow_code_gen_from_strings) \
V(ERROR_MESSAGE_FOR_CODE_GEN_FROM_STRINGS_INDEX, Object, \
error_message_for_code_gen_from_strings) \
+ V(SYMBOL_DELEGATE_INDEX, JSObject, symbol_delegate) \
V(TO_COMPLETE_PROPERTY_DESCRIPTOR_INDEX, JSFunction, \
to_complete_property_descriptor) \
V(DERIVED_HAS_TRAP_INDEX, JSFunction, derived_has_trap) \
@@ -284,9 +285,10 @@ class Context: public FixedArray {
OPAQUE_REFERENCE_FUNCTION_INDEX,
CONTEXT_EXTENSION_FUNCTION_INDEX,
OUT_OF_MEMORY_INDEX,
- CONTEXT_DATA_INDEX,
+ EMBEDDER_DATA_INDEX,
ALLOW_CODE_GEN_FROM_STRINGS_INDEX,
ERROR_MESSAGE_FOR_CODE_GEN_FROM_STRINGS_INDEX,
+ SYMBOL_DELEGATE_INDEX,
TO_COMPLETE_PROPERTY_DESCRIPTOR_INDEX,
DERIVED_HAS_TRAP_INDEX,
DERIVED_GET_TRAP_INDEX,
@@ -353,12 +355,19 @@ class Context: public FixedArray {
// The builtins object.
JSBuiltinsObject* builtins();
+ // Get the innermost global context by traversing the context chain.
+ Context* global_context();
+
// Compute the native context by traversing the context chain.
Context* native_context();
- // Predicates for context types. IsNativeContext is defined on Object
+ // Predicates for context types. IsNativeContext is also defined on Object
// because we frequently have to know if arbitrary objects are native
// contexts.
+ bool IsNativeContext() {
+ Map* map = this->map();
+ return map == map->GetHeap()->native_context_map();
+ }
bool IsFunctionContext() {
Map* map = this->map();
return map == map->GetHeap()->function_context_map();
@@ -458,6 +467,9 @@ class Context: public FixedArray {
static bool IsBootstrappingOrValidParentContext(Object* object, Context* kid);
static bool IsBootstrappingOrGlobalObject(Object* object);
#endif
+
+ STATIC_CHECK(kHeaderSize == Internals::kContextHeaderSize);
+ STATIC_CHECK(EMBEDDER_DATA_INDEX == Internals::kContextEmbedderDataIndex);
};
} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/conversions-inl.h b/src/3rdparty/v8/src/conversions-inl.h
index e272fe6..7edaf22 100644
--- a/src/3rdparty/v8/src/conversions-inl.h
+++ b/src/3rdparty/v8/src/conversions-inl.h
@@ -212,7 +212,7 @@ double InternalStringToIntDouble(UnicodeCache* unicode_cache,
}
// Rounding up may cause overflow.
- if ((number & ((int64_t)1 << 53)) != 0) {
+ if ((number & (static_cast<int64_t>(1) << 53)) != 0) {
exponent++;
number >>= 1;
}
@@ -481,9 +481,9 @@ double InternalStringToDouble(UnicodeCache* unicode_cache,
sign = NEGATIVE;
}
- static const char kInfinitySymbol[] = "Infinity";
- if (*current == kInfinitySymbol[0]) {
- if (!SubStringEquals(&current, end, kInfinitySymbol)) {
+ static const char kInfinityString[] = "Infinity";
+ if (*current == kInfinityString[0]) {
+ if (!SubStringEquals(&current, end, kInfinityString)) {
return JunkStringValue();
}
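
The cast cleanup above sits in the significand-overflow check: a double holds 53 significand bits, so if rounding carries into bit 53 the value must be renormalized by shifting right and bumping the exponent. A small sketch of just that step:

    #include <cassert>
    #include <cstdint>

    void RenormalizeAfterRounding(int64_t* number, int* exponent) {
      // Rounding up may cause overflow into bit 53.
      if ((*number & (static_cast<int64_t>(1) << 53)) != 0) {
        (*exponent)++;
        *number >>= 1;
      }
    }

    int main() {
      int64_t number = static_cast<int64_t>(1) << 53;  // overflowed significand
      int exponent = 0;
      RenormalizeAfterRounding(&number, &exponent);
      assert(number == (static_cast<int64_t>(1) << 52));  // 2^53 == 2^52 * 2^1
      assert(exponent == 1);
      return 0;
    }
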
diff --git a/src/3rdparty/v8/src/counters.cc b/src/3rdparty/v8/src/counters.cc
index 811c0aa..7c8265e 100644
--- a/src/3rdparty/v8/src/counters.cc
+++ b/src/3rdparty/v8/src/counters.cc
@@ -81,17 +81,22 @@ void HistogramTimer::Start() {
stop_time_ = 0;
start_time_ = OS::Ticks();
}
+ if (FLAG_log_internal_timer_events) {
+ LOG(Isolate::Current(), TimerEvent(Logger::START, histogram_.name_));
+ }
}
// Stop the timer and record the results.
void HistogramTimer::Stop() {
if (histogram_.Enabled()) {
stop_time_ = OS::Ticks();
-
// Compute the delta between start and stop, in milliseconds.
int milliseconds = static_cast<int>(stop_time_ - start_time_) / 1000;
histogram_.AddSample(milliseconds);
}
+ if (FLAG_log_internal_timer_events) {
+ LOG(Isolate::Current(), TimerEvent(Logger::END, histogram_.name_));
+ }
}
} } // namespace v8::internal
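
The HistogramTimer sample above divides the tick delta by 1000, which implies OS::Ticks() readings are in microseconds. A one-line sketch of that conversion:

    #include <cassert>
    #include <cstdint>

    // Assumes microsecond tick readings, as the division in Stop() implies.
    int ElapsedMilliseconds(int64_t start_time_us, int64_t stop_time_us) {
      return static_cast<int>(stop_time_us - start_time_us) / 1000;
    }

    int main() {
      assert(ElapsedMilliseconds(1000000, 1250000) == 250);
      return 0;
    }
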
diff --git a/src/3rdparty/v8/src/cpu-profiler.cc b/src/3rdparty/v8/src/cpu-profiler.cc
index 3cbac77..3d5e697 100644
--- a/src/3rdparty/v8/src/cpu-profiler.cc
+++ b/src/3rdparty/v8/src/cpu-profiler.cc
@@ -45,13 +45,18 @@ static const int kTickSamplesBufferChunksCount = 16;
static const int kProfilerStackSize = 64 * KB;
-ProfilerEventsProcessor::ProfilerEventsProcessor(ProfileGenerator* generator)
+ProfilerEventsProcessor::ProfilerEventsProcessor(ProfileGenerator* generator,
+ Sampler* sampler,
+ int period_in_useconds)
: Thread(Thread::Options("v8:ProfEvntProc", kProfilerStackSize)),
generator_(generator),
+ sampler_(sampler),
running_(true),
+ period_in_useconds_(period_in_useconds),
ticks_buffer_(sizeof(TickSampleEventRecord),
kTickSamplesBufferChunkSize,
- kTickSamplesBufferChunksCount),
+ kTickSamplesBufferChunksCount,
+ !Sampler::CanSampleOnProfilerEventsProcessorThread()),
enqueue_order_(0) {
}
@@ -239,17 +244,42 @@ bool ProfilerEventsProcessor::ProcessTicks(unsigned dequeue_order) {
}
+void ProfilerEventsProcessor::ProcessEventsAndDoSample(
+ unsigned* dequeue_order) {
+ int64_t stop_time = OS::Ticks() + period_in_useconds_;
+ // Keep processing existing events until we need to do next sample.
+ while (OS::Ticks() < stop_time) {
+ if (ProcessTicks(*dequeue_order)) {
+ // All ticks of the current dequeue_order are processed,
+ // proceed to the next code event.
+ ProcessCodeEvent(dequeue_order);
+ }
+ }
+ // Schedule the next sample; sampler_ is NULL in tests.
+ if (sampler_)
+ sampler_->DoSample();
+}
+
+
+void ProfilerEventsProcessor::ProcessEventsAndYield(unsigned* dequeue_order) {
+ if (ProcessTicks(*dequeue_order)) {
+ // All ticks of the current dequeue_order are processed,
+ // proceed to the next code event.
+ ProcessCodeEvent(dequeue_order);
+ }
+ YieldCPU();
+}
+
+
void ProfilerEventsProcessor::Run() {
unsigned dequeue_order = 0;
while (running_) {
- // Process ticks until we have any.
- if (ProcessTicks(dequeue_order)) {
- // All ticks of the current dequeue_order are processed,
- // proceed to the next code event.
- ProcessCodeEvent(&dequeue_order);
+ if (Sampler::CanSampleOnProfilerEventsProcessorThread()) {
+ ProcessEventsAndDoSample(&dequeue_order);
+ } else {
+ ProcessEventsAndYield(&dequeue_order);
}
- YieldCPU();
}
// Process remaining tick events.
@@ -486,13 +516,15 @@ void CpuProfiler::StartProcessorIfNotStarted() {
if (processor_ == NULL) {
Isolate* isolate = Isolate::Current();
+ Sampler* sampler = reinterpret_cast<Sampler*>(isolate->logger()->ticker_);
// Disable logging when using the new implementation.
saved_logging_nesting_ = isolate->logger()->logging_nesting_;
isolate->logger()->logging_nesting_ = 0;
generator_ = new ProfileGenerator(profiles_);
- processor_ = new ProfilerEventsProcessor(generator_);
- NoBarrier_Store(&is_profiling_, true);
- processor_->Start();
+ processor_ = new ProfilerEventsProcessor(generator_,
+ sampler,
+ FLAG_cpu_profiler_sampling_period);
+ is_profiling_ = true;
// Enumerate stuff we already have in the heap.
if (isolate->heap()->HasBeenSetUp()) {
if (!FLAG_prof_browser_mode) {
@@ -505,12 +537,13 @@ void CpuProfiler::StartProcessorIfNotStarted() {
isolate->logger()->LogAccessorCallbacks();
}
// Enable stack sampling.
- Sampler* sampler = reinterpret_cast<Sampler*>(isolate->logger()->ticker_);
if (!sampler->IsActive()) {
sampler->Start();
need_to_stop_sampler_ = true;
}
+ sampler->SetHasProcessingThread(true);
sampler->IncreaseProfilingDepth();
+ processor_->Start();
}
}
@@ -548,11 +581,12 @@ void CpuProfiler::StopProcessor() {
Logger* logger = Isolate::Current()->logger();
Sampler* sampler = reinterpret_cast<Sampler*>(logger->ticker_);
sampler->DecreaseProfilingDepth();
+ sampler->SetHasProcessingThread(false);
if (need_to_stop_sampler_) {
sampler->Stop();
need_to_stop_sampler_ = false;
}
- NoBarrier_Store(&is_profiling_, false);
+ is_profiling_ = false;
processor_->Stop();
processor_->Join();
delete processor_;
diff --git a/src/3rdparty/v8/src/cpu-profiler.h b/src/3rdparty/v8/src/cpu-profiler.h
index 9cd4484..187cbbf 100644
--- a/src/3rdparty/v8/src/cpu-profiler.h
+++ b/src/3rdparty/v8/src/cpu-profiler.h
@@ -124,7 +124,9 @@ class TickSampleEventRecord {
// methods called by event producers: VM and stack sampler threads.
class ProfilerEventsProcessor : public Thread {
public:
- explicit ProfilerEventsProcessor(ProfileGenerator* generator);
+ ProfilerEventsProcessor(ProfileGenerator* generator,
+ Sampler* sampler,
+ int period_in_useconds);
virtual ~ProfilerEventsProcessor() {}
// Thread control.
@@ -173,11 +175,16 @@ class ProfilerEventsProcessor : public Thread {
// Called from events processing thread (Run() method.)
bool ProcessCodeEvent(unsigned* dequeue_order);
bool ProcessTicks(unsigned dequeue_order);
+ void ProcessEventsAndDoSample(unsigned* dequeue_order);
+ void ProcessEventsAndYield(unsigned* dequeue_order);
INLINE(static bool FilterOutCodeCreateEvent(Logger::LogEventsAndTags tag));
ProfileGenerator* generator_;
+ Sampler* sampler_;
bool running_;
+ // Sampling period in microseconds.
+ const int period_in_useconds_;
UnboundQueue<CodeEventsContainer> events_buffer_;
SamplingCircularQueue ticks_buffer_;
UnboundQueue<TickSampleEventRecord> ticks_from_vm_buffer_;
@@ -245,11 +252,9 @@ class CpuProfiler {
static void SetterCallbackEvent(String* name, Address entry_point);
static void SharedFunctionInfoMoveEvent(Address from, Address to);
- // TODO(isolates): this doesn't have to use atomics anymore.
-
static INLINE(bool is_profiling(Isolate* isolate)) {
CpuProfiler* profiler = isolate->cpu_profiler();
- return profiler != NULL && NoBarrier_Load(&profiler->is_profiling_);
+ return profiler != NULL && profiler->is_profiling_;
}
private:
@@ -271,7 +276,7 @@ class CpuProfiler {
ProfilerEventsProcessor* processor_;
int saved_logging_nesting_;
bool need_to_stop_sampler_;
- Atomic32 is_profiling_;
+ bool is_profiling_;
private:
DISALLOW_COPY_AND_ASSIGN(CpuProfiler);
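The behavioral core of the cpu-profiler change: when Sampler::CanSampleOnProfilerEventsProcessorThread() holds, the processor thread itself drains queued events until the sampling deadline and then takes the sample, instead of yielding and relying on a separate sampler thread. A compilable sketch of that loop shape, with trivial stubs replacing OS::Ticks(), ProcessTicks(), ProcessCodeEvent(), and Sampler::DoSample() (the stubs are illustrative only):

#include <chrono>
#include <cstdint>
#include <cstdio>

static int64_t NowMicros() {
  using namespace std::chrono;
  return duration_cast<microseconds>(
      steady_clock::now().time_since_epoch()).count();
}
static bool ProcessTicks(unsigned /*dequeue_order*/) { return true; }
static void ProcessCodeEvent(unsigned* dequeue_order) { ++*dequeue_order; }
static void DoSample() { std::puts("sample"); }

// Keep consuming already-queued profiler events until the next sampling
// deadline, then trigger exactly one sample.
static void ProcessEventsAndDoSampleSketch(unsigned* dequeue_order,
                                           int period_in_useconds) {
  int64_t stop_time = NowMicros() + period_in_useconds;
  while (NowMicros() < stop_time) {
    if (ProcessTicks(*dequeue_order)) {
      // All ticks of the current dequeue order are processed; advance to
      // the next code event.
      ProcessCodeEvent(dequeue_order);
    }
  }
  DoSample();
}

int main() {
  unsigned dequeue_order = 0;
  ProcessEventsAndDoSampleSketch(&dequeue_order, 1000);  // 1 ms period
  return 0;
}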
diff --git a/src/3rdparty/v8/src/d8-debug.cc b/src/3rdparty/v8/src/d8-debug.cc
index de0faa8..f044328 100644
--- a/src/3rdparty/v8/src/d8-debug.cc
+++ b/src/3rdparty/v8/src/d8-debug.cc
@@ -273,7 +273,7 @@ RemoteDebuggerEvent* RemoteDebugger::GetEvent() {
void RemoteDebugger::HandleMessageReceived(char* message) {
- Locker lock;
+ Locker lock(v8::Isolate::GetCurrent());
HandleScope scope;
// Print the event details.
@@ -302,7 +302,7 @@ void RemoteDebugger::HandleMessageReceived(char* message) {
void RemoteDebugger::HandleKeyboardCommand(char* command) {
- Locker lock;
+ Locker lock(v8::Isolate::GetCurrent());
HandleScope scope;
// Convert the debugger command to a JSON debugger request.
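Both d8-debug.cc hunks make the same mechanical substitution: the parameterless Locker constructor, which implicitly locked the "current" isolate, becomes the explicit-isolate form. A minimal sketch against the 2012-era V8 API (the function name is illustrative):

#include <v8.h>

void HandleDebuggerMessageSketch() {
  v8::Isolate* isolate = v8::Isolate::GetCurrent();
  v8::Locker lock(isolate);  // lock exactly this isolate, not an implicit one
  v8::HandleScope scope;     // handle scope as used throughout this patch
  // ... create handles, dispatch the message ...
}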
diff --git a/src/3rdparty/v8/src/d8-readline.cc b/src/3rdparty/v8/src/d8-readline.cc
index ed7721c..8989263 100644
--- a/src/3rdparty/v8/src/d8-readline.cc
+++ b/src/3rdparty/v8/src/d8-readline.cc
@@ -25,8 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
#include <cstdio> // NOLINT
+#include <string.h> // NOLINT
#include <readline/readline.h> // NOLINT
#include <readline/history.h> // NOLINT
@@ -35,7 +35,6 @@
#include "d8.h"
-
// There are incompatibilities between different versions and different
// implementations of readline. This smooths out one known incompatibility.
#if RL_READLINE_VERSION >= 0x0500
@@ -58,8 +57,10 @@ class ReadLineEditor: public LineEditor {
static const int kMaxHistoryEntries;
private:
+#ifndef V8_SHARED
static char** AttemptedCompletion(const char* text, int start, int end);
static char* CompletionGenerator(const char* text, int state);
+#endif // V8_SHARED
static char kWordBreakCharacters[];
};
@@ -76,7 +77,15 @@ const int ReadLineEditor::kMaxHistoryEntries = 1000;
bool ReadLineEditor::Open() {
rl_initialize();
+
+#ifdef V8_SHARED
+ // Don't do completion in shared library mode
+ // http://cnswww.cns.cwru.edu/php/chet/readline/readline.html#SEC24
+ rl_bind_key('\t', rl_insert);
+#else
rl_attempted_completion_function = AttemptedCompletion;
+#endif // V8_SHARED
+
rl_completer_word_break_characters = kWordBreakCharacters;
rl_bind_key('\t', rl_complete);
using_history();
@@ -122,6 +131,7 @@ void ReadLineEditor::AddHistory(const char* str) {
}
+#ifndef V8_SHARED
char** ReadLineEditor::AttemptedCompletion(const char* text,
int start,
int end) {
@@ -155,6 +165,7 @@ char* ReadLineEditor::CompletionGenerator(const char* text, int state) {
return NULL;
}
}
+#endif // V8_SHARED
} // namespace v8
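d8-readline.cc now selects between two configurations: with shell internals available the completion hook is installed, while in V8_SHARED builds TAB is rebound to plain insertion so readline never calls back into the compiled-out completion code. A standalone sketch of both modes against GNU readline, where the word list and the completion_available flag are illustrative stand-ins for d8's completions and the V8_SHARED #ifdef:

#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <readline/readline.h>

// Yields one candidate per call; state == 0 starts a fresh match run.
static char* Generator(const char* text, int state) {
  static const char* kWords[] = { "print", "load", "quit", NULL };
  static int index, length;
  if (state == 0) { index = 0; length = static_cast<int>(strlen(text)); }
  while (kWords[index] != NULL) {
    const char* word = kWords[index++];
    if (strncmp(word, text, length) == 0) return strdup(word);
  }
  return NULL;  // no more matches
}

static char** Completion(const char* text, int /*start*/, int /*end*/) {
  return rl_completion_matches(text, Generator);
}

int main() {
  bool completion_available = true;  // stands in for #ifndef V8_SHARED
  if (completion_available) {
    rl_attempted_completion_function = Completion;
  } else {
    rl_bind_key('\t', rl_insert);  // TAB inserts itself; no completion
  }
  char* line = readline("d8> ");
  if (line != NULL) {
    std::printf("read: %s\n", line);
    std::free(line);
  }
  return 0;
}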
diff --git a/src/3rdparty/v8/src/d8.cc b/src/3rdparty/v8/src/d8.cc
index b3b1bb8..2d30a1c 100644
--- a/src/3rdparty/v8/src/d8.cc
+++ b/src/3rdparty/v8/src/d8.cc
@@ -67,42 +67,84 @@
namespace v8 {
-LineEditor *LineEditor::first_ = NULL;
-
-LineEditor::LineEditor(Type type, const char* name)
- : type_(type),
- name_(name),
- next_(first_) {
- first_ = this;
+static Handle<Value> Throw(const char* message) {
+ return ThrowException(String::New(message));
}
-LineEditor* LineEditor::Get() {
- LineEditor* current = first_;
- LineEditor* best = current;
- while (current != NULL) {
- if (current->type_ > best->type_)
- best = current;
- current = current->next_;
+// TODO(rossberg): should replace these by proper uses of HasInstance,
+// once we figure out a good way to make the templates global.
+const char kArrayBufferMarkerPropName[] = "d8::_is_array_buffer_";
+const char kArrayMarkerPropName[] = "d8::_is_typed_array_";
+
+
+#define FOR_EACH_SYMBOL(V) \
+ V(ArrayBuffer, "ArrayBuffer") \
+ V(ArrayBufferMarkerPropName, kArrayBufferMarkerPropName) \
+ V(ArrayMarkerPropName, kArrayMarkerPropName) \
+ V(buffer, "buffer") \
+ V(byteLength, "byteLength") \
+ V(byteOffset, "byteOffset") \
+ V(BYTES_PER_ELEMENT, "BYTES_PER_ELEMENT") \
+ V(length, "length")
+
+
+class Symbols {
+ public:
+ explicit Symbols(Isolate* isolate) : isolate_(isolate) {
+ HandleScope scope;
+#define INIT_SYMBOL(name, value) \
+ name##_ = Persistent<String>::New(isolate, String::NewSymbol(value));
+ FOR_EACH_SYMBOL(INIT_SYMBOL)
+#undef INIT_SYMBOL
+ isolate->SetData(this);
+ }
+
+ ~Symbols() {
+#define DISPOSE_SYMBOL(name, value) name##_.Dispose(isolate_);
+ FOR_EACH_SYMBOL(DISPOSE_SYMBOL)
+#undef DISPOSE_SYMBOL
+ isolate_->SetData(NULL); // Not really needed, just to be sure...
}
- return best;
+
+#define DEFINE_SYMBOL_GETTER(name, value) \
+ static Persistent<String> name(Isolate* isolate) { \
+ return reinterpret_cast<Symbols*>(isolate->GetData())->name##_; \
+ }
+ FOR_EACH_SYMBOL(DEFINE_SYMBOL_GETTER)
+#undef DEFINE_SYMBOL_GETTER
+
+ private:
+ Isolate* isolate_;
+#define DEFINE_MEMBER(name, value) Persistent<String> name##_;
+ FOR_EACH_SYMBOL(DEFINE_MEMBER)
+#undef DEFINE_MEMBER
+};
+
+
+LineEditor *LineEditor::current_ = NULL;
+
+
+LineEditor::LineEditor(Type type, const char* name)
+ : type_(type), name_(name) {
+ if (current_ == NULL || current_->type_ < type) current_ = this;
}
class DumbLineEditor: public LineEditor {
public:
- DumbLineEditor() : LineEditor(LineEditor::DUMB, "dumb") { }
+ explicit DumbLineEditor(Isolate* isolate)
+ : LineEditor(LineEditor::DUMB, "dumb"), isolate_(isolate) { }
virtual Handle<String> Prompt(const char* prompt);
+ private:
+ Isolate* isolate_;
};
-static DumbLineEditor dumb_line_editor;
-
-
Handle<String> DumbLineEditor::Prompt(const char* prompt) {
printf("%s", prompt);
- return Shell::ReadFromStdin();
+ return Shell::ReadFromStdin(isolate_);
}
@@ -115,7 +157,6 @@ i::Mutex* Shell::context_mutex_(i::OS::CreateMutex());
Persistent<Context> Shell::utility_context_;
#endif // V8_SHARED
-LineEditor* Shell::console = NULL;
Persistent<Context> Shell::evaluation_context_;
ShellOptions Shell::options;
const char* Shell::kPrompt = "d8> ";
@@ -213,36 +254,36 @@ Handle<Value> Shell::Write(const Arguments& args) {
Exit(1);
}
}
- return Undefined();
+ return Undefined(args.GetIsolate());
}
Handle<Value> Shell::EnableProfiler(const Arguments& args) {
V8::ResumeProfiler();
- return Undefined();
+ return Undefined(args.GetIsolate());
}
Handle<Value> Shell::DisableProfiler(const Arguments& args) {
V8::PauseProfiler();
- return Undefined();
+ return Undefined(args.GetIsolate());
}
Handle<Value> Shell::Read(const Arguments& args) {
String::Utf8Value file(args[0]);
if (*file == NULL) {
- return ThrowException(String::New("Error loading file"));
+ return Throw("Error loading file");
}
- Handle<String> source = ReadFile(*file);
+ Handle<String> source = ReadFile(args.GetIsolate(), *file);
if (source.IsEmpty()) {
- return ThrowException(String::New("Error loading file"));
+ return Throw("Error loading file");
}
return source;
}
-Handle<String> Shell::ReadFromStdin() {
+Handle<String> Shell::ReadFromStdin(Isolate* isolate) {
static const int kBufferSize = 256;
char buffer[kBufferSize];
Handle<String> accumulator = String::New("");
@@ -253,7 +294,7 @@ Handle<String> Shell::ReadFromStdin() {
// If fgets gets an error, just give up.
char* input = NULL;
{ // Release lock for blocking input.
- Unlocker unlock(Isolate::GetCurrent());
+ Unlocker unlock(isolate);
input = fgets(buffer, kBufferSize, stdin);
}
if (input == NULL) return Handle<String>();
@@ -277,17 +318,17 @@ Handle<Value> Shell::Load(const Arguments& args) {
HandleScope handle_scope;
String::Utf8Value file(args[i]);
if (*file == NULL) {
- return ThrowException(String::New("Error loading file"));
+ return Throw("Error loading file");
}
- Handle<String> source = ReadFile(*file);
+ Handle<String> source = ReadFile(args.GetIsolate(), *file);
if (source.IsEmpty()) {
- return ThrowException(String::New("Error loading file"));
+ return Throw("Error loading file");
}
if (!ExecuteString(source, String::New(*file), false, true)) {
- return ThrowException(String::New("Error executing file"));
+ return Throw("Error executing file");
}
}
- return Undefined();
+ return Undefined(args.GetIsolate());
}
static int32_t convertToInt(Local<Value> value_in, TryCatch* try_catch) {
@@ -314,7 +355,7 @@ static int32_t convertToUint(Local<Value> value_in, TryCatch* try_catch) {
if (try_catch->HasCaught()) return 0;
if (raw_value < 0) {
- ThrowException(String::New("Array length must not be negative."));
+ Throw("Array length must not be negative.");
return 0;
}
@@ -323,41 +364,38 @@ static int32_t convertToUint(Local<Value> value_in, TryCatch* try_catch) {
ASSERT(kMaxLength == i::ExternalArray::kMaxLength);
#endif // V8_SHARED
if (raw_value > static_cast<int32_t>(kMaxLength)) {
- ThrowException(
- String::New("Array length exceeds maximum length."));
+ Throw("Array length exceeds maximum length.");
}
return raw_value;
}
-// TODO(rossberg): should replace these by proper uses of HasInstance,
-// once we figure out a good way to make the templates global.
-const char kArrayBufferMarkerPropName[] = "d8::_is_array_buffer_";
-const char kArrayMarkerPropName[] = "d8::_is_typed_array_";
-
-
-Handle<Value> Shell::CreateExternalArrayBuffer(Handle<Object> buffer,
+Handle<Value> Shell::CreateExternalArrayBuffer(Isolate* isolate,
+ Handle<Object> buffer,
int32_t length) {
static const int32_t kMaxSize = 0x7fffffff;
// Make sure the total size fits into a (signed) int.
if (length < 0 || length > kMaxSize) {
- return ThrowException(String::New("ArrayBuffer exceeds maximum size (2G)"));
+ return Throw("ArrayBuffer exceeds maximum size (2G)");
}
uint8_t* data = new uint8_t[length];
if (data == NULL) {
- return ThrowException(String::New("Memory allocation failed"));
+ return Throw("Memory allocation failed");
}
memset(data, 0, length);
- buffer->SetHiddenValue(String::New(kArrayBufferMarkerPropName), True());
- Persistent<Object> persistent_array = Persistent<Object>::New(buffer);
- persistent_array.MakeWeak(data, ExternalArrayWeakCallback);
- persistent_array.MarkIndependent();
+ buffer->SetHiddenValue(Symbols::ArrayBufferMarkerPropName(isolate), True());
+ Persistent<Object> persistent_array =
+ Persistent<Object>::New(isolate, buffer);
+ persistent_array.MakeWeak(isolate, data, ExternalArrayWeakCallback);
+ persistent_array.MarkIndependent(isolate);
V8::AdjustAmountOfExternalAllocatedMemory(length);
buffer->SetIndexedPropertiesToExternalArrayData(
data, v8::kExternalByteArray, length);
- buffer->Set(String::New("byteLength"), Int32::New(length), ReadOnly);
+ buffer->Set(Symbols::byteLength(isolate),
+ Int32::New(length, isolate),
+ ReadOnly);
return buffer;
}
@@ -373,18 +411,18 @@ Handle<Value> Shell::ArrayBuffer(const Arguments& args) {
}
if (args.Length() == 0) {
- return ThrowException(
- String::New("ArrayBuffer constructor must have one argument"));
+ return Throw("ArrayBuffer constructor must have one argument");
}
TryCatch try_catch;
int32_t length = convertToUint(args[0], &try_catch);
if (try_catch.HasCaught()) return try_catch.ReThrow();
- return CreateExternalArrayBuffer(args.This(), length);
+ return CreateExternalArrayBuffer(args.GetIsolate(), args.This(), length);
}
-Handle<Object> Shell::CreateExternalArray(Handle<Object> array,
+Handle<Object> Shell::CreateExternalArray(Isolate* isolate,
+ Handle<Object> array,
Handle<Object> buffer,
ExternalArrayType type,
int32_t length,
@@ -400,12 +438,22 @@ Handle<Object> Shell::CreateExternalArray(Handle<Object> array,
array->SetIndexedPropertiesToExternalArrayData(
static_cast<uint8_t*>(data) + byteOffset, type, length);
- array->SetHiddenValue(String::New(kArrayMarkerPropName), Int32::New(type));
- array->Set(String::New("byteLength"), Int32::New(byteLength), ReadOnly);
- array->Set(String::New("byteOffset"), Int32::New(byteOffset), ReadOnly);
- array->Set(String::New("length"), Int32::New(length), ReadOnly);
- array->Set(String::New("BYTES_PER_ELEMENT"), Int32::New(element_size));
- array->Set(String::New("buffer"), buffer, ReadOnly);
+ array->SetHiddenValue(Symbols::ArrayMarkerPropName(isolate),
+ Int32::New(type, isolate));
+ array->Set(Symbols::byteLength(isolate),
+ Int32::New(byteLength, isolate),
+ ReadOnly);
+ array->Set(Symbols::byteOffset(isolate),
+ Int32::New(byteOffset, isolate),
+ ReadOnly);
+ array->Set(Symbols::length(isolate),
+ Int32::New(length, isolate),
+ ReadOnly);
+ array->Set(Symbols::BYTES_PER_ELEMENT(isolate),
+ Int32::New(element_size, isolate));
+ array->Set(Symbols::buffer(isolate),
+ buffer,
+ ReadOnly);
return array;
}
@@ -414,6 +462,7 @@ Handle<Object> Shell::CreateExternalArray(Handle<Object> array,
Handle<Value> Shell::CreateExternalArray(const Arguments& args,
ExternalArrayType type,
int32_t element_size) {
+ Isolate* isolate = args.GetIsolate();
if (!args.IsConstructCall()) {
Handle<Value>* rec_args = new Handle<Value>[args.Length()];
for (int i = 0; i < args.Length(); ++i) rec_args[i] = args[i];
@@ -439,16 +488,15 @@ Handle<Value> Shell::CreateExternalArray(const Arguments& args,
int32_t byteOffset;
bool init_from_array = false;
if (args.Length() == 0) {
- return ThrowException(
- String::New("Array constructor must have at least one argument"));
+ return Throw("Array constructor must have at least one argument");
}
if (args[0]->IsObject() &&
!args[0]->ToObject()->GetHiddenValue(
- String::New(kArrayBufferMarkerPropName)).IsEmpty()) {
+ Symbols::ArrayBufferMarkerPropName(isolate)).IsEmpty()) {
// Construct from ArrayBuffer.
buffer = args[0]->ToObject();
int32_t bufferLength =
- convertToUint(buffer->Get(String::New("byteLength")), &try_catch);
+ convertToUint(buffer->Get(Symbols::byteLength(isolate)), &try_catch);
if (try_catch.HasCaught()) return try_catch.ReThrow();
if (args.Length() < 2 || args[1]->IsUndefined()) {
@@ -457,11 +505,10 @@ Handle<Value> Shell::CreateExternalArray(const Arguments& args,
byteOffset = convertToUint(args[1], &try_catch);
if (try_catch.HasCaught()) return try_catch.ReThrow();
if (byteOffset > bufferLength) {
- return ThrowException(String::New("byteOffset out of bounds"));
+ return Throw("byteOffset out of bounds");
}
if (byteOffset % element_size != 0) {
- return ThrowException(
- String::New("byteOffset must be multiple of element size"));
+ return Throw("byteOffset must be multiple of element size");
}
}
@@ -469,23 +516,23 @@ Handle<Value> Shell::CreateExternalArray(const Arguments& args,
byteLength = bufferLength - byteOffset;
length = byteLength / element_size;
if (byteLength % element_size != 0) {
- return ThrowException(
- String::New("buffer size must be multiple of element size"));
+ return Throw("buffer size must be multiple of element size");
}
} else {
length = convertToUint(args[2], &try_catch);
if (try_catch.HasCaught()) return try_catch.ReThrow();
byteLength = length * element_size;
if (byteOffset + byteLength > bufferLength) {
- return ThrowException(String::New("length out of bounds"));
+ return Throw("length out of bounds");
}
}
} else {
if (args[0]->IsObject() &&
- args[0]->ToObject()->Has(String::New("length"))) {
+ args[0]->ToObject()->Has(Symbols::length(isolate))) {
// Construct from array.
- length = convertToUint(
- args[0]->ToObject()->Get(String::New("length")), &try_catch);
+ Local<Value> value = args[0]->ToObject()->Get(Symbols::length(isolate));
+ if (try_catch.HasCaught()) return try_catch.ReThrow();
+ length = convertToUint(value, &try_catch);
if (try_catch.HasCaught()) return try_catch.ReThrow();
init_from_array = true;
} else {
@@ -497,21 +544,26 @@ Handle<Value> Shell::CreateExternalArray(const Arguments& args,
byteOffset = 0;
Handle<Object> global = Context::GetCurrent()->Global();
- Handle<Value> array_buffer = global->Get(String::New("ArrayBuffer"));
+ Handle<Value> array_buffer = global->Get(Symbols::ArrayBuffer(isolate));
ASSERT(!try_catch.HasCaught() && array_buffer->IsFunction());
- Handle<Value> buffer_args[] = { Uint32::New(byteLength) };
+ Handle<Value> buffer_args[] = { Uint32::New(byteLength, isolate) };
Handle<Value> result = Handle<Function>::Cast(array_buffer)->NewInstance(
1, buffer_args);
if (try_catch.HasCaught()) return result;
buffer = result->ToObject();
}
- Handle<Object> array = CreateExternalArray(
- args.This(), buffer, type, length, byteLength, byteOffset, element_size);
+ Handle<Object> array =
+ CreateExternalArray(isolate, args.This(), buffer, type, length,
+ byteLength, byteOffset, element_size);
if (init_from_array) {
Handle<Object> init = args[0]->ToObject();
- for (int i = 0; i < length; ++i) array->Set(i, init->Get(i));
+ for (int i = 0; i < length; ++i) {
+ Local<Value> value = init->Get(i);
+ if (try_catch.HasCaught()) return try_catch.ReThrow();
+ array->Set(i, value);
+ }
}
return array;
@@ -522,25 +574,23 @@ Handle<Value> Shell::ArrayBufferSlice(const Arguments& args) {
TryCatch try_catch;
if (!args.This()->IsObject()) {
- return ThrowException(
- String::New("'slice' invoked on non-object receiver"));
+ return Throw("'slice' invoked on non-object receiver");
}
+ Isolate* isolate = args.GetIsolate();
Local<Object> self = args.This();
Local<Value> marker =
- self->GetHiddenValue(String::New(kArrayBufferMarkerPropName));
+ self->GetHiddenValue(Symbols::ArrayBufferMarkerPropName(isolate));
if (marker.IsEmpty()) {
- return ThrowException(
- String::New("'slice' invoked on wrong receiver type"));
+ return Throw("'slice' invoked on wrong receiver type");
}
int32_t length =
- convertToUint(self->Get(String::New("byteLength")), &try_catch);
+ convertToUint(self->Get(Symbols::byteLength(isolate)), &try_catch);
if (try_catch.HasCaught()) return try_catch.ReThrow();
if (args.Length() == 0) {
- return ThrowException(
- String::New("'slice' must have at least one argument"));
+ return Throw("'slice' must have at least one argument");
}
int32_t begin = convertToInt(args[0], &try_catch);
if (try_catch.HasCaught()) return try_catch.ReThrow();
@@ -561,7 +611,7 @@ Handle<Value> Shell::ArrayBufferSlice(const Arguments& args) {
}
Local<Function> constructor = Local<Function>::Cast(self->GetConstructor());
- Handle<Value> new_args[] = { Uint32::New(end - begin) };
+ Handle<Value> new_args[] = { Uint32::New(end - begin, isolate) };
Handle<Value> result = constructor->NewInstance(1, new_args);
if (try_catch.HasCaught()) return result;
Handle<Object> buffer = result->ToObject();
@@ -579,32 +629,31 @@ Handle<Value> Shell::ArraySubArray(const Arguments& args) {
TryCatch try_catch;
if (!args.This()->IsObject()) {
- return ThrowException(
- String::New("'subarray' invoked on non-object receiver"));
+ return Throw("'subarray' invoked on non-object receiver");
}
+ Isolate* isolate = args.GetIsolate();
Local<Object> self = args.This();
- Local<Value> marker = self->GetHiddenValue(String::New(kArrayMarkerPropName));
+ Local<Value> marker =
+ self->GetHiddenValue(Symbols::ArrayMarkerPropName(isolate));
if (marker.IsEmpty()) {
- return ThrowException(
- String::New("'subarray' invoked on wrong receiver type"));
+ return Throw("'subarray' invoked on wrong receiver type");
}
- Handle<Object> buffer = self->Get(String::New("buffer"))->ToObject();
+ Handle<Object> buffer = self->Get(Symbols::buffer(isolate))->ToObject();
if (try_catch.HasCaught()) return try_catch.ReThrow();
int32_t length =
- convertToUint(self->Get(String::New("length")), &try_catch);
+ convertToUint(self->Get(Symbols::length(isolate)), &try_catch);
if (try_catch.HasCaught()) return try_catch.ReThrow();
int32_t byteOffset =
- convertToUint(self->Get(String::New("byteOffset")), &try_catch);
+ convertToUint(self->Get(Symbols::byteOffset(isolate)), &try_catch);
if (try_catch.HasCaught()) return try_catch.ReThrow();
int32_t element_size =
- convertToUint(self->Get(String::New("BYTES_PER_ELEMENT")), &try_catch);
+ convertToUint(self->Get(Symbols::BYTES_PER_ELEMENT(isolate)), &try_catch);
if (try_catch.HasCaught()) return try_catch.ReThrow();
if (args.Length() == 0) {
- return ThrowException(
- String::New("'subarray' must have at least one argument"));
+ return Throw("'subarray' must have at least one argument");
}
int32_t begin = convertToInt(args[0], &try_catch);
if (try_catch.HasCaught()) return try_catch.ReThrow();
@@ -629,7 +678,7 @@ Handle<Value> Shell::ArraySubArray(const Arguments& args) {
Local<Function> constructor = Local<Function>::Cast(self->GetConstructor());
Handle<Value> construct_args[] = {
- buffer, Uint32::New(byteOffset), Uint32::New(length)
+ buffer, Uint32::New(byteOffset, isolate), Uint32::New(length, isolate)
};
return constructor->NewInstance(3, construct_args);
}
@@ -639,35 +688,33 @@ Handle<Value> Shell::ArraySet(const Arguments& args) {
TryCatch try_catch;
if (!args.This()->IsObject()) {
- return ThrowException(
- String::New("'set' invoked on non-object receiver"));
+ return Throw("'set' invoked on non-object receiver");
}
+ Isolate* isolate = args.GetIsolate();
Local<Object> self = args.This();
- Local<Value> marker = self->GetHiddenValue(String::New(kArrayMarkerPropName));
+ Local<Value> marker =
+ self->GetHiddenValue(Symbols::ArrayMarkerPropName(isolate));
if (marker.IsEmpty()) {
- return ThrowException(
- String::New("'set' invoked on wrong receiver type"));
+ return Throw("'set' invoked on wrong receiver type");
}
int32_t length =
- convertToUint(self->Get(String::New("length")), &try_catch);
+ convertToUint(self->Get(Symbols::length(isolate)), &try_catch);
if (try_catch.HasCaught()) return try_catch.ReThrow();
int32_t element_size =
- convertToUint(self->Get(String::New("BYTES_PER_ELEMENT")), &try_catch);
+ convertToUint(self->Get(Symbols::BYTES_PER_ELEMENT(isolate)), &try_catch);
if (try_catch.HasCaught()) return try_catch.ReThrow();
if (args.Length() == 0) {
- return ThrowException(
- String::New("'set' must have at least one argument"));
+ return Throw("'set' must have at least one argument");
}
if (!args[0]->IsObject() ||
- !args[0]->ToObject()->Has(String::New("length"))) {
- return ThrowException(
- String::New("'set' invoked with non-array argument"));
+ !args[0]->ToObject()->Has(Symbols::length(isolate))) {
+ return Throw("'set' invoked with non-array argument");
}
Handle<Object> source = args[0]->ToObject();
int32_t source_length =
- convertToUint(source->Get(String::New("length")), &try_catch);
+ convertToUint(source->Get(Symbols::length(isolate)), &try_catch);
if (try_catch.HasCaught()) return try_catch.ReThrow();
int32_t offset;
@@ -678,31 +725,32 @@ Handle<Value> Shell::ArraySet(const Arguments& args) {
if (try_catch.HasCaught()) return try_catch.ReThrow();
}
if (offset + source_length > length) {
- return ThrowException(String::New("offset or source length out of bounds"));
+ return Throw("offset or source length out of bounds");
}
int32_t source_element_size;
- if (source->GetHiddenValue(String::New(kArrayMarkerPropName)).IsEmpty()) {
+ if (source->GetHiddenValue(Symbols::ArrayMarkerPropName(isolate)).IsEmpty()) {
source_element_size = 0;
} else {
source_element_size =
- convertToUint(source->Get(String::New("BYTES_PER_ELEMENT")), &try_catch);
+ convertToUint(source->Get(Symbols::BYTES_PER_ELEMENT(isolate)),
+ &try_catch);
if (try_catch.HasCaught()) return try_catch.ReThrow();
}
if (element_size == source_element_size &&
self->GetConstructor()->StrictEquals(source->GetConstructor())) {
// Use memmove on the array buffers.
- Handle<Object> buffer = self->Get(String::New("buffer"))->ToObject();
+ Handle<Object> buffer = self->Get(Symbols::buffer(isolate))->ToObject();
if (try_catch.HasCaught()) return try_catch.ReThrow();
Handle<Object> source_buffer =
- source->Get(String::New("buffer"))->ToObject();
+ source->Get(Symbols::buffer(isolate))->ToObject();
if (try_catch.HasCaught()) return try_catch.ReThrow();
int32_t byteOffset =
- convertToUint(self->Get(String::New("byteOffset")), &try_catch);
+ convertToUint(self->Get(Symbols::byteOffset(isolate)), &try_catch);
if (try_catch.HasCaught()) return try_catch.ReThrow();
int32_t source_byteOffset =
- convertToUint(source->Get(String::New("byteOffset")), &try_catch);
+ convertToUint(source->Get(Symbols::byteOffset(isolate)), &try_catch);
if (try_catch.HasCaught()) return try_catch.ReThrow();
uint8_t* dest = byteOffset + offset * element_size + static_cast<uint8_t*>(
@@ -718,10 +766,10 @@ Handle<Value> Shell::ArraySet(const Arguments& args) {
}
} else {
// Need to copy element-wise to make the right conversions.
- Handle<Object> buffer = self->Get(String::New("buffer"))->ToObject();
+ Handle<Object> buffer = self->Get(Symbols::buffer(isolate))->ToObject();
if (try_catch.HasCaught()) return try_catch.ReThrow();
Handle<Object> source_buffer =
- source->Get(String::New("buffer"))->ToObject();
+ source->Get(Symbols::buffer(isolate))->ToObject();
if (try_catch.HasCaught()) return try_catch.ReThrow();
if (buffer->StrictEquals(source_buffer)) {
@@ -729,10 +777,10 @@ Handle<Value> Shell::ArraySet(const Arguments& args) {
// This gets a bit tricky in the case of different element sizes
// (which, of course, is extremely unlikely to ever occur in practice).
int32_t byteOffset =
- convertToUint(self->Get(String::New("byteOffset")), &try_catch);
+ convertToUint(self->Get(Symbols::byteOffset(isolate)), &try_catch);
if (try_catch.HasCaught()) return try_catch.ReThrow();
int32_t source_byteOffset =
- convertToUint(source->Get(String::New("byteOffset")), &try_catch);
+ convertToUint(source->Get(Symbols::byteOffset(isolate)), &try_catch);
if (try_catch.HasCaught()) return try_catch.ReThrow();
// Copy as much as we can from left to right.
@@ -772,17 +820,19 @@ Handle<Value> Shell::ArraySet(const Arguments& args) {
}
}
- return Undefined();
+ return Undefined(args.GetIsolate());
}
-void Shell::ExternalArrayWeakCallback(Persistent<Value> object, void* data) {
+void Shell::ExternalArrayWeakCallback(v8::Isolate* isolate,
+ Persistent<Value> object,
+ void* data) {
HandleScope scope;
int32_t length =
- object->ToObject()->Get(String::New("byteLength"))->Uint32Value();
+ object->ToObject()->Get(Symbols::byteLength(isolate))->Uint32Value();
V8::AdjustAmountOfExternalAllocatedMemory(-length);
delete[] static_cast<uint8_t*>(data);
- object.Dispose();
+ object.Dispose(isolate);
}
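ExternalArrayWeakCallback above must undo exactly what CreateExternalArrayBuffer set up: the external-memory hint given to the GC and the heap allocation backing the array. A plain-C++ sketch of that accounting discipline (the counter and handle types are illustrative; V8's real mechanism is Persistent::MakeWeak plus V8::AdjustAmountOfExternalAllocatedMemory):

#include <cstdint>
#include <cstdio>
#include <cstring>

static int64_t external_bytes = 0;  // stand-in for the GC's external counter

static void AdjustExternalMemory(int64_t delta) {
  external_bytes += delta;
  std::printf("external memory: %lld bytes\n",
              static_cast<long long>(external_bytes));
}

struct BufferHandle {
  uint8_t* data;
  int32_t length;
};

// What the weak callback undoes: first the memory-pressure hint, then the
// backing store itself.
static void WeakCallback(BufferHandle* handle) {
  AdjustExternalMemory(-handle->length);
  delete[] handle->data;
  handle->data = NULL;
}

int main() {
  BufferHandle buffer;
  buffer.length = 1024;
  buffer.data = new uint8_t[buffer.length];
  std::memset(buffer.data, 0, buffer.length);
  AdjustExternalMemory(buffer.length);  // mirror of the allocation path
  WeakCallback(&buffer);                // normally invoked by the GC
  return 0;
}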
@@ -835,18 +885,16 @@ Handle<Value> Shell::Uint8ClampedArray(const Arguments& args) {
Handle<Value> Shell::Yield(const Arguments& args) {
- v8::Unlocker unlocker;
- return Undefined();
+ v8::Unlocker unlocker(args.GetIsolate());
+ return Undefined(args.GetIsolate());
}
Handle<Value> Shell::Quit(const Arguments& args) {
int exit_code = args[0]->Int32Value();
-#ifndef V8_SHARED
OnExit();
-#endif // V8_SHARED
exit(exit_code);
- return Undefined();
+ return Undefined(args.GetIsolate());
}
@@ -1044,13 +1092,13 @@ void Shell::AddHistogramSample(void* histogram, int sample) {
}
-void Shell::InstallUtilityScript() {
- Locker lock;
+void Shell::InstallUtilityScript(Isolate* isolate) {
+ Locker lock(isolate);
HandleScope scope;
// If we use the utility context, we have to set the security tokens so that
// utility, evaluation and debug context can all access each other.
- utility_context_->SetSecurityToken(Undefined());
- evaluation_context_->SetSecurityToken(Undefined());
+ utility_context_->SetSecurityToken(Undefined(isolate));
+ evaluation_context_->SetSecurityToken(Undefined(isolate));
Context::Scope utility_scope(utility_context_);
#ifdef ENABLE_DEBUGGER_SUPPORT
@@ -1145,7 +1193,7 @@ Handle<FunctionTemplate> Shell::CreateArrayTemplate(InvocationCallback fun) {
}
-Handle<ObjectTemplate> Shell::CreateGlobalTemplate() {
+Handle<ObjectTemplate> Shell::CreateGlobalTemplate(Isolate* isolate) {
Handle<ObjectTemplate> global_template = ObjectTemplate::New();
global_template->Set(String::New("print"), FunctionTemplate::New(Print));
global_template->Set(String::New("write"), FunctionTemplate::New(Write));
@@ -1165,7 +1213,7 @@ Handle<ObjectTemplate> Shell::CreateGlobalTemplate() {
// Bind the handlers for external arrays.
PropertyAttribute attr =
static_cast<PropertyAttribute>(ReadOnly | DontDelete);
- global_template->Set(String::New("ArrayBuffer"),
+ global_template->Set(Symbols::ArrayBuffer(isolate),
CreateArrayBufferTemplate(ArrayBuffer), attr);
global_template->Set(String::New("Int8Array"),
CreateArrayTemplate(Int8Array), attr);
@@ -1186,12 +1234,6 @@ Handle<ObjectTemplate> Shell::CreateGlobalTemplate() {
global_template->Set(String::New("Uint8ClampedArray"),
CreateArrayTemplate(Uint8ClampedArray), attr);
-#ifdef LIVE_OBJECT_LIST
- global_template->Set(String::New("lol_is_enabled"), True());
-#else
- global_template->Set(String::New("lol_is_enabled"), False());
-#endif
-
#if !defined(V8_SHARED) && !defined(_WIN32) && !defined(_WIN64)
Handle<ObjectTemplate> os_templ = ObjectTemplate::New();
AddOSMethods(os_templ);
@@ -1202,7 +1244,7 @@ Handle<ObjectTemplate> Shell::CreateGlobalTemplate() {
}
-void Shell::Initialize() {
+void Shell::Initialize(Isolate* isolate) {
#ifdef COMPRESS_STARTUP_DATA_BZ2
BZip2Decompressor startup_data_decompressor;
int bz2_result = startup_data_decompressor.Decompress();
@@ -1223,12 +1265,15 @@ void Shell::Initialize() {
V8::SetAddHistogramSampleFunction(AddHistogramSample);
}
#endif // V8_SHARED
- if (options.test_shell) return;
+}
+
+void Shell::InitializeDebugger(Isolate* isolate) {
+ if (options.test_shell) return;
#ifndef V8_SHARED
- Locker lock;
+ Locker lock(isolate);
HandleScope scope;
- Handle<ObjectTemplate> global_template = CreateGlobalTemplate();
+ Handle<ObjectTemplate> global_template = CreateGlobalTemplate(isolate);
utility_context_ = Context::New(NULL, global_template);
#ifdef ENABLE_DEBUGGER_SUPPORT
@@ -1242,13 +1287,13 @@ void Shell::Initialize() {
}
-Persistent<Context> Shell::CreateEvaluationContext() {
+Persistent<Context> Shell::CreateEvaluationContext(Isolate* isolate) {
#ifndef V8_SHARED
// This needs to be a critical section since this is not thread-safe
i::ScopedLock lock(context_mutex_);
#endif // V8_SHARED
// Initialize the global objects
- Handle<ObjectTemplate> global_template = CreateGlobalTemplate();
+ Handle<ObjectTemplate> global_template = CreateGlobalTemplate(isolate);
Persistent<Context> context = Context::New(NULL, global_template);
ASSERT(!context.IsEmpty());
Context::Scope scope(context);
@@ -1291,10 +1336,13 @@ int CompareKeys(const void* a, const void* b) {
return strcmp(static_cast<const CounterAndKey*>(a)->key,
static_cast<const CounterAndKey*>(b)->key);
}
+#endif // V8_SHARED
void Shell::OnExit() {
- if (console != NULL) console->Close();
+ LineEditor* line_editor = LineEditor::Get();
+ if (line_editor) line_editor->Close();
+#ifndef V8_SHARED
if (i::FLAG_dump_counters) {
int number_of_counters = 0;
for (CounterMap::Iterator i(counter_map_); i.More(); i.Next()) {
@@ -1327,10 +1375,12 @@ void Shell::OnExit() {
"-------------+\n");
delete [] counters;
}
+ delete context_mutex_;
delete counters_file_;
delete counter_map_;
-}
#endif // V8_SHARED
+}
+
static FILE* FOpen(const char* path, const char* mode) {
@@ -1354,9 +1404,9 @@ static FILE* FOpen(const char* path, const char* mode) {
}
-static char* ReadChars(const char* name, int* size_out) {
+static char* ReadChars(Isolate* isolate, const char* name, int* size_out) {
// Release the V8 lock while reading files.
- v8::Unlocker unlocker(Isolate::GetCurrent());
+ v8::Unlocker unlocker(isolate);
FILE* file = FOpen(name, "rb");
if (file == NULL) return NULL;
@@ -1381,24 +1431,27 @@ Handle<Value> Shell::ReadBuffer(const Arguments& args) {
String::Utf8Value filename(args[0]);
int length;
if (*filename == NULL) {
- return ThrowException(String::New("Error loading file"));
+ return Throw("Error loading file");
}
- uint8_t* data = reinterpret_cast<uint8_t*>(ReadChars(*filename, &length));
+ uint8_t* data = reinterpret_cast<uint8_t*>(
+ ReadChars(args.GetIsolate(), *filename, &length));
if (data == NULL) {
- return ThrowException(String::New("Error reading file"));
+ return Throw("Error reading file");
}
+ Isolate* isolate = args.GetIsolate();
Handle<Object> buffer = Object::New();
- buffer->SetHiddenValue(String::New(kArrayBufferMarkerPropName), True());
- Persistent<Object> persistent_buffer = Persistent<Object>::New(buffer);
- persistent_buffer.MakeWeak(data, ExternalArrayWeakCallback);
- persistent_buffer.MarkIndependent();
+ buffer->SetHiddenValue(Symbols::ArrayBufferMarkerPropName(isolate), True());
+ Persistent<Object> persistent_buffer =
+ Persistent<Object>::New(isolate, buffer);
+ persistent_buffer.MakeWeak(isolate, data, ExternalArrayWeakCallback);
+ persistent_buffer.MarkIndependent(isolate);
V8::AdjustAmountOfExternalAllocatedMemory(length);
buffer->SetIndexedPropertiesToExternalArrayData(
data, kExternalUnsignedByteArray, length);
- buffer->Set(String::New("byteLength"),
- Int32::New(static_cast<int32_t>(length)), ReadOnly);
+ buffer->Set(Symbols::byteLength(isolate),
+ Int32::New(static_cast<int32_t>(length), isolate), ReadOnly);
return buffer;
}
@@ -1427,22 +1480,22 @@ static char* ReadWord(char* data) {
// Reads a file into a v8 string.
-Handle<String> Shell::ReadFile(const char* name) {
+Handle<String> Shell::ReadFile(Isolate* isolate, const char* name) {
int size = 0;
- char* chars = ReadChars(name, &size);
+ char* chars = ReadChars(isolate, name, &size);
if (chars == NULL) return Handle<String>();
- Handle<String> result = String::New(chars);
+ Handle<String> result = String::New(chars, size);
delete[] chars;
return result;
}
-void Shell::RunShell() {
- Locker locker;
+void Shell::RunShell(Isolate* isolate) {
+ Locker locker(isolate);
Context::Scope context_scope(evaluation_context_);
HandleScope outer_scope;
Handle<String> name = String::New("(d8)");
- console = LineEditor::Get();
+ LineEditor* console = LineEditor::Get();
printf("V8 version %s [console: %s]\n", V8::GetVersion(), console->name());
console->Open();
while (true) {
@@ -1459,9 +1512,9 @@ void Shell::RunShell() {
class ShellThread : public i::Thread {
public:
// Takes ownership of the underlying char array of |files|.
- ShellThread(int no, char* files)
+ ShellThread(Isolate* isolate, char* files)
: Thread("d8:ShellThread"),
- no_(no), files_(files) { }
+ isolate_(isolate), files_(files) { }
~ShellThread() {
delete[] files_;
@@ -1469,7 +1522,7 @@ class ShellThread : public i::Thread {
virtual void Run();
private:
- int no_;
+ Isolate* isolate_;
char* files_;
};
@@ -1487,9 +1540,10 @@ void ShellThread::Run() {
}
// Prepare the context for this thread.
- Locker locker;
+ Locker locker(isolate_);
HandleScope outer_scope;
- Persistent<Context> thread_context = Shell::CreateEvaluationContext();
+ Persistent<Context> thread_context =
+ Shell::CreateEvaluationContext(isolate_);
Context::Scope context_scope(thread_context);
while ((ptr != NULL) && (*ptr != '\0')) {
@@ -1502,7 +1556,7 @@ void ShellThread::Run() {
continue;
}
- Handle<String> str = Shell::ReadFile(filename);
+ Handle<String> str = Shell::ReadFile(isolate_, filename);
if (str.IsEmpty()) {
printf("File '%s' not found\n", filename);
Shell::Exit(1);
@@ -1511,7 +1565,7 @@ void ShellThread::Run() {
Shell::ExecuteString(str, String::New(filename), false, false);
}
- thread_context.Dispose();
+ thread_context.Dispose(thread_context->GetIsolate());
ptr = next_line;
}
}
@@ -1530,7 +1584,7 @@ SourceGroup::~SourceGroup() {
}
-void SourceGroup::Execute() {
+void SourceGroup::Execute(Isolate* isolate) {
for (int i = begin_offset_; i < end_offset_; ++i) {
const char* arg = argv_[i];
if (strcmp(arg, "-e") == 0 && i + 1 < end_offset_) {
@@ -1548,7 +1602,7 @@ void SourceGroup::Execute() {
// Use all other arguments as names of files to load and run.
HandleScope handle_scope;
Handle<String> file_name = String::New(arg);
- Handle<String> source = ReadFile(arg);
+ Handle<String> source = ReadFile(isolate, arg);
if (source.IsEmpty()) {
printf("Error reading '%s'\n", arg);
Shell::Exit(1);
@@ -1561,9 +1615,9 @@ void SourceGroup::Execute() {
}
-Handle<String> SourceGroup::ReadFile(const char* name) {
+Handle<String> SourceGroup::ReadFile(Isolate* isolate, const char* name) {
int size;
- char* chars = ReadChars(name, &size);
+ char* chars = ReadChars(isolate, name, &size);
if (chars == NULL) return Handle<String>();
Handle<String> result = String::New(chars, size);
delete[] chars;
@@ -1589,12 +1643,13 @@ void SourceGroup::ExecuteInThread() {
Isolate::Scope iscope(isolate);
Locker lock(isolate);
HandleScope scope;
- Persistent<Context> context = Shell::CreateEvaluationContext();
+ Symbols symbols(isolate);
+ Persistent<Context> context = Shell::CreateEvaluationContext(isolate);
{
Context::Scope cscope(context);
- Execute();
+ Execute(isolate);
}
- context.Dispose();
+ context.Dispose(isolate);
if (Shell::options.send_idle_notification) {
const int kLongIdlePauseInMs = 1000;
V8::ContextDisposedNotification();
@@ -1760,21 +1815,21 @@ bool Shell::SetOptions(int argc, char* argv[]) {
}
-int Shell::RunMain(int argc, char* argv[]) {
+int Shell::RunMain(Isolate* isolate, int argc, char* argv[]) {
#ifndef V8_SHARED
i::List<i::Thread*> threads(1);
if (options.parallel_files != NULL) {
for (int i = 0; i < options.num_parallel_files; i++) {
char* files = NULL;
- { Locker lock(Isolate::GetCurrent());
+ { Locker lock(isolate);
int size = 0;
- files = ReadChars(options.parallel_files[i], &size);
+ files = ReadChars(isolate, options.parallel_files[i], &size);
}
if (files == NULL) {
printf("File list '%s' not found\n", options.parallel_files[i]);
Exit(1);
}
- ShellThread* thread = new ShellThread(threads.length(), files);
+ ShellThread* thread = new ShellThread(isolate, files);
thread->Start();
threads.Add(thread);
}
@@ -1784,9 +1839,9 @@ int Shell::RunMain(int argc, char* argv[]) {
}
#endif // V8_SHARED
{ // NOLINT
- Locker lock;
+ Locker lock(isolate);
HandleScope scope;
- Persistent<Context> context = CreateEvaluationContext();
+ Persistent<Context> context = CreateEvaluationContext(isolate);
if (options.last_run) {
// Keep using the same context in the interactive shell.
evaluation_context_ = context;
@@ -1794,16 +1849,16 @@ int Shell::RunMain(int argc, char* argv[]) {
// If the interactive debugger is enabled make sure to activate
// it before running the files passed on the command line.
if (i::FLAG_debugger) {
- InstallUtilityScript();
+ InstallUtilityScript(isolate);
}
#endif // !V8_SHARED && ENABLE_DEBUGGER_SUPPORT
}
{
Context::Scope cscope(context);
- options.isolate_sources[0].Execute();
+ options.isolate_sources[0].Execute(isolate);
}
if (!options.last_run) {
- context.Dispose();
+ context.Dispose(isolate);
if (options.send_idle_notification) {
const int kLongIdlePauseInMs = 1000;
V8::ContextDisposedNotification();
@@ -1832,7 +1887,7 @@ int Shell::RunMain(int argc, char* argv[]) {
}
if (threads.length() > 0 && options.use_preemption) {
- Locker lock;
+ Locker lock(isolate);
Locker::StopPreemption();
}
#endif // V8_SHARED
@@ -1842,64 +1897,66 @@ int Shell::RunMain(int argc, char* argv[]) {
int Shell::Main(int argc, char* argv[]) {
if (!SetOptions(argc, argv)) return 1;
- Initialize();
-
int result = 0;
- if (options.stress_opt || options.stress_deopt) {
- Testing::SetStressRunType(
- options.stress_opt ? Testing::kStressTypeOpt
- : Testing::kStressTypeDeopt);
- int stress_runs = Testing::GetStressRuns();
- for (int i = 0; i < stress_runs && result == 0; i++) {
- printf("============ Stress %d/%d ============\n", i + 1, stress_runs);
- Testing::PrepareStressRun(i);
- options.last_run = (i == stress_runs - 1);
- result = RunMain(argc, argv);
- }
- printf("======== Full Deoptimization =======\n");
- Testing::DeoptimizeAll();
+ Isolate* isolate = Isolate::GetCurrent();
+ DumbLineEditor dumb_line_editor(isolate);
+ {
+ Initialize(isolate);
+ Symbols symbols(isolate);
+ InitializeDebugger(isolate);
+
+ if (options.stress_opt || options.stress_deopt) {
+ Testing::SetStressRunType(options.stress_opt
+ ? Testing::kStressTypeOpt
+ : Testing::kStressTypeDeopt);
+ int stress_runs = Testing::GetStressRuns();
+ for (int i = 0; i < stress_runs && result == 0; i++) {
+ printf("============ Stress %d/%d ============\n", i + 1, stress_runs);
+ Testing::PrepareStressRun(i);
+ options.last_run = (i == stress_runs - 1);
+ result = RunMain(isolate, argc, argv);
+ }
+ printf("======== Full Deoptimization =======\n");
+ Testing::DeoptimizeAll();
#if !defined(V8_SHARED)
- } else if (i::FLAG_stress_runs > 0) {
- int stress_runs = i::FLAG_stress_runs;
- for (int i = 0; i < stress_runs && result == 0; i++) {
- printf("============ Run %d/%d ============\n", i + 1, stress_runs);
- options.last_run = (i == stress_runs - 1);
- result = RunMain(argc, argv);
- }
+ } else if (i::FLAG_stress_runs > 0) {
+ int stress_runs = i::FLAG_stress_runs;
+ for (int i = 0; i < stress_runs && result == 0; i++) {
+ printf("============ Run %d/%d ============\n", i + 1, stress_runs);
+ options.last_run = (i == stress_runs - 1);
+ result = RunMain(isolate, argc, argv);
+ }
#endif
- } else {
- result = RunMain(argc, argv);
- }
+ } else {
+ result = RunMain(isolate, argc, argv);
+ }
#if !defined(V8_SHARED) && defined(ENABLE_DEBUGGER_SUPPORT)
- // Run remote debugger if requested, but never on --test
- if (i::FLAG_remote_debugger && !options.test_shell) {
- InstallUtilityScript();
- RunRemoteDebugger(i::FLAG_debugger_port);
- return 0;
- }
+ // Run remote debugger if requested, but never on --test
+ if (i::FLAG_remote_debugger && !options.test_shell) {
+ InstallUtilityScript(isolate);
+ RunRemoteDebugger(i::FLAG_debugger_port);
+ return 0;
+ }
#endif // !V8_SHARED && ENABLE_DEBUGGER_SUPPORT
- // Run interactive shell if explicitly requested or if no script has been
- // executed, but never on --test
+ // Run interactive shell if explicitly requested or if no script has been
+ // executed, but never on --test
- if (( options.interactive_shell
- || !options.script_executed )
- && !options.test_shell ) {
+ if (( options.interactive_shell || !options.script_executed )
+ && !options.test_shell ) {
#if !defined(V8_SHARED) && defined(ENABLE_DEBUGGER_SUPPORT)
- if (!i::FLAG_debugger) {
- InstallUtilityScript();
- }
+ if (!i::FLAG_debugger) {
+ InstallUtilityScript(isolate);
+ }
#endif // !V8_SHARED && ENABLE_DEBUGGER_SUPPORT
- RunShell();
+ RunShell(isolate);
+ }
}
-
V8::Dispose();
-#ifndef V8_SHARED
OnExit();
-#endif // V8_SHARED
return result;
}
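The biggest structural addition in d8.cc is the per-isolate Symbols cache: one FOR_EACH_SYMBOL X-macro list expands into the Persistent<String> members, their initialization and disposal, and static getters, and the constructed cache is parked in the isolate's embedder-data slot via SetData(). A standalone sketch of the X-macro shape, where FakeIsolate and std::string stand in for v8::Isolate and Persistent<String>:

#include <cassert>
#include <cstddef>
#include <string>

struct FakeIsolate {
  void* data;
  FakeIsolate() : data(NULL) {}
  void SetData(void* d) { data = d; }
  void* GetData() const { return data; }
};

#define FOR_EACH_SYMBOL(V) \
  V(buffer)                \
  V(byteLength)            \
  V(length)

class Symbols {
 public:
  explicit Symbols(FakeIsolate* isolate) : isolate_(isolate) {
#define INIT_SYMBOL(name) name##_ = #name;
    FOR_EACH_SYMBOL(INIT_SYMBOL)
#undef INIT_SYMBOL
    isolate->SetData(this);  // one cache instance per isolate
  }
  ~Symbols() { isolate_->SetData(NULL); }

#define DEFINE_GETTER(name)                                    \
  static const std::string& name(FakeIsolate* isolate) {       \
    return static_cast<Symbols*>(isolate->GetData())->name##_; \
  }
  FOR_EACH_SYMBOL(DEFINE_GETTER)
#undef DEFINE_GETTER

 private:
  FakeIsolate* isolate_;
#define DEFINE_MEMBER(name) std::string name##_;
  FOR_EACH_SYMBOL(DEFINE_MEMBER)
#undef DEFINE_MEMBER
};

int main() {
  FakeIsolate isolate;
  Symbols symbols(&isolate);
  assert(Symbols::byteLength(&isolate) == "byteLength");
  return 0;
}

Caching the handles once per isolate is what lets the rest of the patch replace the repeated String::New("byteLength")-style allocations with Symbols::byteLength(isolate) lookups.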
diff --git a/src/3rdparty/v8/src/d8.gyp b/src/3rdparty/v8/src/d8.gyp
index a8361e6..cce8f2a 100644
--- a/src/3rdparty/v8/src/d8.gyp
+++ b/src/3rdparty/v8/src/d8.gyp
@@ -45,6 +45,10 @@
'd8.cc',
],
'conditions': [
+ [ 'console=="readline"', {
+ 'libraries': [ '-lreadline', ],
+ 'sources': [ 'd8-readline.cc' ],
+ }],
[ 'component!="shared_library"', {
'sources': [ 'd8-debug.cc', '<(SHARED_INTERMEDIATE_DIR)/d8-js.cc', ],
'conditions': [
@@ -57,10 +61,6 @@
'd8_js2c',
],
}],
- [ 'console=="readline"', {
- 'libraries': [ '-lreadline', ],
- 'sources': [ 'd8-readline.cc' ],
- }],
['(OS=="linux" or OS=="mac" or OS=="freebsd" or OS=="netbsd" \
or OS=="openbsd" or OS=="solaris" or OS=="android")', {
'sources': [ 'd8-posix.cc', ]
diff --git a/src/3rdparty/v8/src/d8.h b/src/3rdparty/v8/src/d8.h
index a62a81f..f3b3fa1 100644
--- a/src/3rdparty/v8/src/d8.h
+++ b/src/3rdparty/v8/src/d8.h
@@ -128,12 +128,11 @@ class LineEditor {
virtual void AddHistory(const char* str) { }
const char* name() { return name_; }
- static LineEditor* Get();
+ static LineEditor* Get() { return current_; }
private:
Type type_;
const char* name_;
- LineEditor* next_;
- static LineEditor* first_;
+ static LineEditor* current_;
};
@@ -158,7 +157,7 @@ class SourceGroup {
void End(int offset) { end_offset_ = offset; }
- void Execute();
+ void Execute(Isolate* isolate);
#ifndef V8_SHARED
void StartExecuteInThread();
@@ -187,7 +186,7 @@ class SourceGroup {
#endif // V8_SHARED
void ExitShell(int exit_code);
- Handle<String> ReadFile(const char* name);
+ Handle<String> ReadFile(Isolate* isolate, const char* name);
const char** argv_;
int begin_offset_;
@@ -272,16 +271,16 @@ class Shell : public i::AllStatic {
bool report_exceptions);
static const char* ToCString(const v8::String::Utf8Value& value);
static void ReportException(TryCatch* try_catch);
- static Handle<String> ReadFile(const char* name);
- static Persistent<Context> CreateEvaluationContext();
- static int RunMain(int argc, char* argv[]);
+ static Handle<String> ReadFile(Isolate* isolate, const char* name);
+ static Persistent<Context> CreateEvaluationContext(Isolate* isolate);
+ static int RunMain(Isolate* isolate, int argc, char* argv[]);
static int Main(int argc, char* argv[]);
static void Exit(int exit_code);
+ static void OnExit();
#ifndef V8_SHARED
static Handle<Array> GetCompletions(Handle<String> text,
Handle<String> full);
- static void OnExit();
static int* LookupCounter(const char* name);
static void* CreateHistogram(const char* name,
int min,
@@ -310,9 +309,9 @@ class Shell : public i::AllStatic {
static Handle<Value> DisableProfiler(const Arguments& args);
static Handle<Value> Read(const Arguments& args);
static Handle<Value> ReadBuffer(const Arguments& args);
- static Handle<String> ReadFromStdin();
+ static Handle<String> ReadFromStdin(Isolate* isolate);
static Handle<Value> ReadLine(const Arguments& args) {
- return ReadFromStdin();
+ return ReadFromStdin(args.GetIsolate());
}
static Handle<Value> Load(const Arguments& args);
static Handle<Value> ArrayBuffer(const Arguments& args);
@@ -365,7 +364,6 @@ class Shell : public i::AllStatic {
static void AddOSMethods(Handle<ObjectTemplate> os_template);
- static LineEditor* console;
static const char* kPrompt;
static ShellOptions options;
@@ -382,17 +380,20 @@ class Shell : public i::AllStatic {
static i::Mutex* context_mutex_;
static Counter* GetCounter(const char* name, bool is_histogram);
- static void InstallUtilityScript();
+ static void InstallUtilityScript(Isolate* isolate);
#endif // V8_SHARED
- static void Initialize();
- static void RunShell();
+ static void Initialize(Isolate* isolate);
+ static void InitializeDebugger(Isolate* isolate);
+ static void RunShell(Isolate* isolate);
static bool SetOptions(int argc, char* argv[]);
- static Handle<ObjectTemplate> CreateGlobalTemplate();
+ static Handle<ObjectTemplate> CreateGlobalTemplate(Isolate* isolate);
static Handle<FunctionTemplate> CreateArrayBufferTemplate(InvocationCallback);
static Handle<FunctionTemplate> CreateArrayTemplate(InvocationCallback);
- static Handle<Value> CreateExternalArrayBuffer(Handle<Object> buffer,
+ static Handle<Value> CreateExternalArrayBuffer(Isolate* isolate,
+ Handle<Object> buffer,
int32_t size);
- static Handle<Object> CreateExternalArray(Handle<Object> array,
+ static Handle<Object> CreateExternalArray(Isolate* isolate,
+ Handle<Object> array,
Handle<Object> buffer,
ExternalArrayType type,
int32_t length,
@@ -402,7 +403,9 @@ class Shell : public i::AllStatic {
static Handle<Value> CreateExternalArray(const Arguments& args,
ExternalArrayType type,
int32_t element_size);
- static void ExternalArrayWeakCallback(Persistent<Value> object, void* data);
+ static void ExternalArrayWeakCallback(Isolate* isolate,
+ Persistent<Value> object,
+ void* data);
};
diff --git a/src/3rdparty/v8/src/d8.js b/src/3rdparty/v8/src/d8.js
index 819135a..3cb1819 100644
--- a/src/3rdparty/v8/src/d8.js
+++ b/src/3rdparty/v8/src/d8.js
@@ -123,10 +123,6 @@ Debug.State = {
var trace_compile = false; // Tracing all compile events?
var trace_debug_json = false; // Tracing all debug json packets?
var last_cmd = '';
-//var lol_is_enabled; // Set to true in d8.cc if LIVE_OBJECT_LIST is defined.
-var lol_next_dump_index = 0;
-var kDefaultLolLinesToPrintAtATime = 10;
-var kMaxLolLinesToPrintAtATime = 1000;
var repeat_cmd_line = '';
var is_running = true;
// Global variable used to store whether a handle was requested.
@@ -507,13 +503,6 @@ function DebugRequest(cmd_line) {
this.request_ = void 0;
break;
- case 'liveobjectlist':
- case 'lol':
- if (lol_is_enabled) {
- this.request_ = this.lolToJSONRequest_(args, is_repeating);
- break;
- }
-
default:
throw new Error('Unknown command "' + cmd + '"');
}
@@ -558,53 +547,10 @@ DebugRequest.prototype.createRequest = function(command) {
};
-// Note: we use detected command repetition as a signal for continuation here.
-DebugRequest.prototype.createLOLRequest = function(command,
- start_index,
- lines_to_dump,
- is_continuation) {
- if (is_continuation) {
- start_index = lol_next_dump_index;
- }
-
- if (lines_to_dump) {
- lines_to_dump = parseInt(lines_to_dump);
- } else {
- lines_to_dump = kDefaultLolLinesToPrintAtATime;
- }
- if (lines_to_dump > kMaxLolLinesToPrintAtATime) {
- lines_to_dump = kMaxLolLinesToPrintAtATime;
- }
-
- // Save the next start_index to dump from:
- lol_next_dump_index = start_index + lines_to_dump;
-
- var request = this.createRequest(command);
- request.arguments = {};
- request.arguments.start = start_index;
- request.arguments.count = lines_to_dump;
-
- return request;
-};
-
-
// Create a JSON request for the evaluation command.
DebugRequest.prototype.makeEvaluateJSONRequest_ = function(expression) {
lookup_handle = null;
- if (lol_is_enabled) {
- // Check if the expression is a obj id in the form @<obj id>.
- var obj_id_match = expression.match(/^@([0-9]+)$/);
- if (obj_id_match) {
- var obj_id = parseInt(obj_id_match[1]);
- // Build a dump request.
- var request = this.createRequest('getobj');
- request.arguments = {};
- request.arguments.obj_id = obj_id;
- return request.toJSONProtocol();
- }
- }
-
// Check if the expression is a handle id in the form #<handle>#.
var handle_match = expression.match(/^#([0-9]*)#$/);
if (handle_match) {
@@ -1170,10 +1116,6 @@ DebugRequest.prototype.infoCommandToJSONRequest_ = function(args) {
// Build a evaluate request from the text command.
request = this.createRequest('frame');
last_cmd = 'info args';
- } else if (lol_is_enabled &&
- args && (args == 'liveobjectlist' || args == 'lol')) {
- // Build a evaluate request from the text command.
- return this.liveObjectListToJSONRequest_(null);
} else {
throw new Error('Invalid info arguments.');
}
@@ -1224,262 +1166,6 @@ DebugRequest.prototype.gcToJSONRequest_ = function(args) {
};
-// Args: [v[erbose]] [<N>] [i[ndex] <i>] [t[ype] <type>] [sp[ace] <space>]
-DebugRequest.prototype.lolMakeListRequest =
- function(cmd, args, first_arg_index, is_repeating) {
-
- var request;
- var start_index = 0;
- var dump_limit = void 0;
- var type_filter = void 0;
- var space_filter = void 0;
- var prop_filter = void 0;
- var is_verbose = false;
- var i;
-
- for (i = first_arg_index; i < args.length; i++) {
- var arg = args[i];
- // Check for [v[erbose]]:
- if (arg === 'verbose' || arg === 'v') {
- // Nothing to do. This is already implied by args.length > 3.
- is_verbose = true;
-
- // Check for [<N>]:
- } else if (arg.match(/^[0-9]+$/)) {
- dump_limit = arg;
- is_verbose = true;
-
- // Check for i[ndex] <i>:
- } else if (arg === 'index' || arg === 'i') {
- i++;
- if (args.length < i) {
- throw new Error('Missing index after ' + arg + '.');
- }
- start_index = parseInt(args[i]);
- // The user input start index starts at 1:
- if (start_index <= 0) {
- throw new Error('Invalid index ' + args[i] + '.');
- }
- start_index -= 1;
- is_verbose = true;
-
- // Check for t[ype] <type>:
- } else if (arg === 'type' || arg === 't') {
- i++;
- if (args.length < i) {
- throw new Error('Missing type after ' + arg + '.');
- }
- type_filter = args[i];
-
- // Check for space <heap space name>:
- } else if (arg === 'space' || arg === 'sp') {
- i++;
- if (args.length < i) {
- throw new Error('Missing space name after ' + arg + '.');
- }
- space_filter = args[i];
-
- // Check for property <prop name>:
- } else if (arg === 'property' || arg === 'prop') {
- i++;
- if (args.length < i) {
- throw new Error('Missing property name after ' + arg + '.');
- }
- prop_filter = args[i];
-
- } else {
- throw new Error('Unknown args at ' + arg + '.');
- }
- }
-
- // Build the verbose request:
- if (is_verbose) {
- request = this.createLOLRequest('lol-'+cmd,
- start_index,
- dump_limit,
- is_repeating);
- request.arguments.verbose = true;
- } else {
- request = this.createRequest('lol-'+cmd);
- request.arguments = {};
- }
-
- request.arguments.filter = {};
- if (type_filter) {
- request.arguments.filter.type = type_filter;
- }
- if (space_filter) {
- request.arguments.filter.space = space_filter;
- }
- if (prop_filter) {
- request.arguments.filter.prop = prop_filter;
- }
-
- return request;
-};
-
-
-function extractObjId(args) {
- var id = args;
- id = id.match(/^@([0-9]+)$/);
- if (id) {
- id = id[1];
- } else {
- throw new Error('Invalid obj id ' + args + '.');
- }
- return parseInt(id);
-}
-
-
-DebugRequest.prototype.lolToJSONRequest_ = function(args, is_repeating) {
- var request;
- // Use default command if one is not specified:
- if (!args) {
- args = 'info';
- }
-
- var orig_args = args;
- var first_arg_index;
-
- var arg, i;
- var args = args.split(/\s+/g);
- var cmd = args[0];
- var id;
-
- // Command: <id> [v[erbose]] ...
- if (cmd.match(/^[0-9]+$/)) {
- // Convert to the padded list command:
- // Command: l[ist] <dummy> <id> [v[erbose]] ...
-
- // Insert the implicit 'list' in front and process as normal:
- cmd = 'list';
- args.unshift(cmd);
- }
-
- switch(cmd) {
- // Command: c[apture]
- case 'capture':
- case 'c':
- request = this.createRequest('lol-capture');
- break;
-
- // Command: clear|d[elete] <id>|all
- case 'clear':
- case 'delete':
- case 'del': {
- if (args.length < 2) {
- throw new Error('Missing argument after ' + cmd + '.');
- } else if (args.length > 2) {
- throw new Error('Too many arguments after ' + cmd + '.');
- }
- id = args[1];
- if (id.match(/^[0-9]+$/)) {
- // Delete a specific lol record:
- request = this.createRequest('lol-delete');
- request.arguments = {};
- request.arguments.id = parseInt(id);
- } else if (id === 'all') {
- // Delete all:
- request = this.createRequest('lol-reset');
- } else {
- throw new Error('Invalid argument after ' + cmd + '.');
- }
- break;
- }
-
- // Command: diff <id1> <id2> [<dump options>]
- case 'diff':
- first_arg_index = 3;
-
- // Command: list <dummy> <id> [<dump options>]
- case 'list':
-
- // Command: ret[ainers] <obj id> [<dump options>]
- case 'retainers':
- case 'ret':
- case 'retaining-paths':
- case 'rp': {
- if (cmd === 'ret') cmd = 'retainers';
- else if (cmd === 'rp') cmd = 'retaining-paths';
-
- if (!first_arg_index) first_arg_index = 2;
-
- if (args.length < first_arg_index) {
- throw new Error('Too few arguments after ' + cmd + '.');
- }
-
- var request_cmd = (cmd === 'list') ? 'diff':cmd;
- request = this.lolMakeListRequest(request_cmd,
- args,
- first_arg_index,
- is_repeating);
-
- if (cmd === 'diff') {
- request.arguments.id1 = parseInt(args[1]);
- request.arguments.id2 = parseInt(args[2]);
- } else if (cmd == 'list') {
- request.arguments.id1 = 0;
- request.arguments.id2 = parseInt(args[1]);
- } else {
- request.arguments.id = extractObjId(args[1]);
- }
- break;
- }
-
- // Command: getid
- case 'getid': {
- request = this.createRequest('lol-getid');
- request.arguments = {};
- request.arguments.address = args[1];
- break;
- }
-
- // Command: inf[o] [<N>]
- case 'info':
- case 'inf': {
- if (args.length > 2) {
- throw new Error('Too many arguments after ' + cmd + '.');
- }
- // Built the info request:
- request = this.createLOLRequest('lol-info', 0, args[1], is_repeating);
- break;
- }
-
- // Command: path <obj id 1> <obj id 2>
- case 'path': {
- request = this.createRequest('lol-path');
- request.arguments = {};
- if (args.length > 2) {
- request.arguments.id1 = extractObjId(args[1]);
- request.arguments.id2 = extractObjId(args[2]);
- } else {
- request.arguments.id1 = 0;
- request.arguments.id2 = extractObjId(args[1]);
- }
- break;
- }
-
- // Command: print
- case 'print': {
- request = this.createRequest('lol-print');
- request.arguments = {};
- request.arguments.id = extractObjId(args[1]);
- break;
- }
-
- // Command: reset
- case 'reset': {
- request = this.createRequest('lol-reset');
- break;
- }
-
- default:
- throw new Error('Invalid arguments.');
- }
- return request.toJSONProtocol();
-};
-
-
// Create a JSON request for the threads command.
DebugRequest.prototype.threadsCommandToJSONRequest_ = function(args) {
// Build a threads request from the text command.
@@ -1545,7 +1231,6 @@ DebugRequest.prototype.helpCommand_ = function(args) {
print('inf[o] br[eak] - prints info about breakpoints in use');
print('inf[o] ar[gs] - prints info about arguments of the current function');
print('inf[o] lo[cals] - prints info about locals in the current function');
- print('inf[o] liveobjectlist|lol - same as \'lol info\'');
print('');
print('step [in | next | out | min [step count]]');
print('c[ontinue] - continue executing after a breakpoint');
@@ -1566,49 +1251,6 @@ DebugRequest.prototype.helpCommand_ = function(args) {
print('');
print('gc - runs the garbage collector');
print('');
-
- if (lol_is_enabled) {
- print('liveobjectlist|lol <command> - live object list tracking.');
- print(' where <command> can be:');
- print(' c[apture] - captures a LOL list.');
- print(' clear|del[ete] <id>|all - clears LOL of id <id>.');
- print(' If \'all\' is unspecified instead, will clear all.');
- print(' diff <id1> <id2> [<dump options>]');
- print(' - prints the diff between LOLs id1 and id2.');
- print(' - also see <dump options> below.');
- print(' getid <address> - gets the obj id for the specified address if available.');
- print(' The address must be in hex form prefixed with 0x.');
- print(' inf[o] [<N>] - lists summary info of all LOL lists.');
- print(' If N is specified, will print N items at a time.');
- print(' [l[ist]] <id> [<dump options>]');
- print(' - prints the listing of objects in LOL id.');
- print(' - also see <dump options> below.');
- print(' reset - clears all LOL lists.');
- print(' ret[ainers] <id> [<dump options>]');
- print(' - prints the list of retainers of obj id.');
- print(' - also see <dump options> below.');
- print(' path <id1> <id2> - prints the retaining path from obj id1 to id2.');
- print(' If only one id is specified, will print the path from');
- print(' roots to the specified object if available.');
- print(' print <id> - prints the obj for the specified obj id if available.');
- print('');
- print(' <dump options> includes:');
- print(' [v[erbose]] - do verbose dump.');
- print(' [<N>] - dump N items at a time. Implies verbose dump.');
- print(' If unspecified, N will default to '+
- kDefaultLolLinesToPrintAtATime+'. Max N is '+
- kMaxLolLinesToPrintAtATime+'.');
- print(' [i[ndex] <i>] - start dump from index i. Implies verbose dump.');
- print(' [t[ype] <type>] - filter by type.');
- print(' [sp[ace] <space name>] - filter by heap space where <space name> is one of');
- print(' { cell, code, lo, map, new, old-data, old-pointer }.');
- print('');
- print(' If the verbose option, or an option that implies a verbose dump');
- print(' is specified, then a verbose dump will requested. Else, a summary dump');
- print(' will be requested.');
- print('');
- }
-
print('trace compile');
// hidden command: trace debug json - toggles tracing of debug json packets
print('');
@@ -1709,237 +1351,6 @@ function refObjectToString_(protocolPackage, handle) {
}
-function decodeLolCaptureResponse(body) {
- var result;
- result = 'Captured live object list '+ body.id +
- ': count '+ body.count + ' size ' + body.size;
- return result;
-}
-
-
-function decodeLolDeleteResponse(body) {
- var result;
- result = 'Deleted live object list '+ body.id;
- return result;
-}
-
-
-function digitsIn(value) {
- var digits = 0;
- if (value === 0) value = 1;
- while (value >= 1) {
- digits++;
- value /= 10;
- }
- return digits;
-}
-
-
-function padding(value, max_digits) {
- var padding_digits = max_digits - digitsIn(value);
- var padding = '';
- while (padding_digits > 0) {
- padding += ' ';
- padding_digits--;
- }
- return padding;
-}
-
-
-function decodeLolInfoResponse(body) {
- var result;
- var lists = body.lists;
- var length = lists.length;
- var first_index = body.first_index + 1;
- var has_more = ((first_index + length) <= body.count);
- result = 'captured live object lists';
- if (has_more || (first_index != 1)) {
- result += ' ['+ length +' of '+ body.count +
- ': starting from '+ first_index +']';
- }
- result += ':\n';
- var max_digits = digitsIn(body.count);
- var last_count = 0;
- var last_size = 0;
- for (var i = 0; i < length; i++) {
- var entry = lists[i];
- var count = entry.count;
- var size = entry.size;
- var index = first_index + i;
- result += ' [' + padding(index, max_digits) + index + '] id '+ entry.id +
- ': count '+ count;
- if (last_count > 0) {
- result += '(+' + (count - last_count) + ')';
- }
- result += ' size '+ size;
- if (last_size > 0) {
- result += '(+' + (size - last_size) + ')';
- }
- result += '\n';
- last_count = count;
- last_size = size;
- }
- result += ' total: '+length+' lists\n';
- if (has_more) {
- result += ' -- press <enter> for more --\n';
- } else {
- repeat_cmd_line = '';
- }
- if (length === 0) result += ' none\n';
-
- return result;
-}
-
-
-function decodeLolListResponse(body, title) {
-
- var result;
- var total_count = body.count;
- var total_size = body.size;
- var length;
- var max_digits;
- var i;
- var entry;
- var index;
-
- var max_count_digits = digitsIn(total_count);
- var max_size_digits;
-
- var summary = body.summary;
- if (summary) {
-
- var roots_count = 0;
- var found_root = body.found_root || 0;
- var found_weak_root = body.found_weak_root || 0;
-
- // Print the summary result:
- result = 'summary of objects:\n';
- length = summary.length;
- if (found_root !== 0) {
- roots_count++;
- }
- if (found_weak_root !== 0) {
- roots_count++;
- }
- max_digits = digitsIn(length + roots_count);
- max_size_digits = digitsIn(total_size);
-
- index = 1;
- if (found_root !== 0) {
- result += ' [' + padding(index, max_digits) + index + '] ' +
- ' count '+ 1 + padding(0, max_count_digits) +
- ' '+ padding(0, max_size_digits+1) +
- ' : <root>\n';
- index++;
- }
- if (found_weak_root !== 0) {
- result += ' [' + padding(index, max_digits) + index + '] ' +
- ' count '+ 1 + padding(0, max_count_digits) +
- ' '+ padding(0, max_size_digits+1) +
- ' : <weak root>\n';
- index++;
- }
-
- for (i = 0; i < length; i++) {
- entry = summary[i];
- var count = entry.count;
- var size = entry.size;
- result += ' [' + padding(index, max_digits) + index + '] ' +
- ' count '+ count + padding(count, max_count_digits) +
- ' size '+ size + padding(size, max_size_digits) +
- ' : <' + entry.desc + '>\n';
- index++;
- }
- result += '\n total count: '+(total_count+roots_count)+'\n';
- if (body.size) {
- result += ' total size: '+body.size+'\n';
- }
-
- } else {
- // Print the full dump result:
- var first_index = body.first_index + 1;
- var elements = body.elements;
- length = elements.length;
- var has_more = ((first_index + length) <= total_count);
- result = title;
- if (has_more || (first_index != 1)) {
- result += ' ['+ length +' of '+ total_count +
- ': starting from '+ first_index +']';
- }
- result += ':\n';
- if (length === 0) result += ' none\n';
- max_digits = digitsIn(length);
-
- var max_id = 0;
- var max_size = 0;
- for (i = 0; i < length; i++) {
- entry = elements[i];
- if (entry.id > max_id) max_id = entry.id;
- if (entry.size > max_size) max_size = entry.size;
- }
- var max_id_digits = digitsIn(max_id);
- max_size_digits = digitsIn(max_size);
-
- for (i = 0; i < length; i++) {
- entry = elements[i];
- index = first_index + i;
- result += ' ['+ padding(index, max_digits) + index +']';
- if (entry.id !== 0) {
- result += ' @' + entry.id + padding(entry.id, max_id_digits) +
- ': size ' + entry.size + ', ' +
- padding(entry.size, max_size_digits) + entry.desc + '\n';
- } else {
- // Must be a root or weak root:
- result += ' ' + entry.desc + '\n';
- }
- }
- if (has_more) {
- result += ' -- press <enter> for more --\n';
- } else {
- repeat_cmd_line = '';
- }
- if (length === 0) result += ' none\n';
- }
-
- return result;
-}
-
-
-function decodeLolDiffResponse(body) {
- var title = 'objects';
- return decodeLolListResponse(body, title);
-}
-
-
-function decodeLolRetainersResponse(body) {
- var title = 'retainers for @' + body.id;
- return decodeLolListResponse(body, title);
-}
-
-
-function decodeLolPathResponse(body) {
- return body.path;
-}
-
-
-function decodeLolResetResponse(body) {
- return 'Reset all live object lists.';
-}
-
-
-function decodeLolGetIdResponse(body) {
- if (body.id == 0) {
- return 'Address is invalid, or object has been moved or collected';
- }
- return 'obj id is @' + body.id;
-}
-
-
-function decodeLolPrintResponse(body) {
- return body.dump;
-}
-
-
// Rounds number 'num' to 'length' decimal places.
function roundNumber(num, length) {
var factor = Math.pow(10, length);
@@ -2276,34 +1687,6 @@ function DebugResponseDetails(response) {
}
break;
- case 'lol-capture':
- details.text = decodeLolCaptureResponse(body);
- break;
- case 'lol-delete':
- details.text = decodeLolDeleteResponse(body);
- break;
- case 'lol-diff':
- details.text = decodeLolDiffResponse(body);
- break;
- case 'lol-getid':
- details.text = decodeLolGetIdResponse(body);
- break;
- case 'lol-info':
- details.text = decodeLolInfoResponse(body);
- break;
- case 'lol-print':
- details.text = decodeLolPrintResponse(body);
- break;
- case 'lol-reset':
- details.text = decodeLolResetResponse(body);
- break;
- case 'lol-retainers':
- details.text = decodeLolRetainersResponse(body);
- break;
- case 'lol-path':
- details.text = decodeLolPathResponse(body);
- break;
-
default:
details.text =
'Response for unknown command \'' + response.command() + '\'' +
diff --git a/src/3rdparty/v8/src/data-flow.h b/src/3rdparty/v8/src/data-flow.h
index 71f56e7..7eeb794 100644
--- a/src/3rdparty/v8/src/data-flow.h
+++ b/src/3rdparty/v8/src/data-flow.h
@@ -199,6 +199,61 @@ class BitVector: public ZoneObject {
uint32_t* data_;
};
+class GrowableBitVector BASE_EMBEDDED {
+ public:
+ class Iterator BASE_EMBEDDED {
+ public:
+ Iterator(const GrowableBitVector* target, Zone* zone)
+ : it_(target->bits_ == NULL
+ ? new(zone) BitVector(1, zone)
+ : target->bits_) { }
+ bool Done() const { return it_.Done(); }
+ void Advance() { it_.Advance(); }
+ int Current() const { return it_.Current(); }
+ private:
+ BitVector::Iterator it_;
+ };
+
+ GrowableBitVector() : bits_(NULL) { }
+
+ bool Contains(int value) const {
+ if (!InBitsRange(value)) return false;
+ return bits_->Contains(value);
+ }
+
+ void Add(int value, Zone* zone) {
+ EnsureCapacity(value, zone);
+ bits_->Add(value);
+ }
+
+ void Union(const GrowableBitVector& other, Zone* zone) {
+ for (Iterator it(&other, zone); !it.Done(); it.Advance()) {
+ Add(it.Current(), zone);
+ }
+ }
+
+ void Clear() { if (bits_ != NULL) bits_->Clear(); }
+
+ private:
+ static const int kInitialLength = 1024;
+
+ bool InBitsRange(int value) const {
+ return bits_ != NULL && bits_->length() > value;
+ }
+
+ void EnsureCapacity(int value, Zone* zone) {
+ if (InBitsRange(value)) return;
+ int new_length = bits_ == NULL ? kInitialLength : bits_->length();
+ while (new_length <= value) new_length *= 2;
+ BitVector* new_bits = new(zone) BitVector(new_length, zone);
+ if (bits_ != NULL) new_bits->CopyFrom(*bits_);
+ bits_ = new_bits;
+ }
+
+ BitVector* bits_;
+};
+
+
} } // namespace v8::internal
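
The GrowableBitVector added above allocates lazily and grows by doubling from an initial 1024 bits, copying the old words into a fresh zone-allocated BitVector. A minimal JavaScript sketch of the same strategy (illustrative only, not part of the patch and not V8 API):

    function GrowableBitSet() {
      this.words = null;                     // like bits_, allocated on first use
    }
    GrowableBitSet.prototype.contains = function (value) {
      var w = value >> 5;                    // 32 bits per word
      return this.words !== null && w < this.words.length &&
             (this.words[w] & (1 << (value & 31))) !== 0;
    };
    GrowableBitSet.prototype.add = function (value) {
      var needed = (value >> 5) + 1;
      if (this.words === null || this.words.length < needed) {
        var len = this.words === null ? 32 : this.words.length;  // 32 words = 1024 bits
        while (len < needed) len *= 2;       // double until the bit fits
        var grown = new Int32Array(len);     // zero-filled, like a fresh BitVector
        if (this.words !== null) grown.set(this.words);          // CopyFrom
        this.words = grown;
      }
      this.words[value >> 5] |= (1 << (value & 31));
    };
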
diff --git a/src/3rdparty/v8/src/date.js b/src/3rdparty/v8/src/date.js
index a54cb23..c75d12c 100644
--- a/src/3rdparty/v8/src/date.js
+++ b/src/3rdparty/v8/src/date.js
@@ -107,7 +107,7 @@ function MakeDay(year, month, date) {
}
// Now we rely on year and month being SMIs.
- return %DateMakeDay(year, month) + date - 1;
+ return %DateMakeDay(year | 0, month | 0) + date - 1;
}
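
%DateMakeDay is a runtime call that expects SMI (small-integer) arguments; the `| 0` coercions presumably guarantee the runtime sees int32 values even when earlier arithmetic left year or month as a boxed heap number. The coercion itself is plain JavaScript:

    console.log(2012.0 | 0);            // 2012: a heap number collapses to int32
    console.log(-0 | 0);                // 0: negative zero becomes a plain integer zero
    console.log((4294967296 + 5) | 0);  // 5: int32 wrap-around for out-of-range input
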
diff --git a/src/3rdparty/v8/src/debug-agent.cc b/src/3rdparty/v8/src/debug-agent.cc
index e856222..811c00e 100644
--- a/src/3rdparty/v8/src/debug-agent.cc
+++ b/src/3rdparty/v8/src/debug-agent.cc
@@ -192,21 +192,14 @@ void DebuggerAgentSession::Run() {
}
// Convert UTF-8 to UTF-16.
- unibrow::Utf8InputBuffer<> buf(msg, StrLength(msg));
- int len = 0;
- while (buf.has_more()) {
- buf.GetNext();
- len++;
- }
- ScopedVector<int16_t> temp(len + 1);
- buf.Reset(msg, StrLength(msg));
- for (int i = 0; i < len; i++) {
- temp[i] = buf.GetNext();
- }
+ unibrow::Utf8Decoder<128> decoder(msg, StrLength(msg));
+ int utf16_length = decoder.Utf16Length();
+ ScopedVector<uint16_t> temp(utf16_length + 1);
+ decoder.WriteUtf16(temp.start(), utf16_length);
// Send the request received to the debugger.
- v8::Debug::SendCommand(reinterpret_cast<const uint16_t *>(temp.start()),
- len,
+ v8::Debug::SendCommand(temp.start(),
+ utf16_length,
NULL,
reinterpret_cast<v8::Isolate*>(agent_->isolate()));
diff --git a/src/3rdparty/v8/src/debug-debugger.js b/src/3rdparty/v8/src/debug-debugger.js
index 796d6aa..6c94c15 100644
--- a/src/3rdparty/v8/src/debug-debugger.js
+++ b/src/3rdparty/v8/src/debug-debugger.js
@@ -110,7 +110,6 @@ var debugger_flags = {
}
},
};
-var lol_is_enabled = %HasLOLEnabled();
// Create a new break point object and add it to the list of break points.
@@ -1311,9 +1310,12 @@ ProtocolMessage.prototype.setOption = function(name, value) {
};
-ProtocolMessage.prototype.failed = function(message) {
+ProtocolMessage.prototype.failed = function(message, opt_details) {
this.success = false;
this.message = message;
+ if (IS_OBJECT(opt_details)) {
+ this.error_details = opt_details;
+ }
};
@@ -1360,6 +1362,9 @@ ProtocolMessage.prototype.toJSONProtocol = function() {
if (this.message) {
json.message = this.message;
}
+ if (this.error_details) {
+ json.error_details = this.error_details;
+ }
json.running = this.running;
return JSON.stringify(json);
};
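
With the two hunks above, a failed protocol response can now carry structured details alongside the human-readable message. A hypothetical serialized failure (field contents assumed for illustration):

    var failedResponse = {
      seq: 7,
      request_seq: 7,
      type: 'response',
      command: 'changelive',
      success: false,
      message: 'LiveEdit failed',
      // Present only when failed() was given an object as opt_details:
      error_details: { syntaxErrorMessage: 'Unexpected token )' },
      running: true
    };
    console.log(JSON.stringify(failedResponse));
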
@@ -1432,10 +1437,10 @@ DebugCommandProcessor.prototype.processDebugJSONRequest = function(
this.scopesRequest_(request, response);
} else if (request.command == 'scope') {
this.scopeRequest_(request, response);
+ } else if (request.command == 'setVariableValue') {
+ this.setVariableValueRequest_(request, response);
} else if (request.command == 'evaluate') {
this.evaluateRequest_(request, response);
- } else if (lol_is_enabled && request.command == 'getobj') {
- this.getobjRequest_(request, response);
} else if (request.command == 'lookup') {
this.lookupRequest_(request, response);
} else if (request.command == 'references') {
@@ -1465,28 +1470,6 @@ DebugCommandProcessor.prototype.processDebugJSONRequest = function(
} else if (request.command == 'gc') {
this.gcRequest_(request, response);
- // LiveObjectList tools:
- } else if (lol_is_enabled && request.command == 'lol-capture') {
- this.lolCaptureRequest_(request, response);
- } else if (lol_is_enabled && request.command == 'lol-delete') {
- this.lolDeleteRequest_(request, response);
- } else if (lol_is_enabled && request.command == 'lol-diff') {
- this.lolDiffRequest_(request, response);
- } else if (lol_is_enabled && request.command == 'lol-getid') {
- this.lolGetIdRequest_(request, response);
- } else if (lol_is_enabled && request.command == 'lol-info') {
- this.lolInfoRequest_(request, response);
- } else if (lol_is_enabled && request.command == 'lol-reset') {
- this.lolResetRequest_(request, response);
- } else if (lol_is_enabled && request.command == 'lol-retainers') {
- this.lolRetainersRequest_(request, response);
- } else if (lol_is_enabled && request.command == 'lol-path') {
- this.lolPathRequest_(request, response);
- } else if (lol_is_enabled && request.command == 'lol-print') {
- this.lolPrintRequest_(request, response);
- } else if (lol_is_enabled && request.command == 'lol-stats') {
- this.lolStatsRequest_(request, response);
-
} else {
throw new Error('Unknown command "' + request.command + '" in request');
}
@@ -1958,11 +1941,12 @@ DebugCommandProcessor.prototype.frameRequest_ = function(request, response) {
};
-DebugCommandProcessor.prototype.frameForScopeRequest_ = function(request) {
+DebugCommandProcessor.prototype.resolveFrameFromScopeDescription_ =
+ function(scope_description) {
// Get the frame for which the scope or scopes are requested.
// With no frameNumber argument use the currently selected frame.
- if (request.arguments && !IS_UNDEFINED(request.arguments.frameNumber)) {
- frame_index = request.arguments.frameNumber;
+ if (scope_description && !IS_UNDEFINED(scope_description.frameNumber)) {
+ frame_index = scope_description.frameNumber;
if (frame_index < 0 || this.exec_state_.frameCount() <= frame_index) {
throw new Error('Invalid frame number');
}
@@ -1976,13 +1960,13 @@ DebugCommandProcessor.prototype.frameForScopeRequest_ = function(request) {
// Gets scope host object from request. It is either a function
// ('functionHandle' argument must be specified) or a stack frame
// ('frameNumber' may be specified and the current frame is taken by default).
-DebugCommandProcessor.prototype.scopeHolderForScopeRequest_ =
- function(request) {
- if (request.arguments && "functionHandle" in request.arguments) {
- if (!IS_NUMBER(request.arguments.functionHandle)) {
+DebugCommandProcessor.prototype.resolveScopeHolder_ =
+ function(scope_description) {
+ if (scope_description && "functionHandle" in scope_description) {
+ if (!IS_NUMBER(scope_description.functionHandle)) {
throw new Error('Function handle must be a number');
}
- var function_mirror = LookupMirror(request.arguments.functionHandle);
+ var function_mirror = LookupMirror(scope_description.functionHandle);
if (!function_mirror) {
throw new Error('Failed to find function object by handle');
}
@@ -1997,14 +1981,14 @@ DebugCommandProcessor.prototype.scopeHolderForScopeRequest_ =
}
// Get the frame for which the scopes are requested.
- var frame = this.frameForScopeRequest_(request);
+ var frame = this.resolveFrameFromScopeDescription_(scope_description);
return frame;
}
}
DebugCommandProcessor.prototype.scopesRequest_ = function(request, response) {
- var scope_holder = this.scopeHolderForScopeRequest_(request);
+ var scope_holder = this.resolveScopeHolder_(request.arguments);
// Fill all scopes for this frame or function.
var total_scopes = scope_holder.scopeCount();
@@ -2023,7 +2007,7 @@ DebugCommandProcessor.prototype.scopesRequest_ = function(request, response) {
DebugCommandProcessor.prototype.scopeRequest_ = function(request, response) {
// Get the frame or function for which the scope is requested.
- var scope_holder = this.scopeHolderForScopeRequest_(request);
+ var scope_holder = this.resolveScopeHolder_(request.arguments);
// With no scope argument just return top scope.
var scope_index = 0;
@@ -2038,6 +2022,77 @@ DebugCommandProcessor.prototype.scopeRequest_ = function(request, response) {
};
+// Reads a value from a protocol description. The description may be a type
+// (for singletons), a raw value (primitive types supported in JSON), a
+// string value description plus type (for primitive values), or a handle id.
+// Returns the raw value or throws an exception.
+DebugCommandProcessor.resolveValue_ = function(value_description) {
+ if ("handle" in value_description) {
+ var value_mirror = LookupMirror(value_description.handle);
+ if (!value_mirror) {
+ throw new Error("Failed to resolve value by handle, ' #" +
+ mapping.handle + "# not found");
+ }
+ return value_mirror.value();
+ } else if ("stringDescription" in value_description) {
+ if (value_description.type == BOOLEAN_TYPE) {
+ return Boolean(value_description.stringDescription);
+ } else if (value_description.type == NUMBER_TYPE) {
+ return Number(value_description.stringDescription);
+ } else if (value_description.type == STRING_TYPE) {
+ return String(value_description.stringDescription);
+ } else {
+ throw new Error("Unknown type");
+ }
+ } else if ("value" in value_description) {
+ return value_description.value;
+ } else if (value_description.type == UNDEFINED_TYPE) {
+ return void 0;
+ } else if (value_description.type == NULL_TYPE) {
+ return null;
+ } else {
+ throw new Error("Failed to parse value description");
+ }
+};
+
+
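resolveValue_ accepts four shapes of value description. Each entry below resolves to the commented raw value; the handle form additionally requires a live mirror registered in the current debugger session (handle number assumed for illustration):

    var descriptions = [
      { value: 42 },                                   // raw JSON value -> 42
      { type: 'number', stringDescription: '3.5' },    // primitive via string -> 3.5
      { type: 'boolean', stringDescription: 'true' },  // -> true (any non-empty string)
      { type: 'undefined' },                           // singleton -> void 0
      { type: 'null' },                                // singleton -> null
      { handle: 12 }                                   // mirror handle, session-dependent
    ];
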
+DebugCommandProcessor.prototype.setVariableValueRequest_ =
+ function(request, response) {
+ if (!request.arguments) {
+ response.failed('Missing arguments');
+ return;
+ }
+
+ if (IS_UNDEFINED(request.arguments.name)) {
+ response.failed('Missing variable name');
+ return;
+ }
+ var variable_name = request.arguments.name;
+
+ var scope_description = request.arguments.scope;
+
+ // Get the frame or function for which the scope is requested.
+ var scope_holder = this.resolveScopeHolder_(scope_description);
+
+ if (IS_UNDEFINED(scope_description.number)) {
+ response.failed('Missing scope number');
+ return;
+ }
+ var scope_index = %ToNumber(scope_description.number);
+
+ var scope = scope_holder.scope(scope_index);
+
+ var new_value =
+ DebugCommandProcessor.resolveValue_(request.arguments.newValue);
+
+ scope.setVariableValue(variable_name, new_value);
+
+ var new_value_mirror = MakeMirror(new_value);
+
+ response.body = {
+ newValue: new_value_mirror
+ };
+};
+
+
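Putting setVariableValueRequest_ together: a complete protocol request names a variable, identifies a scope within a frame or function, and supplies the new value as any description resolveValue_ understands. A hypothetical request (indices assumed):

    var request = {
      seq: 11,
      type: 'request',
      command: 'setVariableValue',
      arguments: {
        name: 'x',
        scope: { frameNumber: 0, number: 0 },  // topmost scope of the selected frame
        newValue: { value: 42 }                // any resolveValue_ description works
      }
    };
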
DebugCommandProcessor.prototype.evaluateRequest_ = function(request, response) {
if (!request.arguments) {
return response.failed('Missing arguments');
@@ -2068,16 +2123,14 @@ DebugCommandProcessor.prototype.evaluateRequest_ = function(request, response) {
additional_context_object = {};
for (var i = 0; i < additional_context.length; i++) {
var mapping = additional_context[i];
- if (!IS_STRING(mapping.name) || !IS_NUMBER(mapping.handle)) {
+
+ if (!IS_STRING(mapping.name)) {
return response.failed("Context element #" + i +
- " must contain name:string and handle:number");
+ " doesn't contain name:string property");
}
- var context_value_mirror = LookupMirror(mapping.handle);
- if (!context_value_mirror) {
- return response.failed("Context object '" + mapping.name +
- "' #" + mapping.handle + "# not found");
- }
- additional_context_object[mapping.name] = context_value_mirror.value();
+
+ var raw_value = DebugCommandProcessor.resolveValue_(mapping);
+ additional_context_object[mapping.name] = raw_value;
}
}
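
A consequence of routing additional_context through resolveValue_ is that context bindings in an evaluate request are no longer restricted to mirror handles; any value description is accepted. A hedged illustration:

    var evaluateRequest = {
      seq: 12,
      type: 'request',
      command: 'evaluate',
      arguments: {
        expression: 'a + b',
        additional_context: [
          { name: 'a', value: 1 },                           // raw value, no handle needed
          { name: 'b', type: 'number', stringDescription: '2' }
        ]
      }
    };
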
@@ -2118,24 +2171,6 @@ DebugCommandProcessor.prototype.evaluateRequest_ = function(request, response) {
};
-DebugCommandProcessor.prototype.getobjRequest_ = function(request, response) {
- if (!request.arguments) {
- return response.failed('Missing arguments');
- }
-
- // Pull out arguments.
- var obj_id = request.arguments.obj_id;
-
- // Check for legal arguments.
- if (IS_UNDEFINED(obj_id)) {
- return response.failed('Argument "obj_id" missing');
- }
-
- // Dump the object.
- response.body = MakeMirror(%GetLOLObj(obj_id));
-};
-
-
DebugCommandProcessor.prototype.lookupRequest_ = function(request, response) {
if (!request.arguments) {
return response.failed('Missing arguments');
@@ -2392,8 +2427,17 @@ DebugCommandProcessor.prototype.changeLiveRequest_ = function(
var new_source = request.arguments.new_source;
- var result_description = Debug.LiveEdit.SetScriptSource(the_script,
- new_source, preview_only, change_log);
+ var result_description;
+ try {
+ result_description = Debug.LiveEdit.SetScriptSource(the_script,
+ new_source, preview_only, change_log);
+ } catch (e) {
+ if (e instanceof Debug.LiveEdit.Failure && "details" in e) {
+ response.failed(e.message, e.details);
+ return;
+ }
+ throw e;
+ }
response.body = {change_log: change_log, result: result_description};
if (!preview_only && !this.running_ && result_description.stack_modified) {
@@ -2485,86 +2529,6 @@ DebugCommandProcessor.prototype.gcRequest_ = function(request, response) {
};
-DebugCommandProcessor.prototype.lolCaptureRequest_ =
- function(request, response) {
- response.body = %CaptureLOL();
-};
-
-
-DebugCommandProcessor.prototype.lolDeleteRequest_ =
- function(request, response) {
- var id = request.arguments.id;
- var result = %DeleteLOL(id);
- if (result) {
- response.body = { id: id };
- } else {
- response.failed('Failed to delete: live object list ' + id + ' not found.');
- }
-};
-
-
-DebugCommandProcessor.prototype.lolDiffRequest_ = function(request, response) {
- var id1 = request.arguments.id1;
- var id2 = request.arguments.id2;
- var verbose = request.arguments.verbose;
- var filter = request.arguments.filter;
- if (verbose === true) {
- var start = request.arguments.start;
- var count = request.arguments.count;
- response.body = %DumpLOL(id1, id2, start, count, filter);
- } else {
- response.body = %SummarizeLOL(id1, id2, filter);
- }
-};
-
-
-DebugCommandProcessor.prototype.lolGetIdRequest_ = function(request, response) {
- var address = request.arguments.address;
- response.body = {};
- response.body.id = %GetLOLObjId(address);
-};
-
-
-DebugCommandProcessor.prototype.lolInfoRequest_ = function(request, response) {
- var start = request.arguments.start;
- var count = request.arguments.count;
- response.body = %InfoLOL(start, count);
-};
-
-
-DebugCommandProcessor.prototype.lolResetRequest_ = function(request, response) {
- %ResetLOL();
-};
-
-
-DebugCommandProcessor.prototype.lolRetainersRequest_ =
- function(request, response) {
- var id = request.arguments.id;
- var verbose = request.arguments.verbose;
- var start = request.arguments.start;
- var count = request.arguments.count;
- var filter = request.arguments.filter;
-
- response.body = %GetLOLObjRetainers(id, Mirror.prototype, verbose,
- start, count, filter);
-};
-
-
-DebugCommandProcessor.prototype.lolPathRequest_ = function(request, response) {
- var id1 = request.arguments.id1;
- var id2 = request.arguments.id2;
- response.body = {};
- response.body.path = %GetLOLPath(id1, id2, Mirror.prototype);
-};
-
-
-DebugCommandProcessor.prototype.lolPrintRequest_ = function(request, response) {
- var id = request.arguments.id;
- response.body = {};
- response.body.dump = %PrintLOLObj(id);
-};
-
-
// Check whether the previously processed command caused the VM to become
// running.
DebugCommandProcessor.prototype.isRunning = function() {
@@ -2668,3 +2632,7 @@ function ValueToProtocolValue_(value, mirror_serializer) {
}
return json;
}
+
+Debug.TestApi = {
+ CommandProcessorResolveValue: DebugCommandProcessor.resolveValue_
+};
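
The new Debug.TestApi hook exposes the private resolver so tests can exercise it without a full protocol round-trip. A minimal, assumed mjsunit-style check:

    var resolve = Debug.TestApi.CommandProcessorResolveValue;
    assertEquals(42, resolve({ value: 42 }));          // assertEquals from mjsunit
    assertSame(undefined, resolve({ type: 'undefined' }));
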
diff --git a/src/3rdparty/v8/src/debug.cc b/src/3rdparty/v8/src/debug.cc
index ec25acc..2821578 100644
--- a/src/3rdparty/v8/src/debug.cc
+++ b/src/3rdparty/v8/src/debug.cc
@@ -389,8 +389,8 @@ void BreakLocationIterator::ClearDebugBreak() {
}
-void BreakLocationIterator::PrepareStepIn() {
- HandleScope scope;
+void BreakLocationIterator::PrepareStepIn(Isolate* isolate) {
+ HandleScope scope(isolate);
// Step in can only be prepared if currently positioned on an IC call,
// construct call or CallFunction stub call.
@@ -617,10 +617,10 @@ void ScriptCache::Add(Handle<Script> script) {
Handle<Script> script_ =
Handle<Script>::cast(
(global_handles->Create(*script)));
- global_handles->MakeWeak(
- reinterpret_cast<Object**>(script_.location()),
- this,
- ScriptCache::HandleWeakScript);
+ global_handles->MakeWeak(reinterpret_cast<Object**>(script_.location()),
+ this,
+ NULL,
+ ScriptCache::HandleWeakScript);
entry->value = script_.location();
}
@@ -663,7 +663,9 @@ void ScriptCache::Clear() {
}
-void ScriptCache::HandleWeakScript(v8::Persistent<v8::Value> obj, void* data) {
+void ScriptCache::HandleWeakScript(v8::Isolate* isolate,
+ v8::Persistent<v8::Value> obj,
+ void* data) {
ScriptCache* script_cache = reinterpret_cast<ScriptCache*>(data);
// Find the location of the global handle.
Script** location =
@@ -676,7 +678,7 @@ void ScriptCache::HandleWeakScript(v8::Persistent<v8::Value> obj, void* data) {
script_cache->collected_scripts_.Add(id);
// Clear the weak handle.
- obj.Dispose();
+ obj.Dispose(isolate);
obj.Clear();
}
@@ -696,8 +698,10 @@ void Debug::SetUp(bool create_heap_objects) {
}
-void Debug::HandleWeakDebugInfo(v8::Persistent<v8::Value> obj, void* data) {
- Debug* debug = Isolate::Current()->debug();
+void Debug::HandleWeakDebugInfo(v8::Isolate* isolate,
+ v8::Persistent<v8::Value> obj,
+ void* data) {
+ Debug* debug = reinterpret_cast<Isolate*>(isolate)->debug();
DebugInfoListNode* node = reinterpret_cast<DebugInfoListNode*>(data);
// We need to clear all breakpoints associated with the function to restore
// original code and avoid patching the code twice later because
@@ -721,10 +725,10 @@ DebugInfoListNode::DebugInfoListNode(DebugInfo* debug_info): next_(NULL) {
// Globalize the request debug info object and make it weak.
debug_info_ = Handle<DebugInfo>::cast(
(global_handles->Create(debug_info)));
- global_handles->MakeWeak(
- reinterpret_cast<Object**>(debug_info_.location()),
- this,
- Debug::HandleWeakDebugInfo);
+ global_handles->MakeWeak(reinterpret_cast<Object**>(debug_info_.location()),
+ this,
+ NULL,
+ Debug::HandleWeakDebugInfo);
}
@@ -774,8 +778,11 @@ bool Debug::CompileDebuggerScript(int index) {
factory->NewFunctionFromSharedFunctionInfo(function_info, context);
Handle<Object> exception =
- Execution::TryCall(function, Handle<Object>(context->global_object()),
- 0, NULL, &caught_exception);
+ Execution::TryCall(function,
+ Handle<Object>(context->global_object(), isolate),
+ 0,
+ NULL,
+ &caught_exception);
// Check for caught exceptions.
if (caught_exception) {
@@ -786,9 +793,11 @@ bool Debug::CompileDebuggerScript(int index) {
"error_loading_debugger", &computed_location,
Vector<Handle<Object> >::empty(), Handle<String>(), Handle<JSArray>());
ASSERT(!isolate->has_pending_exception());
- isolate->set_pending_exception(*exception);
- MessageHandler::ReportMessage(Isolate::Current(), NULL, message);
- isolate->clear_pending_exception();
+ if (!exception.is_null()) {
+ isolate->set_pending_exception(*exception);
+ MessageHandler::ReportMessage(Isolate::Current(), NULL, message);
+ isolate->clear_pending_exception();
+ }
return false;
}
@@ -821,7 +830,6 @@ bool Debug::Load() {
HandleScope scope(isolate_);
Handle<Context> context =
isolate_->bootstrapper()->CreateEnvironment(
- isolate_,
Handle<Object>::null(),
v8::Handle<ObjectTemplate>(),
NULL);
@@ -834,12 +842,16 @@ bool Debug::Load() {
isolate_->set_context(*context);
// Expose the builtins object in the debugger context.
- Handle<String> key = isolate_->factory()->LookupAsciiSymbol("builtins");
+ Handle<String> key = isolate_->factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("builtins"));
Handle<GlobalObject> global = Handle<GlobalObject>(context->global_object());
RETURN_IF_EMPTY_HANDLE_VALUE(
isolate_,
- JSReceiver::SetProperty(global, key, Handle<Object>(global->builtins()),
- NONE, kNonStrictMode),
+ JSReceiver::SetProperty(global,
+ key,
+ Handle<Object>(global->builtins(), isolate_),
+ NONE,
+ kNonStrictMode),
false);
// Compile the JavaScript for the debugger in the debugger context.
@@ -944,10 +956,10 @@ Object* Debug::Break(Arguments args) {
// If there is one or more real break points check whether any of these are
// triggered.
- Handle<Object> break_points_hit(heap->undefined_value());
+ Handle<Object> break_points_hit(heap->undefined_value(), isolate_);
if (break_location_iterator.HasBreakPoint()) {
Handle<Object> break_point_objects =
- Handle<Object>(break_location_iterator.BreakPointObjects());
+ Handle<Object>(break_location_iterator.BreakPointObjects(), isolate_);
break_points_hit = CheckBreakPoints(break_point_objects);
}
@@ -1065,7 +1077,7 @@ Handle<Object> Debug::CheckBreakPoints(Handle<Object> break_point_objects) {
Handle<FixedArray> array(FixedArray::cast(*break_point_objects));
break_points_hit = factory->NewFixedArray(array->length());
for (int i = 0; i < array->length(); i++) {
- Handle<Object> o(array->get(i));
+ Handle<Object> o(array->get(i), isolate_);
if (CheckBreakPoint(o)) {
break_points_hit->set(break_points_hit_count++, *o);
}
@@ -1097,12 +1109,13 @@ bool Debug::CheckBreakPoint(Handle<Object> break_point_object) {
if (!break_point_object->IsJSObject()) return true;
// Get the function IsBreakPointTriggered (defined in debug-debugger.js).
- Handle<String> is_break_point_triggered_symbol =
- factory->LookupAsciiSymbol("IsBreakPointTriggered");
+ Handle<String> is_break_point_triggered_string =
+ factory->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("IsBreakPointTriggered"));
Handle<JSFunction> check_break_point =
Handle<JSFunction>(JSFunction::cast(
debug_context()->global_object()->GetPropertyNoExceptionThrown(
- *is_break_point_triggered_symbol)));
+ *is_break_point_triggered_string)));
// Get the break id as an object.
Handle<Object> break_id = factory->NewNumberFromInt(Debug::break_id());
@@ -1287,7 +1300,8 @@ void Debug::FloodWithOneShot(Handle<JSFunction> function) {
void Debug::FloodBoundFunctionWithOneShot(Handle<JSFunction> function) {
Handle<FixedArray> new_bindings(function->function_bindings());
- Handle<Object> bindee(new_bindings->get(JSFunction::kBoundFunctionIndex));
+ Handle<Object> bindee(new_bindings->get(JSFunction::kBoundFunctionIndex),
+ isolate_);
if (!bindee.is_null() && bindee->IsJSFunction() &&
!JSFunction::cast(*bindee)->IsBuiltin()) {
@@ -1485,7 +1499,8 @@ void Debug::PrepareStep(StepAction step_action, int step_count) {
// from the code object.
Handle<Object> obj(
isolate_->heap()->code_stubs()->SlowReverseLookup(
- *call_function_stub));
+ *call_function_stub),
+ isolate_);
ASSERT(!obj.is_null());
ASSERT(!(*obj)->IsUndefined());
ASSERT(obj->IsSmi());
@@ -1539,7 +1554,7 @@ void Debug::PrepareStep(StepAction step_action, int step_count) {
}
// Step in or Step in min
- it.PrepareStepIn();
+ it.PrepareStepIn(isolate_);
ActivateStepIn(frame);
}
}
@@ -1583,7 +1598,7 @@ bool Debug::StepNextContinue(BreakLocationIterator* break_location_iterator,
// object.
bool Debug::IsDebugBreak(Address addr) {
Code* code = Code::GetCodeFromTargetAddress(addr);
- return code->ic_state() == DEBUG_BREAK;
+ return code->is_debug_break();
}
@@ -1658,10 +1673,12 @@ Handle<Object> Debug::GetSourceBreakLocations(
Handle<SharedFunctionInfo> shared) {
Isolate* isolate = Isolate::Current();
Heap* heap = isolate->heap();
- if (!HasDebugInfo(shared)) return Handle<Object>(heap->undefined_value());
+ if (!HasDebugInfo(shared)) {
+ return Handle<Object>(heap->undefined_value(), isolate);
+ }
Handle<DebugInfo> debug_info = GetDebugInfo(shared);
if (debug_info->GetBreakPointCount() == 0) {
- return Handle<Object>(heap->undefined_value());
+ return Handle<Object>(heap->undefined_value(), isolate);
}
Handle<FixedArray> locations =
isolate->factory()->NewFixedArray(debug_info->GetBreakPointCount());
@@ -1696,9 +1713,10 @@ void Debug::HandleStepIn(Handle<JSFunction> function,
Handle<Object> holder,
Address fp,
bool is_constructor) {
+ Isolate* isolate = function->GetIsolate();
// If the frame pointer is not supplied by the caller find it.
if (fp == 0) {
- StackFrameIterator it;
+ StackFrameIterator it(isolate);
it.Advance();
// For constructor functions skip another frame.
if (is_constructor) {
@@ -1717,9 +1735,9 @@ void Debug::HandleStepIn(Handle<JSFunction> function,
} else if (!function->IsBuiltin()) {
// Don't allow step into functions in the native context.
if (function->shared()->code() ==
- Isolate::Current()->builtins()->builtin(Builtins::kFunctionApply) ||
+ isolate->builtins()->builtin(Builtins::kFunctionApply) ||
function->shared()->code() ==
- Isolate::Current()->builtins()->builtin(Builtins::kFunctionCall)) {
+ isolate->builtins()->builtin(Builtins::kFunctionCall)) {
// Handle function.apply and function.call separately to flood the
// function to be called and not the code for Builtins::FunctionApply or
// Builtins::FunctionCall. The receiver of call/apply is the target
@@ -1997,14 +2015,15 @@ void Debug::PrepareForBreakPoints() {
{
// We are going to iterate heap to find all functions without
// debug break slots.
- isolate_->heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask,
- "preparing for breakpoints");
+ Heap* heap = isolate_->heap();
+ heap->CollectAllGarbage(Heap::kMakeHeapIterableMask,
+ "preparing for breakpoints");
// Ensure no GC in this scope as we are going to use gc_metadata
// field in the Code object to mark active functions.
AssertNoAllocation no_allocation;
- Object* active_code_marker = isolate_->heap()->the_hole_value();
+ Object* active_code_marker = heap->the_hole_value();
CollectActiveFunctionsFromThread(isolate_,
isolate_->thread_local_top(),
@@ -2018,7 +2037,7 @@ void Debug::PrepareForBreakPoints() {
// Scan the heap for all non-optimized functions which have no
// debug break slots and are not active or inlined into an active
// function and mark them for lazy compilation.
- HeapIterator iterator;
+ HeapIterator iterator(heap);
HeapObject* obj = NULL;
while (((obj = iterator.next()) != NULL)) {
if (obj->IsJSFunction()) {
@@ -2113,11 +2132,12 @@ Object* Debug::FindSharedFunctionInfoInScript(Handle<Script> script,
int target_start_position = RelocInfo::kNoPosition;
Handle<JSFunction> target_function;
Handle<SharedFunctionInfo> target;
+ Heap* heap = isolate_->heap();
while (!done) {
{ // Extra scope for iterator and no-allocation.
- isolate_->heap()->EnsureHeapIsIterable();
+ heap->EnsureHeapIsIterable();
AssertNoAllocation no_alloc_during_heap_iteration;
- HeapIterator iterator;
+ HeapIterator iterator(heap);
for (HeapObject* obj = iterator.next();
obj != NULL; obj = iterator.next()) {
bool found_next_candidate = false;
@@ -2177,9 +2197,7 @@ Object* Debug::FindSharedFunctionInfoInScript(Handle<Script> script,
} // End for loop.
} // End no-allocation scope.
- if (target.is_null()) {
- return isolate_->heap()->undefined_value();
- }
+ if (target.is_null()) return heap->undefined_value();
// There will be at least one break point when we are done.
has_break_points_ = true;
@@ -2423,11 +2441,11 @@ void Debug::ClearMirrorCache() {
ASSERT(isolate_->context() == *Debug::debug_context());
// Clear the mirror cache.
- Handle<String> function_name =
- isolate_->factory()->LookupSymbol(CStrVector("ClearMirrorCache"));
+ Handle<String> function_name = isolate_->factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("ClearMirrorCache"));
Handle<Object> fun(
- Isolate::Current()->global_object()->GetPropertyNoExceptionThrown(
- *function_name));
+ isolate_->global_object()->GetPropertyNoExceptionThrown(*function_name),
+ isolate_);
ASSERT(fun->IsJSFunction());
bool caught_exception;
Execution::TryCall(Handle<JSFunction>::cast(fun),
@@ -2453,7 +2471,7 @@ void Debug::CreateScriptCache() {
// Scan heap for Script objects.
int count = 0;
- HeapIterator iterator;
+ HeapIterator iterator(heap);
AssertNoAllocation no_allocation;
for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
@@ -2552,10 +2570,10 @@ Handle<Object> Debugger::MakeJSObject(Vector<const char> constructor_name,
// Create the execution state object.
Handle<String> constructor_str =
- isolate_->factory()->LookupSymbol(constructor_name);
+ isolate_->factory()->InternalizeUtf8String(constructor_name);
Handle<Object> constructor(
- isolate_->global_object()->GetPropertyNoExceptionThrown(
- *constructor_str));
+ isolate_->global_object()->GetPropertyNoExceptionThrown(*constructor_str),
+ isolate_);
ASSERT(constructor->IsJSFunction());
if (!constructor->IsJSFunction()) {
*caught_exception = true;
@@ -2643,7 +2661,7 @@ Handle<Object> Debugger::MakeScriptCollectedEvent(int id,
bool* caught_exception) {
// Create the script collected event object.
Handle<Object> exec_state = MakeExecutionState(caught_exception);
- Handle<Object> id_object = Handle<Smi>(Smi::FromInt(id));
+ Handle<Object> id_object = Handle<Smi>(Smi::FromInt(id), isolate_);
Handle<Object> argv[] = { exec_state, id_object };
return MakeJSObject(CStrVector("MakeScriptCollectedEvent"),
@@ -2782,11 +2800,14 @@ void Debugger::OnAfterCompile(Handle<Script> script,
// script. Make sure that these break points are set.
// Get the function UpdateScriptBreakPoints (defined in debug-debugger.js).
- Handle<String> update_script_break_points_symbol =
- isolate_->factory()->LookupAsciiSymbol("UpdateScriptBreakPoints");
+ Handle<String> update_script_break_points_string =
+ isolate_->factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("UpdateScriptBreakPoints"));
Handle<Object> update_script_break_points =
- Handle<Object>(debug->debug_context()->global_object()->
- GetPropertyNoExceptionThrown(*update_script_break_points_symbol));
+ Handle<Object>(
+ debug->debug_context()->global_object()->GetPropertyNoExceptionThrown(
+ *update_script_break_points_string),
+ isolate_);
if (!update_script_break_points->IsJSFunction()) {
return;
}
@@ -2936,7 +2957,7 @@ void Debugger::CallJSEventCallback(v8::DebugEvent event,
Handle<JSFunction> fun(Handle<JSFunction>::cast(event_listener_));
// Invoke the JavaScript debug event listener.
- Handle<Object> argv[] = { Handle<Object>(Smi::FromInt(event)),
+ Handle<Object> argv[] = { Handle<Object>(Smi::FromInt(event), isolate_),
exec_state,
event_data,
event_listener_data_ };
@@ -3319,7 +3340,8 @@ Handle<Object> Debugger::Call(Handle<JSFunction> fun,
Handle<Object> argv[] = { exec_state, data };
Handle<Object> result = Execution::Call(
fun,
- Handle<Object>(isolate_->debug()->debug_context_->global_proxy()),
+ Handle<Object>(isolate_->debug()->debug_context_->global_proxy(),
+ isolate_),
ARRAY_SIZE(argv),
argv,
pending_exception);
@@ -3762,6 +3784,7 @@ void MessageDispatchHelperThread::Schedule() {
void MessageDispatchHelperThread::Run() {
+ Isolate* isolate = Isolate::Current();
while (true) {
sem_->Wait();
{
@@ -3769,8 +3792,8 @@ void MessageDispatchHelperThread::Run() {
already_signalled_ = false;
}
{
- Locker locker;
- Isolate::Current()->debugger()->CallMessageDispatchHandler();
+ Locker locker(reinterpret_cast<v8::Isolate*>(isolate));
+ isolate->debugger()->CallMessageDispatchHandler();
}
}
}
diff --git a/src/3rdparty/v8/src/debug.h b/src/3rdparty/v8/src/debug.h
index 150e29e..c7f0681 100644
--- a/src/3rdparty/v8/src/debug.h
+++ b/src/3rdparty/v8/src/debug.h
@@ -97,7 +97,7 @@ class BreakLocationIterator {
void ClearBreakPoint(Handle<Object> break_point_object);
void SetOneShot();
void ClearOneShot();
- void PrepareStepIn();
+ void PrepareStepIn(Isolate* isolate);
bool IsExit() const;
bool HasBreakPoint();
bool IsDebugBreak();
@@ -189,7 +189,9 @@ class ScriptCache : private HashMap {
void Clear();
// Weak handle callback for scripts in the cache.
- static void HandleWeakScript(v8::Persistent<v8::Value> obj, void* data);
+ static void HandleWeakScript(v8::Isolate* isolate,
+ v8::Persistent<v8::Value> obj,
+ void* data);
// List used during GC to temporarily store id's of collected scripts.
List<int> collected_scripts_;
@@ -384,7 +386,9 @@ class Debug {
static const int kEstimatedNofBreakPointsInFunction = 16;
// Passed to MakeWeak.
- static void HandleWeakDebugInfo(v8::Persistent<v8::Value> obj, void* data);
+ static void HandleWeakDebugInfo(v8::Isolate* isolate,
+ v8::Persistent<v8::Value> obj,
+ void* data);
friend class Debugger;
friend Handle<FixedArray> GetDebuggedFunctions(); // In test-debug.cc
@@ -875,7 +879,9 @@ class Debugger {
void set_loading_debugger(bool v) { is_loading_debugger_ = v; }
bool is_loading_debugger() const { return is_loading_debugger_; }
void set_live_edit_enabled(bool v) { live_edit_enabled_ = v; }
- bool live_edit_enabled() const { return live_edit_enabled_; }
+ bool live_edit_enabled() const {
+ return FLAG_enable_liveedit && live_edit_enabled_;
+ }
void set_force_debugger_active(bool force_debugger_active) {
force_debugger_active_ = force_debugger_active;
}
diff --git a/src/3rdparty/v8/src/deoptimizer.cc b/src/3rdparty/v8/src/deoptimizer.cc
index 9d16211..c0b5945 100644
--- a/src/3rdparty/v8/src/deoptimizer.cc
+++ b/src/3rdparty/v8/src/deoptimizer.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -44,8 +44,18 @@ DeoptimizerData::DeoptimizerData() {
eager_deoptimization_entry_code_entries_ = -1;
lazy_deoptimization_entry_code_entries_ = -1;
size_t deopt_table_size = Deoptimizer::GetMaxDeoptTableSize();
- eager_deoptimization_entry_code_ = new VirtualMemory(deopt_table_size);
- lazy_deoptimization_entry_code_ = new VirtualMemory(deopt_table_size);
+ MemoryAllocator* allocator = Isolate::Current()->memory_allocator();
+ size_t initial_commit_size = OS::CommitPageSize();
+ eager_deoptimization_entry_code_ =
+ allocator->AllocateChunk(deopt_table_size,
+ initial_commit_size,
+ EXECUTABLE,
+ NULL);
+ lazy_deoptimization_entry_code_ =
+ allocator->AllocateChunk(deopt_table_size,
+ initial_commit_size,
+ EXECUTABLE,
+ NULL);
current_ = NULL;
deoptimizing_code_list_ = NULL;
#ifdef ENABLE_DEBUGGER_SUPPORT
@@ -55,9 +65,11 @@ DeoptimizerData::DeoptimizerData() {
DeoptimizerData::~DeoptimizerData() {
- delete eager_deoptimization_entry_code_;
+ Isolate::Current()->memory_allocator()->Free(
+ eager_deoptimization_entry_code_);
eager_deoptimization_entry_code_ = NULL;
- delete lazy_deoptimization_entry_code_;
+ Isolate::Current()->memory_allocator()->Free(
+ lazy_deoptimization_entry_code_);
lazy_deoptimization_entry_code_ = NULL;
DeoptimizingCodeListNode* current = deoptimizing_code_list_;
@@ -79,6 +91,36 @@ void DeoptimizerData::Iterate(ObjectVisitor* v) {
#endif
+Code* DeoptimizerData::FindDeoptimizingCode(Address addr) {
+ for (DeoptimizingCodeListNode* node = deoptimizing_code_list_;
+ node != NULL;
+ node = node->next()) {
+ if (node->code()->contains(addr)) return *node->code();
+ }
+ return NULL;
+}
+
+
+void DeoptimizerData::RemoveDeoptimizingCode(Code* code) {
+ for (DeoptimizingCodeListNode *prev = NULL, *cur = deoptimizing_code_list_;
+ cur != NULL;
+ prev = cur, cur = cur->next()) {
+ if (*cur->code() == code) {
+ if (prev == NULL) {
+ deoptimizing_code_list_ = cur->next();
+ } else {
+ prev->set_next(cur->next());
+ }
+ delete cur;
+ return;
+ }
+ }
+ // Deoptimizing code is removed through a weak callback. Each object is
+ // expected to be removed once and only once.
+ UNREACHABLE();
+}
+
+
// We rely on this function not causing a GC. It is called from generated code
// without having a real stack frame in place.
Deoptimizer* Deoptimizer::New(JSFunction* function,
@@ -247,45 +289,6 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
}
-class DeoptimizingVisitor : public OptimizedFunctionVisitor {
- public:
- virtual void EnterContext(Context* context) {
- if (FLAG_trace_deopt) {
- PrintF("[deoptimize context: %" V8PRIxPTR "]\n",
- reinterpret_cast<intptr_t>(context));
- }
- }
-
- virtual void VisitFunction(JSFunction* function) {
- Deoptimizer::DeoptimizeFunction(function);
- }
-
- virtual void LeaveContext(Context* context) {
- context->ClearOptimizedFunctions();
- }
-};
-
-
-void Deoptimizer::DeoptimizeAll() {
- AssertNoAllocation no_allocation;
-
- if (FLAG_trace_deopt) {
- PrintF("[deoptimize all contexts]\n");
- }
-
- DeoptimizingVisitor visitor;
- VisitAllOptimizedFunctions(&visitor);
-}
-
-
-void Deoptimizer::DeoptimizeGlobalObject(JSObject* object) {
- AssertNoAllocation no_allocation;
-
- DeoptimizingVisitor visitor;
- VisitAllOptimizedFunctionsForGlobalObject(object, &visitor);
-}
-
-
void Deoptimizer::VisitAllOptimizedFunctionsForContext(
Context* context, OptimizedFunctionVisitor* visitor) {
Isolate* isolate = context->GetIsolate();
@@ -315,51 +318,169 @@ void Deoptimizer::VisitAllOptimizedFunctionsForContext(
}
-void Deoptimizer::VisitAllOptimizedFunctionsForGlobalObject(
- JSObject* object, OptimizedFunctionVisitor* visitor) {
+void Deoptimizer::VisitAllOptimizedFunctions(
+ OptimizedFunctionVisitor* visitor) {
AssertNoAllocation no_allocation;
+ // Run through the list of all native contexts and deoptimize.
+ Object* context = Isolate::Current()->heap()->native_contexts_list();
+ while (!context->IsUndefined()) {
+ VisitAllOptimizedFunctionsForContext(Context::cast(context), visitor);
+ context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
+ }
+}
+
+
+// Removes the functions selected by the given filter from the optimized
+// function list of the given context and partitions the removed functions
+// into one or more lists such that all functions in a list share the same
+// code. The head of each list is written in the deoptimizing_functions field
+// of the corresponding code object.
+// The found code objects are returned in the given zone list.
+static void PartitionOptimizedFunctions(Context* context,
+ OptimizedFunctionFilter* filter,
+ ZoneList<Code*>* partitions,
+ Zone* zone,
+ Object* undefined) {
+ AssertNoAllocation no_allocation;
+ Object* current = context->get(Context::OPTIMIZED_FUNCTIONS_LIST);
+ Object* remainder_head = undefined;
+ Object* remainder_tail = undefined;
+ ASSERT_EQ(0, partitions->length());
+ while (current != undefined) {
+ JSFunction* function = JSFunction::cast(current);
+ current = function->next_function_link();
+ if (filter->TakeFunction(function)) {
+ Code* code = function->code();
+ if (code->deoptimizing_functions() == undefined) {
+ partitions->Add(code, zone);
+ } else {
+ ASSERT(partitions->Contains(code));
+ }
+ function->set_next_function_link(code->deoptimizing_functions());
+ code->set_deoptimizing_functions(function);
+ } else {
+ if (remainder_head == undefined) {
+ remainder_head = function;
+ } else {
+ JSFunction::cast(remainder_tail)->set_next_function_link(function);
+ }
+ remainder_tail = function;
+ }
+ }
+ if (remainder_tail != undefined) {
+ JSFunction::cast(remainder_tail)->set_next_function_link(undefined);
+ }
+ context->set(Context::OPTIMIZED_FUNCTIONS_LIST, remainder_head);
+}
+
+
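PartitionOptimizedFunctions splices one singly linked list in a single pass: functions the filter selects are pushed onto per-Code bucket lists (threaded through the code objects' deoptimizing_functions field), and the rest are relinked in their original order. A simplified JavaScript sketch with plain bucket objects instead of the V8 data structures:

    function partition(head, take) {
      var remainderHead = null, remainderTail = null;
      var buckets = new Map();                 // code -> head of its bucket list
      for (var cur = head; cur !== null; ) {
        var next = cur.nextFunctionLink;       // save before relinking
        if (take(cur)) {
          cur.nextFunctionLink = buckets.get(cur.code) || null;
          buckets.set(cur.code, cur);          // push onto this code's bucket
        } else {
          if (remainderHead === null) remainderHead = cur;
          else remainderTail.nextFunctionLink = cur;
          remainderTail = cur;
          cur.nextFunctionLink = null;         // keep the remainder list terminated
        }
        cur = next;
      }
      return { remainder: remainderHead, buckets: buckets };
    }
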
+class DeoptimizeAllFilter : public OptimizedFunctionFilter {
+ public:
+ virtual bool TakeFunction(JSFunction* function) {
+ return true;
+ }
+};
+
+
+class DeoptimizeWithMatchingCodeFilter : public OptimizedFunctionFilter {
+ public:
+ explicit DeoptimizeWithMatchingCodeFilter(Code* code) : code_(code) {}
+ virtual bool TakeFunction(JSFunction* function) {
+ return function->code() == code_;
+ }
+ private:
+ Code* code_;
+};
+
+
+void Deoptimizer::DeoptimizeAll() {
+ AssertNoAllocation no_allocation;
+
+ if (FLAG_trace_deopt) {
+ PrintF("[deoptimize all contexts]\n");
+ }
+
+ DeoptimizeAllFilter filter;
+ DeoptimizeAllFunctionsWith(&filter);
+}
+
+
+void Deoptimizer::DeoptimizeGlobalObject(JSObject* object) {
+ AssertNoAllocation no_allocation;
+ DeoptimizeAllFilter filter;
if (object->IsJSGlobalProxy()) {
Object* proto = object->GetPrototype();
ASSERT(proto->IsJSGlobalObject());
- VisitAllOptimizedFunctionsForContext(
- GlobalObject::cast(proto)->native_context(), visitor);
+ DeoptimizeAllFunctionsForContext(
+ GlobalObject::cast(proto)->native_context(), &filter);
} else if (object->IsGlobalObject()) {
- VisitAllOptimizedFunctionsForContext(
- GlobalObject::cast(object)->native_context(), visitor);
+ DeoptimizeAllFunctionsForContext(
+ GlobalObject::cast(object)->native_context(), &filter);
}
}
-void Deoptimizer::VisitAllOptimizedFunctions(
- OptimizedFunctionVisitor* visitor) {
+void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
+ if (!function->IsOptimized()) return;
+ Code* code = function->code();
+ Context* context = function->context()->native_context();
+ Isolate* isolate = context->GetIsolate();
+ Object* undefined = isolate->heap()->undefined_value();
+ Zone* zone = isolate->runtime_zone();
+ ZoneScope zone_scope(zone, DELETE_ON_EXIT);
+ ZoneList<Code*> codes(1, zone);
+ DeoptimizeWithMatchingCodeFilter filter(code);
+ PartitionOptimizedFunctions(context, &filter, &codes, zone, undefined);
+ ASSERT_EQ(1, codes.length());
+ DeoptimizeFunctionWithPreparedFunctionList(
+ JSFunction::cast(codes.at(0)->deoptimizing_functions()));
+ codes.at(0)->set_deoptimizing_functions(undefined);
+}
+
+
+void Deoptimizer::DeoptimizeAllFunctionsForContext(
+ Context* context, OptimizedFunctionFilter* filter) {
+ ASSERT(context->IsNativeContext());
+ Isolate* isolate = context->GetIsolate();
+ Object* undefined = isolate->heap()->undefined_value();
+ Zone* zone = isolate->runtime_zone();
+ ZoneScope zone_scope(zone, DELETE_ON_EXIT);
+ ZoneList<Code*> codes(1, zone);
+ PartitionOptimizedFunctions(context, filter, &codes, zone, undefined);
+ for (int i = 0; i < codes.length(); ++i) {
+ DeoptimizeFunctionWithPreparedFunctionList(
+ JSFunction::cast(codes.at(i)->deoptimizing_functions()));
+ codes.at(i)->set_deoptimizing_functions(undefined);
+ }
+}
+
+
+void Deoptimizer::DeoptimizeAllFunctionsWith(OptimizedFunctionFilter* filter) {
AssertNoAllocation no_allocation;
// Run through the list of all native contexts and deoptimize.
Object* context = Isolate::Current()->heap()->native_contexts_list();
while (!context->IsUndefined()) {
- // GC can happen when the context is not fully initialized,
- // so the global field of the context can be undefined.
- Object* global = Context::cast(context)->get(Context::GLOBAL_OBJECT_INDEX);
- if (!global->IsUndefined()) {
- VisitAllOptimizedFunctionsForGlobalObject(JSObject::cast(global),
- visitor);
- }
+ DeoptimizeAllFunctionsForContext(Context::cast(context), filter);
context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
}
}
-void Deoptimizer::HandleWeakDeoptimizedCode(
- v8::Persistent<v8::Value> obj, void* data) {
+void Deoptimizer::HandleWeakDeoptimizedCode(v8::Isolate* isolate,
+ v8::Persistent<v8::Value> obj,
+ void* parameter) {
DeoptimizingCodeListNode* node =
- reinterpret_cast<DeoptimizingCodeListNode*>(data);
- RemoveDeoptimizingCode(*node->code());
+ reinterpret_cast<DeoptimizingCodeListNode*>(parameter);
+ DeoptimizerData* data =
+ reinterpret_cast<Isolate*>(isolate)->deoptimizer_data();
+ data->RemoveDeoptimizingCode(*node->code());
#ifdef DEBUG
- node = Isolate::Current()->deoptimizer_data()->deoptimizing_code_list_;
- while (node != NULL) {
- ASSERT(node != reinterpret_cast<DeoptimizingCodeListNode*>(data));
- node = node->next();
+ for (DeoptimizingCodeListNode* current = data->deoptimizing_code_list_;
+ current != NULL;
+ current = current->next()) {
+ ASSERT(current != node);
}
#endif
}
@@ -370,6 +491,38 @@ void Deoptimizer::ComputeOutputFrames(Deoptimizer* deoptimizer) {
}
+bool Deoptimizer::TraceEnabledFor(BailoutType deopt_type,
+ StackFrame::Type frame_type) {
+ switch (deopt_type) {
+ case EAGER:
+ case LAZY:
+ case DEBUGGER:
+ return (frame_type == StackFrame::STUB)
+ ? FLAG_trace_stub_failures
+ : FLAG_trace_deopt;
+ case OSR:
+ return FLAG_trace_osr;
+ }
+ UNREACHABLE();
+ return false;
+}
+
+
+const char* Deoptimizer::MessageFor(BailoutType type) {
+ switch (type) {
+ case EAGER:
+ case LAZY:
+ return "DEOPT";
+ case DEBUGGER:
+ return "DEOPT FOR DEBUGGER";
+ case OSR:
+ return "OSR";
+ }
+ UNREACHABLE();
+ return NULL;
+}
+
+
Deoptimizer::Deoptimizer(Isolate* isolate,
JSFunction* function,
BailoutType type,
@@ -390,69 +543,77 @@ Deoptimizer::Deoptimizer(Isolate* isolate,
output_(NULL),
deferred_arguments_objects_values_(0),
deferred_arguments_objects_(0),
- deferred_heap_numbers_(0) {
- if (FLAG_trace_deopt && type != OSR) {
- if (type == DEBUGGER) {
- PrintF("**** DEOPT FOR DEBUGGER: ");
- } else {
- PrintF("**** DEOPT: ");
- }
- function->PrintName();
- PrintF(" at bailout #%u, address 0x%" V8PRIxPTR ", frame size %d\n",
- bailout_id,
- reinterpret_cast<intptr_t>(from),
- fp_to_sp_delta - (2 * kPointerSize));
- } else if (FLAG_trace_osr && type == OSR) {
- PrintF("**** OSR: ");
- function->PrintName();
- PrintF(" at ast id #%u, address 0x%" V8PRIxPTR ", frame size %d\n",
- bailout_id,
- reinterpret_cast<intptr_t>(from),
- fp_to_sp_delta - (2 * kPointerSize));
- }
- function->shared()->increment_deopt_count();
- // Find the optimized code.
- if (type == EAGER) {
- ASSERT(from == NULL);
- optimized_code_ = function_->code();
- if (FLAG_trace_deopt && FLAG_code_comments) {
- // Print instruction associated with this bailout.
- const char* last_comment = NULL;
- int mask = RelocInfo::ModeMask(RelocInfo::COMMENT)
- | RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
- for (RelocIterator it(optimized_code_, mask); !it.done(); it.next()) {
- RelocInfo* info = it.rinfo();
- if (info->rmode() == RelocInfo::COMMENT) {
- last_comment = reinterpret_cast<const char*>(info->data());
- }
- if (info->rmode() == RelocInfo::RUNTIME_ENTRY) {
- unsigned id = Deoptimizer::GetDeoptimizationId(
- info->target_address(), Deoptimizer::EAGER);
- if (id == bailout_id && last_comment != NULL) {
- PrintF(" %s\n", last_comment);
- break;
- }
- }
- }
- }
- } else if (type == LAZY) {
- optimized_code_ = FindDeoptimizingCodeFromAddress(from);
- ASSERT(optimized_code_ != NULL);
- } else if (type == OSR) {
- // The function has already been optimized and we're transitioning
- // from the unoptimized shared version to the optimized one in the
- // function. The return address (from) points to unoptimized code.
- optimized_code_ = function_->code();
- ASSERT(optimized_code_->kind() == Code::OPTIMIZED_FUNCTION);
- ASSERT(!optimized_code_->contains(from));
- } else if (type == DEBUGGER) {
- optimized_code_ = optimized_code;
- ASSERT(optimized_code_->contains(from));
+ deferred_heap_numbers_(0),
+ trace_(false) {
+ // For COMPILED_STUBs called from builtins, the function pointer is a SMI
+ // indicating an internal frame.
+ if (function->IsSmi()) {
+ function = NULL;
+ }
+ if (function != NULL && function->IsOptimized()) {
+ function->shared()->increment_deopt_count();
}
+ compiled_code_ = FindOptimizedCode(function, optimized_code);
+ StackFrame::Type frame_type = function == NULL
+ ? StackFrame::STUB
+ : StackFrame::JAVA_SCRIPT;
+ trace_ = TraceEnabledFor(type, frame_type);
+ if (trace_) Trace();
ASSERT(HEAP->allow_allocation(false));
unsigned size = ComputeInputFrameSize();
input_ = new(size) FrameDescription(size, function);
- input_->SetFrameType(StackFrame::JAVA_SCRIPT);
+ input_->SetFrameType(frame_type);
+}
+
+
+Code* Deoptimizer::FindOptimizedCode(JSFunction* function,
+ Code* optimized_code) {
+ switch (bailout_type_) {
+ case Deoptimizer::EAGER:
+ ASSERT(from_ == NULL);
+ return function->code();
+ case Deoptimizer::LAZY: {
+ Code* compiled_code =
+ isolate_->deoptimizer_data()->FindDeoptimizingCode(from_);
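+ // A code object that is not on the per-isolate deoptimizing-code list
+ // (e.g. a compiled stub) is looked up in the heap by return address
+ // instead.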
+ return (compiled_code == NULL)
+ ? static_cast<Code*>(isolate_->heap()->FindCodeObject(from_))
+ : compiled_code;
+ }
+ case Deoptimizer::OSR: {
+ // The function has already been optimized and we're transitioning
+ // from the unoptimized shared version to the optimized one in the
+ // function. The return address (from_) points to unoptimized code.
+ Code* compiled_code = function->code();
+ ASSERT(compiled_code->kind() == Code::OPTIMIZED_FUNCTION);
+ ASSERT(!compiled_code->contains(from_));
+ return compiled_code;
+ }
+ case Deoptimizer::DEBUGGER:
+ ASSERT(optimized_code->contains(from_));
+ return optimized_code;
+ }
+ UNREACHABLE();
+ return NULL;
+}
+
+
+void Deoptimizer::Trace() {
+ PrintF("**** %s: ", Deoptimizer::MessageFor(bailout_type_));
+ PrintFunctionName();
+ PrintF(" at id #%u, address 0x%" V8PRIxPTR ", frame size %d\n",
+ bailout_id_,
+ reinterpret_cast<intptr_t>(from_),
+ fp_to_sp_delta_ - (2 * kPointerSize));
+ if (bailout_type_ == EAGER) compiled_code_->PrintDeoptLocation(bailout_id_);
+}
+
+
+void Deoptimizer::PrintFunctionName() {
+ if (function_->IsJSFunction()) {
+ function_->PrintName();
+ } else {
+ PrintF("%s", Code::Kind2String(compiled_code_->kind()));
+ }
}
@@ -473,45 +634,45 @@ void Deoptimizer::DeleteFrameDescriptions() {
}
-Address Deoptimizer::GetDeoptimizationEntry(int id,
+Address Deoptimizer::GetDeoptimizationEntry(Isolate* isolate,
+ int id,
BailoutType type,
GetEntryMode mode) {
ASSERT(id >= 0);
if (id >= kMaxNumberOfEntries) return NULL;
- VirtualMemory* base = NULL;
+ MemoryChunk* base = NULL;
if (mode == ENSURE_ENTRY_CODE) {
- EnsureCodeForDeoptimizationEntry(type, id);
+ EnsureCodeForDeoptimizationEntry(isolate, type, id);
} else {
ASSERT(mode == CALCULATE_ENTRY_ADDRESS);
}
- DeoptimizerData* data = Isolate::Current()->deoptimizer_data();
+ DeoptimizerData* data = isolate->deoptimizer_data();
if (type == EAGER) {
base = data->eager_deoptimization_entry_code_;
} else {
base = data->lazy_deoptimization_entry_code_;
}
- return
- static_cast<Address>(base->address()) + (id * table_entry_size_);
+ return base->area_start() + (id * table_entry_size_);
}
int Deoptimizer::GetDeoptimizationId(Address addr, BailoutType type) {
- VirtualMemory* base = NULL;
+ MemoryChunk* base = NULL;
DeoptimizerData* data = Isolate::Current()->deoptimizer_data();
if (type == EAGER) {
base = data->eager_deoptimization_entry_code_;
} else {
base = data->lazy_deoptimization_entry_code_;
}
- Address base_casted = reinterpret_cast<Address>(base->address());
- if (base == NULL ||
- addr < base->address() ||
- addr >= base_casted + (kMaxNumberOfEntries * table_entry_size_)) {
+ // Check for a missing table before dereferencing base.
+ if (base == NULL) return kNotDeoptimizationEntry;
+ Address start = base->area_start();
+ if (addr < start ||
+ addr >= start + (kMaxNumberOfEntries * table_entry_size_)) {
return kNotDeoptimizationEntry;
}
ASSERT_EQ(0,
- static_cast<int>(addr - base_casted) % table_entry_size_);
- return static_cast<int>(addr - base_casted) / table_entry_size_;
+ static_cast<int>(addr - start) % table_entry_size_);
+ return static_cast<int>(addr - start) / table_entry_size_;
}
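
Both directions of the id/address mapping are now plain arithmetic on the committed MemoryChunk: entries are fixed-size stubs laid out back to back from area_start(). A free-standing sketch of that arithmetic, with an assumed entry size (the real table_entry_size_ is architecture-specific):

    #include <stdint.h>

    const intptr_t kEntrySize  = 12;     // assumed; arch-dependent in V8
    const int      kMaxEntries = 16384;  // kMaxNumberOfEntries

    // Forward mapping, as in GetDeoptimizationEntry.
    intptr_t EntryAddress(intptr_t start, int id) {
      return start + id * kEntrySize;
    }

    // Inverse mapping, as in GetDeoptimizationId; -1 stands in for
    // kNotDeoptimizationEntry.
    int EntryId(intptr_t start, intptr_t addr) {
      if (addr < start || addr >= start + kMaxEntries * kEntrySize) return -1;
      return static_cast<int>((addr - start) / kEntrySize);
    }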
@@ -535,7 +696,7 @@ int Deoptimizer::GetOutputInfo(DeoptimizationOutputData* data,
shared->SourceCodePrint(&stream, -1);
PrintF("[source:\n%s\n]", *stream.ToCString());
- UNREACHABLE();
+ FATAL("unable to find pc offset during deoptimization");
return -1;
}
@@ -562,18 +723,18 @@ void Deoptimizer::DoComputeOutputFrames() {
// Print some helpful diagnostic information.
int64_t start = OS::Ticks();
- if (FLAG_trace_deopt) {
+ if (trace_) {
PrintF("[deoptimizing%s: begin 0x%08" V8PRIxPTR " ",
(bailout_type_ == LAZY ? " (lazy)" : ""),
reinterpret_cast<intptr_t>(function_));
- function_->PrintName();
+ PrintFunctionName();
PrintF(" @%d]\n", bailout_id_);
}
// Determine basic deoptimization information. The optimized frame is
// described by the input data.
DeoptimizationInputData* input_data =
- DeoptimizationInputData::cast(optimized_code_->deoptimization_data());
+ DeoptimizationInputData::cast(compiled_code_->deoptimization_data());
BailoutId node_id = input_data->AstId(bailout_id_);
ByteArray* translations = input_data->TranslationByteArray();
unsigned translation_index =
@@ -618,6 +779,9 @@ void Deoptimizer::DoComputeOutputFrames() {
case Translation::SETTER_STUB_FRAME:
DoComputeAccessorStubFrame(&iterator, i, true);
break;
+ case Translation::COMPILED_STUB_FRAME:
+ DoComputeCompiledStubFrame(&iterator, i);
+ break;
case Translation::BEGIN:
case Translation::REGISTER:
case Translation::INT32_REGISTER:
@@ -630,19 +794,20 @@ void Deoptimizer::DoComputeOutputFrames() {
case Translation::LITERAL:
case Translation::ARGUMENTS_OBJECT:
case Translation::DUPLICATE:
+ default:
UNREACHABLE();
break;
}
}
// Print some helpful diagnostic information.
- if (FLAG_trace_deopt) {
+ if (trace_) {
double ms = static_cast<double>(OS::Ticks() - start) / 1000;
int index = output_count_ - 1; // Index of the topmost frame.
JSFunction* function = output_[index]->GetFunction();
PrintF("[deoptimizing: end 0x%08" V8PRIxPTR " ",
reinterpret_cast<intptr_t>(function));
- function->PrintName();
+ if (function != NULL) function->PrintName();
PrintF(" => node=%d, pc=0x%08" V8PRIxPTR ", state=%s, alignment=%s,"
" took %0.3f ms]\n",
node_id.ToInt(),
@@ -656,13 +821,234 @@ void Deoptimizer::DoComputeOutputFrames() {
}
+void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
+ int frame_index) {
+ JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
+ unsigned height = iterator->Next();
+ unsigned height_in_bytes = height * kPointerSize;
+ if (trace_) {
+ PrintF(" translating arguments adaptor => height=%d\n", height_in_bytes);
+ }
+
+ unsigned fixed_frame_size = ArgumentsAdaptorFrameConstants::kFrameSize;
+ unsigned output_frame_size = height_in_bytes + fixed_frame_size;
+
+ // Allocate and store the output frame description.
+ FrameDescription* output_frame =
+ new(output_frame_size) FrameDescription(output_frame_size, function);
+ output_frame->SetFrameType(StackFrame::ARGUMENTS_ADAPTOR);
+
+ // Arguments adaptor can not be topmost or bottommost.
+ ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
+ ASSERT(output_[frame_index] == NULL);
+ output_[frame_index] = output_frame;
+
+ // The top address of the frame is computed from the previous
+ // frame's top and this frame's size.
+ intptr_t top_address;
+ top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
+ output_frame->SetTop(top_address);
+
+ // Compute the incoming parameter translation.
+ int parameter_count = height;
+ unsigned output_offset = output_frame_size;
+ for (int i = 0; i < parameter_count; ++i) {
+ output_offset -= kPointerSize;
+ DoTranslateCommand(iterator, frame_index, output_offset);
+ }
+
+ // Read caller's PC from the previous frame.
+ output_offset -= kPointerSize;
+ intptr_t callers_pc = output_[frame_index - 1]->GetPc();
+ output_frame->SetFrameSlot(output_offset, callers_pc);
+ if (trace_) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ V8PRIxPTR " ; caller's pc\n",
+ top_address + output_offset, output_offset, callers_pc);
+ }
+
+ // Read caller's FP from the previous frame, and set this frame's FP.
+ output_offset -= kPointerSize;
+ intptr_t value = output_[frame_index - 1]->GetFp();
+ output_frame->SetFrameSlot(output_offset, value);
+ intptr_t fp_value = top_address + output_offset;
+ output_frame->SetFp(fp_value);
+ if (trace_) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ V8PRIxPTR " ; caller's fp\n",
+ fp_value, output_offset, value);
+ }
+
+ // A marker value is used in place of the context.
+ output_offset -= kPointerSize;
+ intptr_t context = reinterpret_cast<intptr_t>(
+ Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ output_frame->SetFrameSlot(output_offset, context);
+ if (trace_) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ V8PRIxPTR " ; context (adaptor sentinel)\n",
+ top_address + output_offset, output_offset, context);
+ }
+
+ // The function was mentioned explicitly in the ARGUMENTS_ADAPTOR_FRAME.
+ output_offset -= kPointerSize;
+ value = reinterpret_cast<intptr_t>(function);
+ output_frame->SetFrameSlot(output_offset, value);
+ if (trace_) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ V8PRIxPTR " ; function\n",
+ top_address + output_offset, output_offset, value);
+ }
+
+ // Number of incoming arguments.
+ output_offset -= kPointerSize;
+ value = reinterpret_cast<intptr_t>(Smi::FromInt(height - 1));
+ output_frame->SetFrameSlot(output_offset, value);
+ if (trace_) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ V8PRIxPTR " ; argc (%d)\n",
+ top_address + output_offset, output_offset, value, height - 1);
+ }
+
+ ASSERT(0 == output_offset);
+
+ Builtins* builtins = isolate_->builtins();
+ Code* adaptor_trampoline =
+ builtins->builtin(Builtins::kArgumentsAdaptorTrampoline);
+ intptr_t pc_value = reinterpret_cast<intptr_t>(
+ adaptor_trampoline->instruction_start() +
+ isolate_->heap()->arguments_adaptor_deopt_pc_offset()->value());
+ output_frame->SetPc(pc_value);
+}
+
+
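
For orientation, the slots the function above writes, from the highest address down to the value stored as top (one kPointerSize slot each):

    <incoming parameters>    height slots, filled via DoTranslateCommand
    caller's pc
    caller's fp              <- this frame's fp
    Smi(ARGUMENTS_ADAPTOR)   context slot reused as a frame-type sentinel
    function
    Smi(height - 1)          argc, excluding the receiver  <- frame top

The frame's pc is then pointed into the ArgumentsAdaptorTrampoline builtin at the deopt offset recorded on the heap.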
+void Deoptimizer::DoComputeAccessorStubFrame(TranslationIterator* iterator,
+ int frame_index,
+ bool is_setter_stub_frame) {
+ JSFunction* accessor = JSFunction::cast(ComputeLiteral(iterator->Next()));
+ // The receiver and, for setters, the implicit return value are expected
+ // in registers by the LoadIC/StoreIC, so they don't belong to the output
+ // stack frame. This means that we have to use a height of 0.
+ unsigned height = 0;
+ unsigned height_in_bytes = height * kPointerSize;
+ const char* kind = is_setter_stub_frame ? "setter" : "getter";
+ if (trace_) {
+ PrintF(" translating %s stub => height=%u\n", kind, height_in_bytes);
+ }
+
+ // We need 1 stack entry for the return address + 4 stack entries from
+ // StackFrame::INTERNAL (FP, context, frame type, code object, see
+ // MacroAssembler::EnterFrame). For a setter stub frame we need one additional
+ // entry for the implicit return value, see
+ // StoreStubCompiler::CompileStoreViaSetter.
+ unsigned fixed_frame_entries = 1 + 4 + (is_setter_stub_frame ? 1 : 0);
+ unsigned fixed_frame_size = fixed_frame_entries * kPointerSize;
+ unsigned output_frame_size = height_in_bytes + fixed_frame_size;
+
+ // Allocate and store the output frame description.
+ FrameDescription* output_frame =
+ new(output_frame_size) FrameDescription(output_frame_size, accessor);
+ output_frame->SetFrameType(StackFrame::INTERNAL);
+
+ // A frame for an accessor stub cannot be the topmost or bottommost one.
+ ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
+ ASSERT(output_[frame_index] == NULL);
+ output_[frame_index] = output_frame;
+
+ // The top address of the frame is computed from the previous frame's top and
+ // this frame's size.
+ intptr_t top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
+ output_frame->SetTop(top_address);
+
+ unsigned output_offset = output_frame_size;
+
+ // Read caller's PC from the previous frame.
+ output_offset -= kPointerSize;
+ intptr_t callers_pc = output_[frame_index - 1]->GetPc();
+ output_frame->SetFrameSlot(output_offset, callers_pc);
+ if (trace_) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
+ " ; caller's pc\n",
+ top_address + output_offset, output_offset, callers_pc);
+ }
+
+ // Read caller's FP from the previous frame, and set this frame's FP.
+ output_offset -= kPointerSize;
+ intptr_t value = output_[frame_index - 1]->GetFp();
+ output_frame->SetFrameSlot(output_offset, value);
+ intptr_t fp_value = top_address + output_offset;
+ output_frame->SetFp(fp_value);
+ if (trace_) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
+ " ; caller's fp\n",
+ fp_value, output_offset, value);
+ }
+
+ // The context can be read from the previous frame.
+ output_offset -= kPointerSize;
+ value = output_[frame_index - 1]->GetContext();
+ output_frame->SetFrameSlot(output_offset, value);
+ if (trace_) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
+ " ; context\n",
+ top_address + output_offset, output_offset, value);
+ }
+
+ // A marker value is used in place of the function.
+ output_offset -= kPointerSize;
+ value = reinterpret_cast<intptr_t>(Smi::FromInt(StackFrame::INTERNAL));
+ output_frame->SetFrameSlot(output_offset, value);
+ if (trace_) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
+ " ; function (%s sentinel)\n",
+ top_address + output_offset, output_offset, value, kind);
+ }
+
+ // Get Code object from accessor stub.
+ output_offset -= kPointerSize;
+ Builtins::Name name = is_setter_stub_frame ?
+ Builtins::kStoreIC_Setter_ForDeopt :
+ Builtins::kLoadIC_Getter_ForDeopt;
+ Code* accessor_stub = isolate_->builtins()->builtin(name);
+ value = reinterpret_cast<intptr_t>(accessor_stub);
+ output_frame->SetFrameSlot(output_offset, value);
+ if (trace_) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
+ " ; code object\n",
+ top_address + output_offset, output_offset, value);
+ }
+
+ // Skip receiver.
+ Translation::Opcode opcode =
+ static_cast<Translation::Opcode>(iterator->Next());
+ iterator->Skip(Translation::NumberOfOperandsFor(opcode));
+
+ if (is_setter_stub_frame) {
+ // The implicit return value was part of the artificial setter stub
+ // environment.
+ output_offset -= kPointerSize;
+ DoTranslateCommand(iterator, frame_index, output_offset);
+ }
+
+ ASSERT(0 == output_offset);
+
+ Smi* offset = is_setter_stub_frame ?
+ isolate_->heap()->setter_stub_deopt_pc_offset() :
+ isolate_->heap()->getter_stub_deopt_pc_offset();
+ intptr_t pc = reinterpret_cast<intptr_t>(
+ accessor_stub->instruction_start() + offset->value());
+ output_frame->SetPc(pc);
+}
+
+
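
The accessor stub frame laid out above has the 1 + 4 (+ 1) fixed entries counted earlier, from highest address down to top:

    caller's pc
    caller's fp                <- this frame's fp
    context                    copied from the caller's frame
    Smi(StackFrame::INTERNAL)  function slot reused as a sentinel
    code object                the *_ForDeopt accessor builtin
    [implicit return value]    setter frames only  <- frame top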
void Deoptimizer::MaterializeHeapObjects(JavaScriptFrameIterator* it) {
ASSERT_NE(DEBUGGER, bailout_type_);
// Handlify all argument object values before triggering any allocation.
List<Handle<Object> > values(deferred_arguments_objects_values_.length());
for (int i = 0; i < deferred_arguments_objects_values_.length(); ++i) {
- values.Add(Handle<Object>(deferred_arguments_objects_values_[i]));
+ values.Add(Handle<Object>(deferred_arguments_objects_values_[i],
+ isolate_));
}
// Play it safe and clear all unhandlified values before we continue.
@@ -674,7 +1060,7 @@ void Deoptimizer::MaterializeHeapObjects(JavaScriptFrameIterator* it) {
for (int i = 0; i < deferred_heap_numbers_.length(); i++) {
HeapNumberMaterializationDescriptor d = deferred_heap_numbers_[i];
Handle<Object> num = isolate_->factory()->NewNumber(d.value());
- if (FLAG_trace_deopt) {
+ if (trace_) {
PrintF("Materializing a new heap number %p [%e] in slot %p\n",
reinterpret_cast<void*>(*num),
d.value(),
@@ -719,9 +1105,10 @@ void Deoptimizer::MaterializeHeapObjects(JavaScriptFrameIterator* it) {
}
frame->SetExpression(i, *arguments);
ASSERT_EQ(Memory::Object_at(descriptor.slot_address()), *arguments);
- if (FLAG_trace_deopt) {
- PrintF("Materializing %sarguments object for %p: ",
+ if (trace_) {
+ PrintF("Materializing %sarguments object of length %d for %p: ",
frame->has_adapted_arguments() ? "(adapted) " : "",
+ arguments->elements()->length(),
reinterpret_cast<void*>(descriptor.slot_address()));
arguments->ShortPrint();
PrintF("\n");
@@ -754,7 +1141,7 @@ void Deoptimizer::MaterializeHeapNumbersForDebuggerInspectableFrame(
int index = (info->parameters_count() - 1) -
static_cast<int>(slot - parameters_top) / kPointerSize;
- if (FLAG_trace_deopt) {
+ if (trace_) {
PrintF("Materializing a new heap number %p [%e] in slot %p"
"for parameter slot #%d\n",
reinterpret_cast<void*>(*num),
@@ -770,7 +1157,7 @@ void Deoptimizer::MaterializeHeapNumbersForDebuggerInspectableFrame(
int index = info->expression_count() - 1 -
static_cast<int>(slot - expressions_top) / kPointerSize;
- if (FLAG_trace_deopt) {
+ if (trace_) {
PrintF("Materializing a new heap number %p [%e] in slot %p"
"for expression slot #%d\n",
reinterpret_cast<void*>(*num),
@@ -809,6 +1196,7 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
case Translation::CONSTRUCT_STUB_FRAME:
case Translation::GETTER_STUB_FRAME:
case Translation::SETTER_STUB_FRAME:
+ case Translation::COMPILED_STUB_FRAME:
case Translation::DUPLICATE:
UNREACHABLE();
return;
@@ -816,7 +1204,7 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
case Translation::REGISTER: {
int input_reg = iterator->Next();
intptr_t input_value = input_->GetRegister(input_reg);
- if (FLAG_trace_deopt) {
+ if (trace_) {
PrintF(
" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" V8PRIxPTR " ; %s ",
output_[frame_index]->GetTop() + output_offset,
@@ -834,7 +1222,7 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
int input_reg = iterator->Next();
intptr_t value = input_->GetRegister(input_reg);
bool is_smi = Smi::IsValid(value);
- if (FLAG_trace_deopt) {
+ if (trace_) {
PrintF(
" 0x%08" V8PRIxPTR ": [top + %d] <- %" V8PRIdPTR " ; %s (%s)\n",
output_[frame_index]->GetTop() + output_offset,
@@ -861,7 +1249,7 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
int input_reg = iterator->Next();
uintptr_t value = static_cast<uintptr_t>(input_->GetRegister(input_reg));
bool is_smi = (value <= static_cast<uintptr_t>(Smi::kMaxValue));
- if (FLAG_trace_deopt) {
+ if (trace_) {
PrintF(
" 0x%08" V8PRIxPTR ": [top + %d] <- %" V8PRIuPTR
" ; uint %s (%s)\n",
@@ -888,7 +1276,7 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
case Translation::DOUBLE_REGISTER: {
int input_reg = iterator->Next();
double value = input_->GetDoubleRegister(input_reg);
- if (FLAG_trace_deopt) {
+ if (trace_) {
PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- %e ; %s\n",
output_[frame_index]->GetTop() + output_offset,
output_offset,
@@ -907,7 +1295,7 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
unsigned input_offset =
input_->GetOffsetFromSlotIndex(input_slot_index);
intptr_t input_value = input_->GetFrameSlot(input_offset);
- if (FLAG_trace_deopt) {
+ if (trace_) {
PrintF(" 0x%08" V8PRIxPTR ": ",
output_[frame_index]->GetTop() + output_offset);
PrintF("[top + %d] <- 0x%08" V8PRIxPTR " ; [sp + %d] ",
@@ -927,7 +1315,7 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
input_->GetOffsetFromSlotIndex(input_slot_index);
intptr_t value = input_->GetFrameSlot(input_offset);
bool is_smi = Smi::IsValid(value);
- if (FLAG_trace_deopt) {
+ if (trace_) {
PrintF(" 0x%08" V8PRIxPTR ": ",
output_[frame_index]->GetTop() + output_offset);
PrintF("[top + %d] <- %" V8PRIdPTR " ; [sp + %d] (%s)\n",
@@ -957,7 +1345,7 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
uintptr_t value =
static_cast<uintptr_t>(input_->GetFrameSlot(input_offset));
bool is_smi = (value <= static_cast<uintptr_t>(Smi::kMaxValue));
- if (FLAG_trace_deopt) {
+ if (trace_) {
PrintF(" 0x%08" V8PRIxPTR ": ",
output_[frame_index]->GetTop() + output_offset);
PrintF("[top + %d] <- %" V8PRIuPTR " ; [sp + %d] (uint32 %s)\n",
@@ -985,7 +1373,7 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
unsigned input_offset =
input_->GetOffsetFromSlotIndex(input_slot_index);
double value = input_->GetDoubleFrameSlot(input_offset);
- if (FLAG_trace_deopt) {
+ if (trace_) {
PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- %e ; [sp + %d]\n",
output_[frame_index]->GetTop() + output_offset,
output_offset,
@@ -1001,7 +1389,7 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
case Translation::LITERAL: {
Object* literal = ComputeLiteral(iterator->Next());
- if (FLAG_trace_deopt) {
+ if (trace_) {
PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- ",
output_[frame_index]->GetTop() + output_offset,
output_offset);
@@ -1014,14 +1402,15 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
}
case Translation::ARGUMENTS_OBJECT: {
+ bool args_known = iterator->Next();
int args_index = iterator->Next() + 1; // Skip receiver.
int args_length = iterator->Next() - 1; // Skip receiver.
- if (FLAG_trace_deopt) {
+ if (trace_) {
PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- ",
output_[frame_index]->GetTop() + output_offset,
output_offset);
isolate_->heap()->arguments_marker()->ShortPrint();
- PrintF(" ; arguments object\n");
+ PrintF(" ; %sarguments object\n", args_known ? "" : "dummy ");
}
// Use the arguments marker value as a sentinel and fill in the arguments
// object after the deoptimized frame is built.
@@ -1034,7 +1423,9 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
// actual arguments object after the deoptimized frame is built.
for (int i = 0; i < args_length; i++) {
unsigned input_offset = input_->GetOffsetFromSlotIndex(args_index + i);
- intptr_t input_value = input_->GetFrameSlot(input_offset);
+ intptr_t input_value = args_known
+ ? input_->GetFrameSlot(input_offset)
+ : reinterpret_cast<intptr_t>(isolate_->heap()->the_hole_value());
AddArgumentsObjectValue(input_value);
}
return;
@@ -1117,6 +1508,7 @@ bool Deoptimizer::DoOsrTranslateCommand(TranslationIterator* iterator,
case Translation::CONSTRUCT_STUB_FRAME:
case Translation::GETTER_STUB_FRAME:
case Translation::SETTER_STUB_FRAME:
+ case Translation::COMPILED_STUB_FRAME:
case Translation::DUPLICATE:
UNREACHABLE(); // Malformed input.
return false;
@@ -1336,8 +1728,8 @@ unsigned Deoptimizer::ComputeInputFrameSize() const {
// size matches the stack height we can compute based on the
// environment at the OSR entry. The code for that is built into
// the DoComputeOsrOutputFrame function for now.
- } else {
- unsigned stack_slots = optimized_code_->stack_slots();
+ } else if (compiled_code_->kind() != Code::COMPILED_STUB) {
+ unsigned stack_slots = compiled_code_->stack_slots();
unsigned outgoing_size = ComputeOutgoingArgumentSize();
ASSERT(result == fixed_size + (stack_slots * kPointerSize) + outgoing_size);
}
@@ -1357,6 +1749,10 @@ unsigned Deoptimizer::ComputeFixedSize(JSFunction* function) const {
unsigned Deoptimizer::ComputeIncomingArgumentSize(JSFunction* function) const {
// The incoming arguments is the values for formal parameters and
// the receiver. Every slot contains a pointer.
+ if (function->IsSmi()) {
+ ASSERT(Smi::cast(function) == Smi::FromInt(StackFrame::STUB));
+ return 0;
+ }
unsigned arguments = function->shared()->formal_parameter_count() + 1;
return arguments * kPointerSize;
}
@@ -1364,7 +1760,7 @@ unsigned Deoptimizer::ComputeIncomingArgumentSize(JSFunction* function) const {
unsigned Deoptimizer::ComputeOutgoingArgumentSize() const {
DeoptimizationInputData* data = DeoptimizationInputData::cast(
- optimized_code_->deoptimization_data());
+ compiled_code_->deoptimization_data());
unsigned height = data->ArgumentsStackHeight(bailout_id_)->value();
return height * kPointerSize;
}
@@ -1372,7 +1768,7 @@ unsigned Deoptimizer::ComputeOutgoingArgumentSize() const {
Object* Deoptimizer::ComputeLiteral(int index) const {
DeoptimizationInputData* data = DeoptimizationInputData::cast(
- optimized_code_->deoptimization_data());
+ compiled_code_->deoptimization_data());
FixedArray* literals = data->LiteralArray();
return literals->get(index);
}
@@ -1397,38 +1793,38 @@ void Deoptimizer::AddDoubleValue(intptr_t slot_address, double value) {
}
-void Deoptimizer::EnsureCodeForDeoptimizationEntry(BailoutType type,
+void Deoptimizer::EnsureCodeForDeoptimizationEntry(Isolate* isolate,
+ BailoutType type,
int max_entry_id) {
// We cannot run this if the serializer is enabled because this will
// cause us to emit relocation information for the external
// references. This is fine because the deoptimizer's code section
// isn't meant to be serialized at all.
- ASSERT(!Serializer::enabled());
-
ASSERT(type == EAGER || type == LAZY);
- DeoptimizerData* data = Isolate::Current()->deoptimizer_data();
+ DeoptimizerData* data = isolate->deoptimizer_data();
int entry_count = (type == EAGER)
? data->eager_deoptimization_entry_code_entries_
: data->lazy_deoptimization_entry_code_entries_;
if (max_entry_id < entry_count) return;
- entry_count = Min(Max(entry_count * 2, Deoptimizer::kMinNumberOfEntries),
- Deoptimizer::kMaxNumberOfEntries);
+ entry_count = Max(entry_count, Deoptimizer::kMinNumberOfEntries);
+ while (max_entry_id >= entry_count) entry_count *= 2;
+ ASSERT(entry_count <= Deoptimizer::kMaxNumberOfEntries);
- MacroAssembler masm(Isolate::Current(), NULL, 16 * KB);
+ MacroAssembler masm(isolate, NULL, 16 * KB);
masm.set_emit_debug_code(false);
GenerateDeoptimizationEntries(&masm, entry_count, type);
CodeDesc desc;
masm.GetCode(&desc);
- ASSERT(desc.reloc_size == 0);
+ ASSERT(!RelocInfo::RequiresRelocation(desc));
- VirtualMemory* memory = type == EAGER
+ MemoryChunk* chunk = type == EAGER
? data->eager_deoptimization_entry_code_
: data->lazy_deoptimization_entry_code_;
- size_t table_size = Deoptimizer::GetMaxDeoptTableSize();
- ASSERT(static_cast<int>(table_size) >= desc.instr_size);
- memory->Commit(memory->address(), table_size, true);
- memcpy(memory->address(), desc.buffer, desc.instr_size);
- CPU::FlushICache(memory->address(), desc.instr_size);
+ ASSERT(static_cast<int>(Deoptimizer::GetMaxDeoptTableSize()) >=
+ desc.instr_size);
+ chunk->CommitArea(desc.instr_size);
+ memcpy(chunk->area_start(), desc.buffer, desc.instr_size);
+ CPU::FlushICache(chunk->area_start(), desc.instr_size);
if (type == EAGER) {
data->eager_deoptimization_entry_code_entries_ = entry_count;
@@ -1438,82 +1834,11 @@ void Deoptimizer::EnsureCodeForDeoptimizationEntry(BailoutType type,
}
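
The sizing rule changes from one clamped doubling to doubling until the requested id fits; the backing chunk is reserved at its maximum size up front and only committed as needed, so the loop is safe. A sketch of the new rule using the constants from this file:

    // New entry count for a table currently holding `current` entries that
    // must cover `max_entry_id` (sketch of the logic above).
    int GrowEntryCount(int current, int max_entry_id) {
      const int kMinNumberOfEntries = 64;
      int count = current < kMinNumberOfEntries ? kMinNumberOfEntries
                                                : current;
      while (max_entry_id >= count) count *= 2;
      return count;  // e.g. current = 64, max_entry_id = 300 -> 512
    }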
-Code* Deoptimizer::FindDeoptimizingCodeFromAddress(Address addr) {
- DeoptimizingCodeListNode* node =
- Isolate::Current()->deoptimizer_data()->deoptimizing_code_list_;
- while (node != NULL) {
- if (node->code()->contains(addr)) return *node->code();
- node = node->next();
- }
- return NULL;
-}
-
-
-void Deoptimizer::RemoveDeoptimizingCode(Code* code) {
- DeoptimizerData* data = Isolate::Current()->deoptimizer_data();
- ASSERT(data->deoptimizing_code_list_ != NULL);
- // Run through the code objects to find this one and remove it.
- DeoptimizingCodeListNode* prev = NULL;
- DeoptimizingCodeListNode* current = data->deoptimizing_code_list_;
- while (current != NULL) {
- if (*current->code() == code) {
- // Unlink from list. If prev is NULL we are looking at the first element.
- if (prev == NULL) {
- data->deoptimizing_code_list_ = current->next();
- } else {
- prev->set_next(current->next());
- }
- delete current;
- return;
- }
- // Move to next in list.
- prev = current;
- current = current->next();
- }
- // Deoptimizing code is removed through weak callback. Each object is expected
- // to be removed once and only once.
- UNREACHABLE();
-}
-
-
-static Object* CutOutRelatedFunctionsList(Context* context,
- Code* code,
- Object* undefined) {
- Object* result_list_head = undefined;
- Object* head;
- Object* current;
- current = head = context->get(Context::OPTIMIZED_FUNCTIONS_LIST);
- JSFunction* prev = NULL;
- while (current != undefined) {
- JSFunction* func = JSFunction::cast(current);
- current = func->next_function_link();
- if (func->code() == code) {
- func->set_next_function_link(result_list_head);
- result_list_head = func;
- if (prev) {
- prev->set_next_function_link(current);
- } else {
- head = current;
- }
- } else {
- prev = func;
- }
- }
- if (head != context->get(Context::OPTIMIZED_FUNCTIONS_LIST)) {
- context->set(Context::OPTIMIZED_FUNCTIONS_LIST, head);
- }
- return result_list_head;
-}
-
-
void Deoptimizer::ReplaceCodeForRelatedFunctions(JSFunction* function,
Code* code) {
- Context* context = function->context()->native_context();
-
SharedFunctionInfo* shared = function->shared();
-
Object* undefined = Isolate::Current()->heap()->undefined_value();
- Object* current = CutOutRelatedFunctionsList(context, code, undefined);
+ Object* current = function;
while (current != undefined) {
JSFunction* func = JSFunction::cast(current);
@@ -1574,6 +1899,8 @@ int FrameDescription::ComputeParametersCount() {
// Can't use GetExpression(0) because it would cause infinite recursion.
return reinterpret_cast<Smi*>(*GetFrameSlotPointer(0))->value();
}
+ case StackFrame::STUB:
+ return -1; // Minus receiver.
default:
UNREACHABLE();
return 0;
@@ -1681,6 +2008,11 @@ void Translation::BeginJSFrame(BailoutId node_id,
}
+void Translation::BeginCompiledStubFrame() {
+ buffer_->Add(COMPILED_STUB_FRAME, zone());
+}
+
+
void Translation::StoreRegister(Register reg) {
buffer_->Add(REGISTER, zone());
buffer_->Add(reg.code(), zone());
@@ -1735,8 +2067,11 @@ void Translation::StoreLiteral(int literal_id) {
}
-void Translation::StoreArgumentsObject(int args_index, int args_length) {
+void Translation::StoreArgumentsObject(bool args_known,
+ int args_index,
+ int args_length) {
buffer_->Add(ARGUMENTS_OBJECT, zone());
+ buffer_->Add(args_known, zone());
buffer_->Add(args_index, zone());
buffer_->Add(args_length, zone());
}
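
The opcode now carries three operands, consumed in the same order by the ARGUMENTS_OBJECT case of DoTranslateCommand above (args_known first, then index and length). A sketch of the encoding, with a plain vector standing in for the zone-allocated TranslationBuffer and an assumed opcode value:

    #include <vector>

    enum { kArgumentsObjectOp = 14 };  // opcode value assumed

    void StoreArgumentsObjectSketch(std::vector<int>* buffer,
                                    bool args_known,
                                    int args_index,
                                    int args_length) {
      buffer->push_back(kArgumentsObjectOp);
      buffer->push_back(args_known ? 1 : 0);  // operand added by this patch
      buffer->push_back(args_index);
      buffer->push_back(args_length);
    }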
@@ -1762,13 +2097,14 @@ int Translation::NumberOfOperandsFor(Opcode opcode) {
case UINT32_STACK_SLOT:
case DOUBLE_STACK_SLOT:
case LITERAL:
+ case COMPILED_STUB_FRAME:
return 1;
case BEGIN:
case ARGUMENTS_ADAPTOR_FRAME:
case CONSTRUCT_STUB_FRAME:
- case ARGUMENTS_OBJECT:
return 2;
case JS_FRAME:
+ case ARGUMENTS_OBJECT:
return 3;
}
UNREACHABLE();
@@ -1792,6 +2128,8 @@ const char* Translation::StringFor(Opcode opcode) {
return "GETTER_STUB_FRAME";
case SETTER_STUB_FRAME:
return "SETTER_STUB_FRAME";
+ case COMPILED_STUB_FRAME:
+ return "COMPILED_STUB_FRAME";
case REGISTER:
return "REGISTER";
case INT32_REGISTER:
@@ -1828,6 +2166,7 @@ DeoptimizingCodeListNode::DeoptimizingCodeListNode(Code* code): next_(NULL) {
code_ = Handle<Code>::cast(global_handles->Create(code));
global_handles->MakeWeak(reinterpret_cast<Object**>(code_.location()),
this,
+ NULL,
Deoptimizer::HandleWeakDeoptimizedCode);
}
@@ -1897,8 +2236,13 @@ SlotRef SlotRef::ComputeSlotForNextArgument(TranslationIterator* iterator,
case Translation::LITERAL: {
int literal_index = iterator->Next();
- return SlotRef(data->LiteralArray()->get(literal_index));
+ return SlotRef(data->GetIsolate(),
+ data->LiteralArray()->get(literal_index));
}
+
+ case Translation::COMPILED_STUB_FRAME:
+ UNREACHABLE();
+ break;
}
UNREACHABLE();
diff --git a/src/3rdparty/v8/src/deoptimizer.h b/src/3rdparty/v8/src/deoptimizer.h
index 4aa38ce..b4d8873 100644
--- a/src/3rdparty/v8/src/deoptimizer.h
+++ b/src/3rdparty/v8/src/deoptimizer.h
@@ -87,6 +87,14 @@ class OptimizedFunctionVisitor BASE_EMBEDDED {
};
+class OptimizedFunctionFilter BASE_EMBEDDED {
+ public:
+ virtual ~OptimizedFunctionFilter() {}
+
+ virtual bool TakeFunction(JSFunction* function) = 0;
+};
+
+
class Deoptimizer;
@@ -99,11 +107,14 @@ class DeoptimizerData {
void Iterate(ObjectVisitor* v);
#endif
+ Code* FindDeoptimizingCode(Address addr);
+ void RemoveDeoptimizingCode(Code* code);
+
private:
int eager_deoptimization_entry_code_entries_;
int lazy_deoptimization_entry_code_entries_;
- VirtualMemory* eager_deoptimization_entry_code_;
- VirtualMemory* lazy_deoptimization_entry_code_;
+ MemoryChunk* eager_deoptimization_entry_code_;
+ MemoryChunk* lazy_deoptimization_entry_code_;
Deoptimizer* current_;
#ifdef ENABLE_DEBUGGER_SUPPORT
@@ -133,8 +144,14 @@ class Deoptimizer : public Malloced {
DEBUGGER
};
+ static bool TraceEnabledFor(BailoutType deopt_type,
+ StackFrame::Type frame_type);
+ static const char* MessageFor(BailoutType type);
+
int output_count() const { return output_count_; }
+ Code::Kind compiled_code_kind() const { return compiled_code_->kind(); }
+
// Number of created JS frames. Not all created frames are necessarily JS.
int jsframe_count() const { return jsframe_count_; }
@@ -177,12 +194,14 @@ class Deoptimizer : public Malloced {
static void DeoptimizeGlobalObject(JSObject* object);
+ static void DeoptimizeAllFunctionsWith(OptimizedFunctionFilter* filter);
+
+ static void DeoptimizeAllFunctionsForContext(
+ Context* context, OptimizedFunctionFilter* filter);
+
static void VisitAllOptimizedFunctionsForContext(
Context* context, OptimizedFunctionVisitor* visitor);
- static void VisitAllOptimizedFunctionsForGlobalObject(
- JSObject* object, OptimizedFunctionVisitor* visitor);
-
static void VisitAllOptimizedFunctions(OptimizedFunctionVisitor* visitor);
// The size in bytes of the code required at a lazy deopt patch site.
@@ -236,6 +255,7 @@ class Deoptimizer : public Malloced {
static Address GetDeoptimizationEntry(
+ Isolate* isolate,
int id,
BailoutType type,
GetEntryMode mode = ENSURE_ENTRY_CODE);
@@ -297,6 +317,10 @@ class Deoptimizer : public Malloced {
static size_t GetMaxDeoptTableSize();
+ static void EnsureCodeForDeoptimizationEntry(Isolate* isolate,
+ BailoutType type,
+ int max_entry_id);
+
private:
static const int kMinNumberOfEntries = 64;
static const int kMaxNumberOfEntries = 16384;
@@ -308,6 +332,9 @@ class Deoptimizer : public Malloced {
Address from,
int fp_to_sp_delta,
Code* optimized_code);
+ Code* FindOptimizedCode(JSFunction* function, Code* optimized_code);
+ void Trace();
+ void PrintFunctionName();
void DeleteFrameDescriptions();
void DoComputeOutputFrames();
@@ -320,6 +347,8 @@ class Deoptimizer : public Malloced {
void DoComputeAccessorStubFrame(TranslationIterator* iterator,
int frame_index,
bool is_setter_stub_frame);
+ void DoComputeCompiledStubFrame(TranslationIterator* iterator,
+ int frame_index);
void DoTranslateCommand(TranslationIterator* iterator,
int frame_index,
unsigned output_offset);
@@ -342,16 +371,17 @@ class Deoptimizer : public Malloced {
void AddArgumentsObjectValue(intptr_t value);
void AddDoubleValue(intptr_t slot_address, double value);
- static void EnsureCodeForDeoptimizationEntry(BailoutType type,
- int max_entry_id);
static void GenerateDeoptimizationEntries(
MacroAssembler* masm, int count, BailoutType type);
// Weak handle callback for deoptimizing code objects.
- static void HandleWeakDeoptimizedCode(
- v8::Persistent<v8::Value> obj, void* data);
- static Code* FindDeoptimizingCodeFromAddress(Address addr);
- static void RemoveDeoptimizingCode(Code* code);
+ static void HandleWeakDeoptimizedCode(v8::Isolate* isolate,
+ v8::Persistent<v8::Value> obj,
+ void* data);
+
+ // Deoptimize function assuming that function->next_function_link() points
+ // to a list that contains all functions that share the same optimized code.
+ static void DeoptimizeFunctionWithPreparedFunctionList(JSFunction* function);
// Fill the input from a JavaScript frame. This is used when
// the debugger needs to inspect an optimized frame. For normal
@@ -360,7 +390,7 @@ class Deoptimizer : public Malloced {
Isolate* isolate_;
JSFunction* function_;
- Code* optimized_code_;
+ Code* compiled_code_;
unsigned bailout_id_;
BailoutType bailout_type_;
Address from_;
@@ -380,6 +410,8 @@ class Deoptimizer : public Malloced {
List<ArgumentsObjectMaterializationDescriptor> deferred_arguments_objects_;
List<HeapNumberMaterializationDescriptor> deferred_heap_numbers_;
+ bool trace_;
+
static const int table_entry_size_;
friend class FrameDescription;
@@ -530,16 +562,13 @@ class FrameDescription {
uintptr_t frame_size_; // Number of bytes.
JSFunction* function_;
intptr_t registers_[Register::kNumRegisters];
- double double_registers_[DoubleRegister::kNumAllocatableRegisters];
+ double double_registers_[DoubleRegister::kMaxNumRegisters];
intptr_t top_;
intptr_t pc_;
intptr_t fp_;
intptr_t context_;
StackFrame::Type type_;
Smi* state_;
-#ifdef DEBUG
- Code::Kind kind_;
-#endif
// Continuation is the PC where the execution continues after
// deoptimizing.
@@ -603,6 +632,7 @@ class Translation BASE_EMBEDDED {
GETTER_STUB_FRAME,
SETTER_STUB_FRAME,
ARGUMENTS_ADAPTOR_FRAME,
+ COMPILED_STUB_FRAME,
REGISTER,
INT32_REGISTER,
UINT32_REGISTER,
@@ -633,6 +663,7 @@ class Translation BASE_EMBEDDED {
// Commands.
void BeginJSFrame(BailoutId node_id, int literal_id, unsigned height);
+ void BeginCompiledStubFrame();
void BeginArgumentsAdaptorFrame(int literal_id, unsigned height);
void BeginConstructStubFrame(int literal_id, unsigned height);
void BeginGetterStubFrame(int literal_id);
@@ -646,7 +677,7 @@ class Translation BASE_EMBEDDED {
void StoreUint32StackSlot(int index);
void StoreDoubleStackSlot(int index);
void StoreLiteral(int literal_id);
- void StoreArgumentsObject(int args_index, int args_length);
+ void StoreArgumentsObject(bool args_known, int args_index, int args_length);
void MarkDuplicate();
Zone* zone() const { return zone_; }
@@ -704,36 +735,35 @@ class SlotRef BASE_EMBEDDED {
SlotRef(Address addr, SlotRepresentation representation)
: addr_(addr), representation_(representation) { }
- explicit SlotRef(Object* literal)
- : literal_(literal), representation_(LITERAL) { }
+ SlotRef(Isolate* isolate, Object* literal)
+ : literal_(literal, isolate), representation_(LITERAL) { }
- Handle<Object> GetValue() {
+ Handle<Object> GetValue(Isolate* isolate) {
switch (representation_) {
case TAGGED:
- return Handle<Object>(Memory::Object_at(addr_));
+ return Handle<Object>(Memory::Object_at(addr_), isolate);
case INT32: {
int value = Memory::int32_at(addr_);
if (Smi::IsValid(value)) {
- return Handle<Object>(Smi::FromInt(value));
+ return Handle<Object>(Smi::FromInt(value), isolate);
} else {
- return Isolate::Current()->factory()->NewNumberFromInt(value);
+ return isolate->factory()->NewNumberFromInt(value);
}
}
case UINT32: {
uint32_t value = Memory::uint32_at(addr_);
if (value <= static_cast<uint32_t>(Smi::kMaxValue)) {
- return Handle<Object>(Smi::FromInt(static_cast<int>(value)));
+ return Handle<Object>(Smi::FromInt(static_cast<int>(value)), isolate);
} else {
- return Isolate::Current()->factory()->NewNumber(
- static_cast<double>(value));
+ return isolate->factory()->NewNumber(static_cast<double>(value));
}
}
case DOUBLE: {
double value = Memory::double_at(addr_);
- return Isolate::Current()->factory()->NewNumber(value);
+ return isolate->factory()->NewNumber(value);
}
case LITERAL:
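
The recurring change in this header hunk mirrors the .cc file: the implicit Isolate::Current() lookup is replaced by an isolate threaded through explicitly, and every Handle built from a raw object names its isolate. A one-line sketch of the idiom, assuming V8-internal types:

    // Before: Handle<Object>(obj);            // falls back to TLS lookup
    // After:  Handle<Object>(obj, isolate);   // explicit isolate
    Handle<Object> Wrap(Object* obj, Isolate* isolate) {
      return Handle<Object>(obj, isolate);
    }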
diff --git a/src/3rdparty/v8/src/disassembler.cc b/src/3rdparty/v8/src/disassembler.cc
index 9f8b9a8..5d18d68 100644
--- a/src/3rdparty/v8/src/disassembler.cc
+++ b/src/3rdparty/v8/src/disassembler.cc
@@ -111,11 +111,12 @@ static void DumpBuffer(FILE* f, StringBuilder* out) {
static const int kOutBufferSize = 2048 + String::kMaxShortPrintLength;
static const int kRelocInfoPosition = 57;
-static int DecodeIt(FILE* f,
+static int DecodeIt(Isolate* isolate,
+ FILE* f,
const V8NameConverter& converter,
byte* begin,
byte* end) {
- NoHandleAllocation ha;
+ NoHandleAllocation ha(isolate);
AssertNoAllocation no_alloc;
ExternalReferenceEncoder ref_encoder;
Heap* heap = HEAP;
@@ -282,12 +283,17 @@ static int DecodeIt(FILE* f,
out.AddFormatted(" (id = %d)", static_cast<int>(relocinfo.data()));
}
} else if (rmode == RelocInfo::RUNTIME_ENTRY &&
- Isolate::Current()->deoptimizer_data() != NULL) {
+ isolate->deoptimizer_data() != NULL) {
// A runtime entry relocinfo might be a deoptimization bailout.
Address addr = relocinfo.target_address();
int id = Deoptimizer::GetDeoptimizationId(addr, Deoptimizer::EAGER);
if (id == Deoptimizer::kNotDeoptimizationEntry) {
- out.AddFormatted(" ;; %s", RelocInfo::RelocModeName(rmode));
+ id = Deoptimizer::GetDeoptimizationId(addr, Deoptimizer::LAZY);
+ if (id == Deoptimizer::kNotDeoptimizationEntry) {
+ out.AddFormatted(" ;; %s", RelocInfo::RelocModeName(rmode));
+ } else {
+ out.AddFormatted(" ;; lazy deoptimization bailout %d", id);
+ }
} else {
out.AddFormatted(" ;; deoptimization bailout %d", id);
}
@@ -314,15 +320,17 @@ static int DecodeIt(FILE* f,
}
-int Disassembler::Decode(FILE* f, byte* begin, byte* end) {
+int Disassembler::Decode(Isolate* isolate, FILE* f, byte* begin, byte* end) {
V8NameConverter defaultConverter(NULL);
- return DecodeIt(f, defaultConverter, begin, end);
+ return DecodeIt(isolate, f, defaultConverter, begin, end);
}
// Called by Code::CodePrint.
void Disassembler::Decode(FILE* f, Code* code) {
- int decode_size = (code->kind() == Code::OPTIMIZED_FUNCTION)
+ Isolate* isolate = code->GetIsolate();
+ int decode_size = (code->kind() == Code::OPTIMIZED_FUNCTION ||
+ code->kind() == Code::COMPILED_STUB)
? static_cast<int>(code->safepoint_table_offset())
: code->instruction_size();
// If there might be a stack check table, stop before reaching it.
@@ -334,13 +342,15 @@ void Disassembler::Decode(FILE* f, Code* code) {
byte* begin = code->instruction_start();
byte* end = begin + decode_size;
V8NameConverter v8NameConverter(code);
- DecodeIt(f, v8NameConverter, begin, end);
+ DecodeIt(isolate, f, v8NameConverter, begin, end);
}
#else // ENABLE_DISASSEMBLER
void Disassembler::Dump(FILE* f, byte* begin, byte* end) {}
-int Disassembler::Decode(FILE* f, byte* begin, byte* end) { return 0; }
+int Disassembler::Decode(Isolate* isolate, FILE* f, byte* begin, byte* end) {
+ return 0;
+}
void Disassembler::Decode(FILE* f, Code* code) {}
#endif // ENABLE_DISASSEMBLER
diff --git a/src/3rdparty/v8/src/disassembler.h b/src/3rdparty/v8/src/disassembler.h
index 4a87dca..8789150 100644
--- a/src/3rdparty/v8/src/disassembler.h
+++ b/src/3rdparty/v8/src/disassembler.h
@@ -41,7 +41,7 @@ class Disassembler : public AllStatic {
// Decode instructions in the interval [begin, end) and print the
// code into f. Returns the number of bytes disassembled or 1 if no
// instruction could be decoded.
- static int Decode(FILE* f, byte* begin, byte* end);
+ static int Decode(Isolate* isolate, FILE* f, byte* begin, byte* end);
// Decode instructions in code.
static void Decode(FILE* f, Code* code);
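
Call sites of the static Decode must now supply the isolate themselves. A sketch of an updated caller, assuming only V8-internal types already used in this patch:

    // Disassemble a whole code object through the explicit-isolate API.
    void DumpCode(FILE* f, Code* code) {
      byte* begin = code->instruction_start();
      byte* end = begin + code->instruction_size();
      Disassembler::Decode(code->GetIsolate(), f, begin, end);
    }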
diff --git a/src/3rdparty/v8/src/elements.cc b/src/3rdparty/v8/src/elements.cc
index 8cb48c6..9deef60 100644
--- a/src/3rdparty/v8/src/elements.cc
+++ b/src/3rdparty/v8/src/elements.cc
@@ -27,10 +27,11 @@
#include "v8.h"
+#include "arguments.h"
#include "objects.h"
#include "elements.h"
#include "utils.h"
-
+#include "v8conversions.h"
// Each concrete ElementsAccessor can handle exactly one ElementsKind;
// several abstract ElementsAccessor classes are used to allow sharing
@@ -146,33 +147,36 @@ static Failure* ThrowArrayLengthRangeError(Heap* heap) {
}
-void CopyObjectToObjectElements(FixedArray* from,
- ElementsKind from_kind,
- uint32_t from_start,
- FixedArray* to,
- ElementsKind to_kind,
- uint32_t to_start,
- int raw_copy_size) {
- ASSERT(to->map() != HEAP->fixed_cow_array_map());
+static void CopyObjectToObjectElements(FixedArrayBase* from_base,
+ ElementsKind from_kind,
+ uint32_t from_start,
+ FixedArrayBase* to_base,
+ ElementsKind to_kind,
+ uint32_t to_start,
+ int raw_copy_size) {
+ ASSERT(to_base->map() != HEAP->fixed_cow_array_map());
+ AssertNoAllocation no_allocation;
int copy_size = raw_copy_size;
if (raw_copy_size < 0) {
ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd ||
raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
- copy_size = Min(from->length() - from_start,
- to->length() - to_start);
-#ifdef DEBUG
- // FAST_*_ELEMENTS arrays cannot be uninitialized. Ensure they are already
- // marked with the hole.
+ copy_size = Min(from_base->length() - from_start,
+ to_base->length() - to_start);
if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
- for (int i = to_start + copy_size; i < to->length(); ++i) {
- ASSERT(to->get(i)->IsTheHole());
+ int start = to_start + copy_size;
+ int length = to_base->length() - start;
+ if (length > 0) {
+ Heap* heap = from_base->GetHeap();
+ MemsetPointer(FixedArray::cast(to_base)->data_start() + start,
+ heap->the_hole_value(), length);
}
}
-#endif
}
- ASSERT((copy_size + static_cast<int>(to_start)) <= to->length() &&
- (copy_size + static_cast<int>(from_start)) <= from->length());
+ ASSERT((copy_size + static_cast<int>(to_start)) <= to_base->length() &&
+ (copy_size + static_cast<int>(from_start)) <= from_base->length());
if (copy_size == 0) return;
+ FixedArray* from = FixedArray::cast(from_base);
+ FixedArray* to = FixedArray::cast(to_base);
ASSERT(IsFastSmiOrObjectElementsKind(from_kind));
ASSERT(IsFastSmiOrObjectElementsKind(to_kind));
Address to_address = to->address() + FixedArray::kHeaderSize;
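
The DEBUG-only assertions disappear because the destination may now be genuinely uninitialized; instead the tail of the target array is unconditionally filled with the hole. MemsetPointer is the helper doing that fill; a sketch of its effect, with the signature assumed from the call site above:

    // Fill `length` consecutive Object* slots with the same value.
    inline void MemsetPointerSketch(Object** dest, Object* value, int length) {
      for (int i = 0; i < length; i++) dest[i] = value;
    }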
@@ -193,31 +197,34 @@ void CopyObjectToObjectElements(FixedArray* from,
}
-static void CopyDictionaryToObjectElements(SeededNumberDictionary* from,
+static void CopyDictionaryToObjectElements(FixedArrayBase* from_base,
uint32_t from_start,
- FixedArray* to,
+ FixedArrayBase* to_base,
ElementsKind to_kind,
uint32_t to_start,
int raw_copy_size) {
+ SeededNumberDictionary* from = SeededNumberDictionary::cast(from_base);
+ AssertNoAllocation no_allocation;
int copy_size = raw_copy_size;
Heap* heap = from->GetHeap();
if (raw_copy_size < 0) {
ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd ||
raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
copy_size = from->max_number_key() + 1 - from_start;
-#ifdef DEBUG
- // Fast object arrays cannot be uninitialized. Ensure they are already
- // marked with the hole.
if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
- for (int i = to_start + copy_size; i < to->length(); ++i) {
- ASSERT(to->get(i)->IsTheHole());
+ int start = to_start + copy_size;
+ int length = to_base->length() - start;
+ if (length > 0) {
+ MemsetPointer(FixedArray::cast(to_base)->data_start() + start,
+ heap->the_hole_value(), length);
}
}
-#endif
}
- ASSERT(to != from);
+ ASSERT(to_base != from_base);
ASSERT(IsFastSmiOrObjectElementsKind(to_kind));
if (copy_size == 0) return;
+ FixedArray* to = FixedArray::cast(to_base);
uint32_t to_length = to->length();
if (to_start + copy_size > to_length) {
copy_size = to_length - to_start;
@@ -244,9 +251,9 @@ static void CopyDictionaryToObjectElements(SeededNumberDictionary* from,
MUST_USE_RESULT static MaybeObject* CopyDoubleToObjectElements(
- FixedDoubleArray* from,
+ FixedArrayBase* from_base,
uint32_t from_start,
- FixedArray* to,
+ FixedArrayBase* to_base,
ElementsKind to_kind,
uint32_t to_start,
int raw_copy_size) {
@@ -255,21 +262,26 @@ MUST_USE_RESULT static MaybeObject* CopyDoubleToObjectElements(
if (raw_copy_size < 0) {
ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd ||
raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
- copy_size = Min(from->length() - from_start,
- to->length() - to_start);
-#ifdef DEBUG
- // FAST_*_ELEMENTS arrays cannot be uninitialized. Ensure they are already
- // marked with the hole.
+ copy_size = Min(from_base->length() - from_start,
+ to_base->length() - to_start);
if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
- for (int i = to_start + copy_size; i < to->length(); ++i) {
- ASSERT(to->get(i)->IsTheHole());
+ // Also initialize the area that will be copied over since HeapNumber
+ // allocation below can cause an incremental marking step, requiring all
+ // existing heap objects to be properly initialized.
+ int start = to_start;
+ int length = to_base->length() - start;
+ if (length > 0) {
+ Heap* heap = from_base->GetHeap();
+ MemsetPointer(FixedArray::cast(to_base)->data_start() + start,
+ heap->the_hole_value(), length);
}
}
-#endif
}
- ASSERT((copy_size + static_cast<int>(to_start)) <= to->length() &&
- (copy_size + static_cast<int>(from_start)) <= from->length());
- if (copy_size == 0) return from;
+ ASSERT((copy_size + static_cast<int>(to_start)) <= to_base->length() &&
+ (copy_size + static_cast<int>(from_start)) <= from_base->length());
+ if (copy_size == 0) return from_base;
+ FixedDoubleArray* from = FixedDoubleArray::cast(from_base);
+ FixedArray* to = FixedArray::cast(to_base);
for (int i = 0; i < copy_size; ++i) {
if (IsFastSmiElementsKind(to_kind)) {
UNIMPLEMENTED();
@@ -298,26 +310,28 @@ MUST_USE_RESULT static MaybeObject* CopyDoubleToObjectElements(
}
-static void CopyDoubleToDoubleElements(FixedDoubleArray* from,
+static void CopyDoubleToDoubleElements(FixedArrayBase* from_base,
uint32_t from_start,
- FixedDoubleArray* to,
+ FixedArrayBase* to_base,
uint32_t to_start,
int raw_copy_size) {
int copy_size = raw_copy_size;
if (raw_copy_size < 0) {
ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd ||
raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
- copy_size = Min(from->length() - from_start,
- to->length() - to_start);
+ copy_size = Min(from_base->length() - from_start,
+ to_base->length() - to_start);
if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
- for (int i = to_start + copy_size; i < to->length(); ++i) {
- to->set_the_hole(i);
+ for (int i = to_start + copy_size; i < to_base->length(); ++i) {
+ FixedDoubleArray::cast(to_base)->set_the_hole(i);
}
}
}
- ASSERT((copy_size + static_cast<int>(to_start)) <= to->length() &&
- (copy_size + static_cast<int>(from_start)) <= from->length());
+ ASSERT((copy_size + static_cast<int>(to_start)) <= to_base->length() &&
+ (copy_size + static_cast<int>(from_start)) <= from_base->length());
if (copy_size == 0) return;
+ FixedDoubleArray* from = FixedDoubleArray::cast(from_base);
+ FixedDoubleArray* to = FixedDoubleArray::cast(to_base);
Address to_address = to->address() + FixedDoubleArray::kHeaderSize;
Address from_address = from->address() + FixedDoubleArray::kHeaderSize;
to_address += kDoubleSize * to_start;
@@ -329,25 +343,27 @@ static void CopyDoubleToDoubleElements(FixedDoubleArray* from,
}
-static void CopySmiToDoubleElements(FixedArray* from,
+static void CopySmiToDoubleElements(FixedArrayBase* from_base,
uint32_t from_start,
- FixedDoubleArray* to,
+ FixedArrayBase* to_base,
uint32_t to_start,
int raw_copy_size) {
int copy_size = raw_copy_size;
if (raw_copy_size < 0) {
ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd ||
raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
- copy_size = from->length() - from_start;
+ copy_size = from_base->length() - from_start;
if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
- for (int i = to_start + copy_size; i < to->length(); ++i) {
- to->set_the_hole(i);
+ for (int i = to_start + copy_size; i < to_base->length(); ++i) {
+ FixedDoubleArray::cast(to_base)->set_the_hole(i);
}
}
}
- ASSERT((copy_size + static_cast<int>(to_start)) <= to->length() &&
- (copy_size + static_cast<int>(from_start)) <= from->length());
+ ASSERT((copy_size + static_cast<int>(to_start)) <= to_base->length() &&
+ (copy_size + static_cast<int>(from_start)) <= from_base->length());
if (copy_size == 0) return;
+ FixedArray* from = FixedArray::cast(from_base);
+ FixedDoubleArray* to = FixedDoubleArray::cast(to_base);
Object* the_hole = from->GetHeap()->the_hole_value();
for (uint32_t from_end = from_start + static_cast<uint32_t>(copy_size);
from_start < from_end; from_start++, to_start++) {
@@ -361,9 +377,9 @@ static void CopySmiToDoubleElements(FixedArray* from,
}
-static void CopyPackedSmiToDoubleElements(FixedArray* from,
+static void CopyPackedSmiToDoubleElements(FixedArrayBase* from_base,
uint32_t from_start,
- FixedDoubleArray* to,
+ FixedArrayBase* to_base,
uint32_t to_start,
int packed_size,
int raw_copy_size) {
@@ -372,52 +388,55 @@ static void CopyPackedSmiToDoubleElements(FixedArray* from,
if (raw_copy_size < 0) {
ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd ||
raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
- copy_size = from->length() - from_start;
+ copy_size = packed_size - from_start;
if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
- to_end = to->length();
+ to_end = to_base->length();
+ for (uint32_t i = to_start + copy_size; i < to_end; ++i) {
+ FixedDoubleArray::cast(to_base)->set_the_hole(i);
+ }
} else {
to_end = to_start + static_cast<uint32_t>(copy_size);
}
} else {
to_end = to_start + static_cast<uint32_t>(copy_size);
}
- ASSERT(static_cast<int>(to_end) <= to->length());
+ ASSERT(static_cast<int>(to_end) <= to_base->length());
ASSERT(packed_size >= 0 && packed_size <= copy_size);
- ASSERT((copy_size + static_cast<int>(to_start)) <= to->length() &&
- (copy_size + static_cast<int>(from_start)) <= from->length());
+ ASSERT((copy_size + static_cast<int>(to_start)) <= to_base->length() &&
+ (copy_size + static_cast<int>(from_start)) <= from_base->length());
if (copy_size == 0) return;
+ FixedArray* from = FixedArray::cast(from_base);
+ FixedDoubleArray* to = FixedDoubleArray::cast(to_base);
for (uint32_t from_end = from_start + static_cast<uint32_t>(packed_size);
from_start < from_end; from_start++, to_start++) {
Object* smi = from->get(from_start);
ASSERT(!smi->IsTheHole());
to->set(to_start, Smi::cast(smi)->value());
}
-
- while (to_start < to_end) {
- to->set_the_hole(to_start++);
- }
}
-static void CopyObjectToDoubleElements(FixedArray* from,
+static void CopyObjectToDoubleElements(FixedArrayBase* from_base,
uint32_t from_start,
- FixedDoubleArray* to,
+ FixedArrayBase* to_base,
uint32_t to_start,
int raw_copy_size) {
int copy_size = raw_copy_size;
if (raw_copy_size < 0) {
ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd ||
raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
- copy_size = from->length() - from_start;
+ copy_size = from_base->length() - from_start;
if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
- for (int i = to_start + copy_size; i < to->length(); ++i) {
- to->set_the_hole(i);
+ for (int i = to_start + copy_size; i < to_base->length(); ++i) {
+ FixedDoubleArray::cast(to_base)->set_the_hole(i);
}
}
}
- ASSERT((copy_size + static_cast<int>(to_start)) <= to->length() &&
- (copy_size + static_cast<int>(from_start)) <= from->length());
+ ASSERT((copy_size + static_cast<int>(to_start)) <= to_base->length() &&
+ (copy_size + static_cast<int>(from_start)) <= from_base->length());
if (copy_size == 0) return;
+ FixedArray* from = FixedArray::cast(from_base);
+ FixedDoubleArray* to = FixedDoubleArray::cast(to_base);
Object* the_hole = from->GetHeap()->the_hole_value();
for (uint32_t from_end = from_start + copy_size;
from_start < from_end; from_start++, to_start++) {
@@ -431,23 +450,25 @@ static void CopyObjectToDoubleElements(FixedArray* from,
}
-static void CopyDictionaryToDoubleElements(SeededNumberDictionary* from,
+static void CopyDictionaryToDoubleElements(FixedArrayBase* from_base,
uint32_t from_start,
- FixedDoubleArray* to,
+ FixedArrayBase* to_base,
uint32_t to_start,
int raw_copy_size) {
+ SeededNumberDictionary* from = SeededNumberDictionary::cast(from_base);
int copy_size = raw_copy_size;
if (copy_size < 0) {
ASSERT(copy_size == ElementsAccessor::kCopyToEnd ||
copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
copy_size = from->max_number_key() + 1 - from_start;
if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
- for (int i = to_start + copy_size; i < to->length(); ++i) {
- to->set_the_hole(i);
+ for (int i = to_start + copy_size; i < to_base->length(); ++i) {
+ FixedDoubleArray::cast(to_base)->set_the_hole(i);
}
}
}
if (copy_size == 0) return;
+ FixedDoubleArray* to = FixedDoubleArray::cast(to_base);
uint32_t to_length = to->length();
if (to_start + copy_size > to_length) {
copy_size = to_length - to_start;
@@ -463,6 +484,66 @@ static void CopyDictionaryToDoubleElements(SeededNumberDictionary* from,
}
+static void TraceTopFrame(Isolate* isolate) {
+ StackFrameIterator it(isolate);
+ if (it.done()) {
+ PrintF("unknown location (no JavaScript frames present)");
+ return;
+ }
+ StackFrame* raw_frame = it.frame();
+ if (raw_frame->is_internal()) {
+    Code* apply_builtin = isolate->builtins()->builtin(
+        Builtins::kFunctionApply);
+ if (raw_frame->unchecked_code() == apply_builtin) {
+ PrintF("apply from ");
+ it.Advance();
+ raw_frame = it.frame();
+ }
+ }
+ JavaScriptFrame::PrintTop(isolate, stdout, false, true);
+}
+
+
+void CheckArrayAbuse(JSObject* obj, const char* op, uint32_t key,
+ bool allow_appending) {
+ Object* raw_length = NULL;
+ const char* elements_type = "array";
+ if (obj->IsJSArray()) {
+ JSArray* array = JSArray::cast(obj);
+ raw_length = array->length();
+ } else {
+ raw_length = Smi::FromInt(obj->elements()->length());
+ elements_type = "object";
+ }
+
+ if (raw_length->IsNumber()) {
+ double n = raw_length->Number();
+ if (FastI2D(FastD2UI(n)) == n) {
+ int32_t int32_length = DoubleToInt32(n);
+ uint32_t compare_length = static_cast<uint32_t>(int32_length);
+ if (allow_appending) compare_length++;
+ if (key >= compare_length) {
+ PrintF("[OOB %s %s (%s length = %d, element accessed = %d) in ",
+ elements_type, op, elements_type,
+ static_cast<int>(int32_length),
+ static_cast<int>(key));
+ TraceTopFrame(obj->GetIsolate());
+ PrintF("]\n");
+ }
+ } else {
+ PrintF("[%s elements length not integer value in ", elements_type);
+ TraceTopFrame(obj->GetIsolate());
+ PrintF("]\n");
+ }
+ } else {
+ PrintF("[%s elements length not a number in ", elements_type);
+ TraceTopFrame(obj->GetIsolate());
+ PrintF("]\n");
+ }
+}
+
+
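A minimal standalone sketch (plain C++, no V8 types) of the bounds predicate that CheckArrayAbuse applies above; the tracing itself is gated on the --trace-js-array-abuse and --trace-external-array-abuse flags wired up later in this hunk. All names below are illustrative only.

    #include <cstdint>
    #include <cstdio>

    // Illustrative sketch, not V8 code. Mirrors the core check: an access at
    // 'key' is out of bounds when it lies at or past the current length,
    // except that an append may target exactly one slot past the end.
    static bool IsOutOfBounds(uint32_t key, uint32_t length,
                              bool allow_appending) {
      uint32_t compare_length = length;
      if (allow_appending) compare_length++;
      return key >= compare_length;
    }

    int main() {
      std::printf("%d %d %d\n",
                  IsOutOfBounds(3, 3, false),  // 1: read past the end
                  IsOutOfBounds(3, 3, true),   // 0: legal append at 'length'
                  IsOutOfBounds(4, 3, true));  // 1: hole-creating write
      return 0;
    }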
// Base class for element handler implementations. Contains the
// common logic for objects with different ElementsKinds.
// Subclasses must specialize methods for which the element
@@ -527,7 +608,7 @@ class ElementsAccessorBase : public ElementsAccessor {
static bool HasElementImpl(Object* receiver,
JSObject* holder,
uint32_t key,
- BackingStore* backing_store) {
+ FixedArrayBase* backing_store) {
return ElementsAccessorSubclass::GetAttributesImpl(
receiver, holder, key, backing_store) != ABSENT;
}
@@ -540,7 +621,7 @@ class ElementsAccessorBase : public ElementsAccessor {
backing_store = holder->elements();
}
return ElementsAccessorSubclass::HasElementImpl(
- receiver, holder, key, BackingStore::cast(backing_store));
+ receiver, holder, key, backing_store);
}
MUST_USE_RESULT virtual MaybeObject* Get(Object* receiver,
@@ -550,16 +631,27 @@ class ElementsAccessorBase : public ElementsAccessor {
if (backing_store == NULL) {
backing_store = holder->elements();
}
+
+ if (!IsExternalArrayElementsKind(ElementsTraits::Kind) &&
+ FLAG_trace_js_array_abuse) {
+ CheckArrayAbuse(holder, "elements read", key);
+ }
+
+ if (IsExternalArrayElementsKind(ElementsTraits::Kind) &&
+ FLAG_trace_external_array_abuse) {
+ CheckArrayAbuse(holder, "external elements read", key);
+ }
+
return ElementsAccessorSubclass::GetImpl(
- receiver, holder, key, BackingStore::cast(backing_store));
+ receiver, holder, key, backing_store);
}
MUST_USE_RESULT static MaybeObject* GetImpl(Object* receiver,
JSObject* obj,
uint32_t key,
- BackingStore* backing_store) {
+ FixedArrayBase* backing_store) {
return (key < ElementsAccessorSubclass::GetCapacityImpl(backing_store))
- ? backing_store->get(key)
+ ? BackingStore::cast(backing_store)->get(key)
: backing_store->GetHeap()->the_hole_value();
}
@@ -572,30 +664,74 @@ class ElementsAccessorBase : public ElementsAccessor {
backing_store = holder->elements();
}
return ElementsAccessorSubclass::GetAttributesImpl(
- receiver, holder, key, BackingStore::cast(backing_store));
+ receiver, holder, key, backing_store);
}
MUST_USE_RESULT static PropertyAttributes GetAttributesImpl(
Object* receiver,
JSObject* obj,
uint32_t key,
- BackingStore* backing_store) {
+ FixedArrayBase* backing_store) {
if (key >= ElementsAccessorSubclass::GetCapacityImpl(backing_store)) {
return ABSENT;
}
- return backing_store->is_the_hole(key) ? ABSENT : NONE;
+ return BackingStore::cast(backing_store)->is_the_hole(key) ? ABSENT : NONE;
+ }
+
+ MUST_USE_RESULT virtual PropertyType GetType(
+ Object* receiver,
+ JSObject* holder,
+ uint32_t key,
+ FixedArrayBase* backing_store) {
+ if (backing_store == NULL) {
+ backing_store = holder->elements();
+ }
+ return ElementsAccessorSubclass::GetTypeImpl(
+ receiver, holder, key, backing_store);
+ }
+
+ MUST_USE_RESULT static PropertyType GetTypeImpl(
+ Object* receiver,
+ JSObject* obj,
+ uint32_t key,
+ FixedArrayBase* backing_store) {
+ if (key >= ElementsAccessorSubclass::GetCapacityImpl(backing_store)) {
+ return NONEXISTENT;
+ }
+ return BackingStore::cast(backing_store)->is_the_hole(key)
+ ? NONEXISTENT : FIELD;
+ }
+
+ MUST_USE_RESULT virtual AccessorPair* GetAccessorPair(
+ Object* receiver,
+ JSObject* holder,
+ uint32_t key,
+ FixedArrayBase* backing_store) {
+ if (backing_store == NULL) {
+ backing_store = holder->elements();
+ }
+ return ElementsAccessorSubclass::GetAccessorPairImpl(
+ receiver, holder, key, backing_store);
+ }
+
+ MUST_USE_RESULT static AccessorPair* GetAccessorPairImpl(
+ Object* receiver,
+ JSObject* obj,
+ uint32_t key,
+ FixedArrayBase* backing_store) {
+ return NULL;
}
MUST_USE_RESULT virtual MaybeObject* SetLength(JSArray* array,
Object* length) {
return ElementsAccessorSubclass::SetLengthImpl(
- array, length, BackingStore::cast(array->elements()));
+ array, length, array->elements());
}
MUST_USE_RESULT static MaybeObject* SetLengthImpl(
JSObject* obj,
Object* length,
- BackingStore* backing_store);
+ FixedArrayBase* backing_store);
MUST_USE_RESULT virtual MaybeObject* SetCapacityAndLength(
JSArray* array,
@@ -622,7 +758,7 @@ class ElementsAccessorBase : public ElementsAccessor {
MUST_USE_RESULT static MaybeObject* CopyElementsImpl(FixedArrayBase* from,
uint32_t from_start,
FixedArrayBase* to,
- ElementsKind to_kind,
+ ElementsKind from_kind,
uint32_t to_start,
int packed_size,
int copy_size) {
@@ -632,8 +768,8 @@ class ElementsAccessorBase : public ElementsAccessor {
MUST_USE_RESULT virtual MaybeObject* CopyElements(JSObject* from_holder,
uint32_t from_start,
+ ElementsKind from_kind,
FixedArrayBase* to,
- ElementsKind to_kind,
uint32_t to_start,
int copy_size,
FixedArrayBase* from) {
@@ -643,8 +779,7 @@ class ElementsAccessorBase : public ElementsAccessor {
}
if (from_holder) {
- ElementsKind elements_kind = from_holder->GetElementsKind();
- bool is_packed = IsFastPackedElementsKind(elements_kind) &&
+ bool is_packed = IsFastPackedElementsKind(from_kind) &&
from_holder->IsJSArray();
if (is_packed) {
packed_size = Smi::cast(JSArray::cast(from_holder)->length())->value();
@@ -653,11 +788,8 @@ class ElementsAccessorBase : public ElementsAccessor {
}
}
}
- if (from->length() == 0) {
- return from;
- }
return ElementsAccessorSubclass::CopyElementsImpl(
- from, from_start, to, to_kind, to_start, packed_size, copy_size);
+ from, from_start, to, from_kind, to_start, packed_size, copy_size);
}
MUST_USE_RESULT virtual MaybeObject* AddElementsToFixedArray(
@@ -676,25 +808,22 @@ class ElementsAccessorBase : public ElementsAccessor {
if (from == NULL) {
from = holder->elements();
}
- BackingStore* backing_store = BackingStore::cast(from);
- uint32_t len1 = ElementsAccessorSubclass::GetCapacityImpl(backing_store);
// Optimize if 'other' is empty.
// We cannot optimize if 'this' is empty, as other may have holes.
+ uint32_t len1 = ElementsAccessorSubclass::GetCapacityImpl(from);
if (len1 == 0) return to;
// Compute how many elements are not in other.
uint32_t extra = 0;
for (uint32_t y = 0; y < len1; y++) {
- uint32_t key =
- ElementsAccessorSubclass::GetKeyForIndexImpl(backing_store, y);
+ uint32_t key = ElementsAccessorSubclass::GetKeyForIndexImpl(from, y);
if (ElementsAccessorSubclass::HasElementImpl(
- receiver, holder, key, backing_store)) {
+ receiver, holder, key, from)) {
MaybeObject* maybe_value =
- ElementsAccessorSubclass::GetImpl(receiver, holder,
- key, backing_store);
+ ElementsAccessorSubclass::GetImpl(receiver, holder, key, from);
Object* value;
- if (!maybe_value->ToObject(&value)) return maybe_value;
+ if (!maybe_value->To(&value)) return maybe_value;
ASSERT(!value->IsTheHole());
if (!HasKey(to, value)) {
extra++;
@@ -706,9 +835,8 @@ class ElementsAccessorBase : public ElementsAccessor {
// Allocate the result
FixedArray* result;
- MaybeObject* maybe_obj =
- backing_store->GetHeap()->AllocateFixedArray(len0 + extra);
- if (!maybe_obj->To<FixedArray>(&result)) return maybe_obj;
+ MaybeObject* maybe_obj = from->GetHeap()->AllocateFixedArray(len0 + extra);
+ if (!maybe_obj->To(&result)) return maybe_obj;
// Fill in the content
{
@@ -724,14 +852,13 @@ class ElementsAccessorBase : public ElementsAccessor {
uint32_t index = 0;
for (uint32_t y = 0; y < len1; y++) {
uint32_t key =
- ElementsAccessorSubclass::GetKeyForIndexImpl(backing_store, y);
+ ElementsAccessorSubclass::GetKeyForIndexImpl(from, y);
if (ElementsAccessorSubclass::HasElementImpl(
- receiver, holder, key, backing_store)) {
+ receiver, holder, key, from)) {
MaybeObject* maybe_value =
- ElementsAccessorSubclass::GetImpl(receiver, holder,
- key, backing_store);
+ ElementsAccessorSubclass::GetImpl(receiver, holder, key, from);
Object* value;
- if (!maybe_value->ToObject(&value)) return maybe_value;
+ if (!maybe_value->To(&value)) return maybe_value;
if (!value->IsTheHole() && !HasKey(to, value)) {
result->set(len0 + index, value);
index++;
@@ -743,24 +870,22 @@ class ElementsAccessorBase : public ElementsAccessor {
}
protected:
- static uint32_t GetCapacityImpl(BackingStore* backing_store) {
+ static uint32_t GetCapacityImpl(FixedArrayBase* backing_store) {
return backing_store->length();
}
virtual uint32_t GetCapacity(FixedArrayBase* backing_store) {
- return ElementsAccessorSubclass::GetCapacityImpl(
- BackingStore::cast(backing_store));
+ return ElementsAccessorSubclass::GetCapacityImpl(backing_store);
}
- static uint32_t GetKeyForIndexImpl(BackingStore* backing_store,
+ static uint32_t GetKeyForIndexImpl(FixedArrayBase* backing_store,
uint32_t index) {
return index;
}
virtual uint32_t GetKeyForIndex(FixedArrayBase* backing_store,
uint32_t index) {
- return ElementsAccessorSubclass::GetKeyForIndexImpl(
- BackingStore::cast(backing_store), index);
+ return ElementsAccessorSubclass::GetKeyForIndexImpl(backing_store, index);
}
private:
@@ -786,7 +911,7 @@ class FastElementsAccessor
  // Adjusts the length of the fast backing store and returns the new length,
  // or returns undefined in case conversion to a slow backing store should be
  // performed.
- static MaybeObject* SetLengthWithoutNormalize(BackingStore* backing_store,
+ static MaybeObject* SetLengthWithoutNormalize(FixedArrayBase* backing_store,
JSArray* array,
Object* length_object,
uint32_t length) {
@@ -824,7 +949,7 @@ class FastElementsAccessor
// Otherwise, fill the unused tail with holes.
int old_length = FastD2IChecked(array->length()->Number());
for (int i = length; i < old_length; i++) {
- backing_store->set_the_hole(i);
+ BackingStore::cast(backing_store)->set_the_hole(i);
}
}
return length_object;
@@ -861,9 +986,8 @@ class FastElementsAccessor
bool is_non_strict_arguments_elements_map =
backing_store->map() == heap->non_strict_arguments_elements_map();
if (is_non_strict_arguments_elements_map) {
- backing_store =
- KindTraits::BackingStore::cast(
- FixedArray::cast(backing_store)->get(1));
+ backing_store = KindTraits::BackingStore::cast(
+ FixedArray::cast(backing_store)->get(1));
}
uint32_t length = static_cast<uint32_t>(
obj->IsJSArray()
@@ -919,11 +1043,11 @@ class FastElementsAccessor
Object* receiver,
JSObject* holder,
uint32_t key,
- typename KindTraits::BackingStore* backing_store) {
+ FixedArrayBase* backing_store) {
if (key >= static_cast<uint32_t>(backing_store->length())) {
return false;
}
- return !backing_store->is_the_hole(key);
+ return !BackingStore::cast(backing_store)->is_the_hole(key);
}
static void ValidateContents(JSObject* holder, int length) {
@@ -950,6 +1074,41 @@ class FastElementsAccessor
};
+static inline ElementsKind ElementsKindForArray(FixedArrayBase* array) {
+ switch (array->map()->instance_type()) {
+ case FIXED_ARRAY_TYPE:
+ if (array->IsDictionary()) {
+ return DICTIONARY_ELEMENTS;
+ } else {
+ return FAST_HOLEY_ELEMENTS;
+ }
+ case FIXED_DOUBLE_ARRAY_TYPE:
+ return FAST_HOLEY_DOUBLE_ELEMENTS;
+ case EXTERNAL_BYTE_ARRAY_TYPE:
+ return EXTERNAL_BYTE_ELEMENTS;
+ case EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE:
+ return EXTERNAL_UNSIGNED_BYTE_ELEMENTS;
+ case EXTERNAL_SHORT_ARRAY_TYPE:
+ return EXTERNAL_SHORT_ELEMENTS;
+ case EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE:
+ return EXTERNAL_UNSIGNED_SHORT_ELEMENTS;
+ case EXTERNAL_INT_ARRAY_TYPE:
+ return EXTERNAL_INT_ELEMENTS;
+ case EXTERNAL_UNSIGNED_INT_ARRAY_TYPE:
+ return EXTERNAL_UNSIGNED_INT_ELEMENTS;
+ case EXTERNAL_FLOAT_ARRAY_TYPE:
+ return EXTERNAL_FLOAT_ELEMENTS;
+ case EXTERNAL_DOUBLE_ARRAY_TYPE:
+ return EXTERNAL_DOUBLE_ELEMENTS;
+ case EXTERNAL_PIXEL_ARRAY_TYPE:
+ return EXTERNAL_PIXEL_ELEMENTS;
+ default:
+ UNREACHABLE();
+ }
+ return FAST_HOLEY_ELEMENTS;
+}
+
+
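This helper centralizes the instance-type-to-kind mapping; ElementsAccessor::ForArray near the end of this file reduces to a table lookup through it. A sketch of the reduced form (shown in full further below in this patch):

    ElementsAccessor* ElementsAccessor::ForArray(FixedArrayBase* array) {
      return elements_accessors_[ElementsKindForArray(array)];
    }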
template<typename FastElementsAccessorSubclass,
typename KindTraits>
class FastSmiOrObjectElementsAccessor
@@ -965,36 +1124,49 @@ class FastSmiOrObjectElementsAccessor
static MaybeObject* CopyElementsImpl(FixedArrayBase* from,
uint32_t from_start,
FixedArrayBase* to,
- ElementsKind to_kind,
+ ElementsKind from_kind,
uint32_t to_start,
int packed_size,
int copy_size) {
- if (IsFastSmiOrObjectElementsKind(to_kind)) {
- CopyObjectToObjectElements(
- FixedArray::cast(from), KindTraits::Kind, from_start,
- FixedArray::cast(to), to_kind, to_start, copy_size);
- } else if (IsFastDoubleElementsKind(to_kind)) {
- if (IsFastSmiElementsKind(KindTraits::Kind)) {
- if (IsFastPackedElementsKind(KindTraits::Kind) &&
- packed_size != kPackedSizeNotKnown) {
- CopyPackedSmiToDoubleElements(
- FixedArray::cast(from), from_start,
- FixedDoubleArray::cast(to), to_start,
- packed_size, copy_size);
- } else {
- CopySmiToDoubleElements(
- FixedArray::cast(from), from_start,
- FixedDoubleArray::cast(to), to_start, copy_size);
- }
- } else {
- CopyObjectToDoubleElements(
- FixedArray::cast(from), from_start,
- FixedDoubleArray::cast(to), to_start, copy_size);
+ ElementsKind to_kind = KindTraits::Kind;
+ switch (from_kind) {
+ case FAST_SMI_ELEMENTS:
+ case FAST_HOLEY_SMI_ELEMENTS:
+ case FAST_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS:
+ CopyObjectToObjectElements(
+ from, from_kind, from_start, to, to_kind, to_start, copy_size);
+ return to->GetHeap()->undefined_value();
+ case FAST_DOUBLE_ELEMENTS:
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
+ return CopyDoubleToObjectElements(
+ from, from_start, to, to_kind, to_start, copy_size);
+ case DICTIONARY_ELEMENTS:
+ CopyDictionaryToObjectElements(
+ from, from_start, to, to_kind, to_start, copy_size);
+ return to->GetHeap()->undefined_value();
+ case NON_STRICT_ARGUMENTS_ELEMENTS: {
+ // TODO(verwaest): This is a temporary hack to support extending
+ // NON_STRICT_ARGUMENTS_ELEMENTS in SetFastElementsCapacityAndLength.
+ // This case should be UNREACHABLE().
+ FixedArray* parameter_map = FixedArray::cast(from);
+ FixedArrayBase* arguments = FixedArrayBase::cast(parameter_map->get(1));
+ ElementsKind from_kind = ElementsKindForArray(arguments);
+ return CopyElementsImpl(arguments, from_start, to, from_kind,
+ to_start, packed_size, copy_size);
}
- } else {
- UNREACHABLE();
+ case EXTERNAL_BYTE_ELEMENTS:
+ case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ case EXTERNAL_SHORT_ELEMENTS:
+ case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+ case EXTERNAL_INT_ELEMENTS:
+ case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+ case EXTERNAL_FLOAT_ELEMENTS:
+ case EXTERNAL_DOUBLE_ELEMENTS:
+ case EXTERNAL_PIXEL_ELEMENTS:
+ UNREACHABLE();
}
- return to->GetHeap()->undefined_value();
+ return NULL;
}
@@ -1083,25 +1255,40 @@ class FastDoubleElementsAccessor
static MaybeObject* CopyElementsImpl(FixedArrayBase* from,
uint32_t from_start,
FixedArrayBase* to,
- ElementsKind to_kind,
+ ElementsKind from_kind,
uint32_t to_start,
int packed_size,
int copy_size) {
- switch (to_kind) {
+ switch (from_kind) {
case FAST_SMI_ELEMENTS:
- case FAST_ELEMENTS:
+ CopyPackedSmiToDoubleElements(
+ from, from_start, to, to_start, packed_size, copy_size);
+ break;
case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- return CopyDoubleToObjectElements(
- FixedDoubleArray::cast(from), from_start, FixedArray::cast(to),
- to_kind, to_start, copy_size);
+ CopySmiToDoubleElements(from, from_start, to, to_start, copy_size);
+ break;
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
- CopyDoubleToDoubleElements(FixedDoubleArray::cast(from), from_start,
- FixedDoubleArray::cast(to),
- to_start, copy_size);
- return from;
- default:
+ CopyDoubleToDoubleElements(from, from_start, to, to_start, copy_size);
+ break;
+ case FAST_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS:
+ CopyObjectToDoubleElements(from, from_start, to, to_start, copy_size);
+ break;
+ case DICTIONARY_ELEMENTS:
+ CopyDictionaryToDoubleElements(
+ from, from_start, to, to_start, copy_size);
+ break;
+ case NON_STRICT_ARGUMENTS_ELEMENTS:
+ case EXTERNAL_BYTE_ELEMENTS:
+ case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ case EXTERNAL_SHORT_ELEMENTS:
+ case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+ case EXTERNAL_INT_ELEMENTS:
+ case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+ case EXTERNAL_FLOAT_ELEMENTS:
+ case EXTERNAL_DOUBLE_ELEMENTS:
+ case EXTERNAL_PIXEL_ELEMENTS:
UNREACHABLE();
}
return to->GetHeap()->undefined_value();
@@ -1158,10 +1345,10 @@ class ExternalElementsAccessor
MUST_USE_RESULT static MaybeObject* GetImpl(Object* receiver,
JSObject* obj,
uint32_t key,
- BackingStore* backing_store) {
+ FixedArrayBase* backing_store) {
return
key < ExternalElementsAccessorSubclass::GetCapacityImpl(backing_store)
- ? backing_store->get(key)
+ ? BackingStore::cast(backing_store)->get(key)
: backing_store->GetHeap()->undefined_value();
}
@@ -1169,16 +1356,26 @@ class ExternalElementsAccessor
Object* receiver,
JSObject* obj,
uint32_t key,
- BackingStore* backing_store) {
+ FixedArrayBase* backing_store) {
return
key < ExternalElementsAccessorSubclass::GetCapacityImpl(backing_store)
- ? NONE : ABSENT;
+ ? NONE : ABSENT;
+ }
+
+ MUST_USE_RESULT static PropertyType GetTypeImpl(
+ Object* receiver,
+ JSObject* obj,
+ uint32_t key,
+ FixedArrayBase* backing_store) {
+ return
+ key < ExternalElementsAccessorSubclass::GetCapacityImpl(backing_store)
+ ? FIELD : NONEXISTENT;
}
MUST_USE_RESULT static MaybeObject* SetLengthImpl(
JSObject* obj,
Object* length,
- BackingStore* backing_store) {
+ FixedArrayBase* backing_store) {
// External arrays do not support changing their length.
UNREACHABLE();
return obj;
@@ -1194,7 +1391,7 @@ class ExternalElementsAccessor
static bool HasElementImpl(Object* receiver,
JSObject* holder,
uint32_t key,
- BackingStore* backing_store) {
+ FixedArrayBase* backing_store) {
uint32_t capacity =
ExternalElementsAccessorSubclass::GetCapacityImpl(backing_store);
return key < capacity;
@@ -1303,10 +1500,11 @@ class DictionaryElementsAccessor
// Adjusts the length of the dictionary backing store and returns the new
// length according to ES5 section 15.4.5.2 behavior.
MUST_USE_RESULT static MaybeObject* SetLengthWithoutNormalize(
- SeededNumberDictionary* dict,
+ FixedArrayBase* store,
JSArray* array,
Object* length_object,
uint32_t length) {
+ SeededNumberDictionary* dict = SeededNumberDictionary::cast(store);
Heap* heap = array->GetHeap();
int capacity = dict->Capacity();
uint32_t new_length = length;
@@ -1379,7 +1577,7 @@ class DictionaryElementsAccessor
if (mode == JSObject::STRICT_DELETION) {
// Deleting a non-configurable property in strict mode.
HandleScope scope(isolate);
- Handle<Object> holder(obj);
+ Handle<Object> holder(obj, isolate);
Handle<Object> name = isolate->factory()->NewNumberFromUint(key);
Handle<Object> args[2] = { name, holder };
Handle<Object> error =
@@ -1406,29 +1604,12 @@ class DictionaryElementsAccessor
MUST_USE_RESULT static MaybeObject* CopyElementsImpl(FixedArrayBase* from,
uint32_t from_start,
FixedArrayBase* to,
- ElementsKind to_kind,
+ ElementsKind from_kind,
uint32_t to_start,
int packed_size,
int copy_size) {
- switch (to_kind) {
- case FAST_SMI_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- CopyDictionaryToObjectElements(
- SeededNumberDictionary::cast(from), from_start,
- FixedArray::cast(to), to_kind, to_start, copy_size);
- return from;
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- CopyDictionaryToDoubleElements(
- SeededNumberDictionary::cast(from), from_start,
- FixedDoubleArray::cast(to), to_start, copy_size);
- return from;
- default:
- UNREACHABLE();
- }
- return to->GetHeap()->undefined_value();
+ UNREACHABLE();
+ return NULL;
}
@@ -1446,7 +1627,8 @@ class DictionaryElementsAccessor
Object* receiver,
JSObject* obj,
uint32_t key,
- SeededNumberDictionary* backing_store) {
+ FixedArrayBase* store) {
+ SeededNumberDictionary* backing_store = SeededNumberDictionary::cast(store);
int entry = backing_store->FindEntry(key);
if (entry != SeededNumberDictionary::kNotFound) {
Object* element = backing_store->ValueAt(entry);
@@ -1467,24 +1649,55 @@ class DictionaryElementsAccessor
Object* receiver,
JSObject* obj,
uint32_t key,
- SeededNumberDictionary* backing_store) {
- int entry = backing_store->FindEntry(key);
+ FixedArrayBase* backing_store) {
+ SeededNumberDictionary* dictionary =
+ SeededNumberDictionary::cast(backing_store);
+ int entry = dictionary->FindEntry(key);
if (entry != SeededNumberDictionary::kNotFound) {
- return backing_store->DetailsAt(entry).attributes();
+ return dictionary->DetailsAt(entry).attributes();
}
return ABSENT;
}
+ MUST_USE_RESULT static PropertyType GetTypeImpl(
+ Object* receiver,
+ JSObject* obj,
+ uint32_t key,
+ FixedArrayBase* store) {
+ SeededNumberDictionary* backing_store = SeededNumberDictionary::cast(store);
+ int entry = backing_store->FindEntry(key);
+ if (entry != SeededNumberDictionary::kNotFound) {
+ return backing_store->DetailsAt(entry).type();
+ }
+ return NONEXISTENT;
+ }
+
+ MUST_USE_RESULT static AccessorPair* GetAccessorPairImpl(
+ Object* receiver,
+ JSObject* obj,
+ uint32_t key,
+ FixedArrayBase* store) {
+ SeededNumberDictionary* backing_store = SeededNumberDictionary::cast(store);
+ int entry = backing_store->FindEntry(key);
+ if (entry != SeededNumberDictionary::kNotFound &&
+ backing_store->DetailsAt(entry).type() == CALLBACKS &&
+ backing_store->ValueAt(entry)->IsAccessorPair()) {
+ return AccessorPair::cast(backing_store->ValueAt(entry));
+ }
+ return NULL;
+ }
+
static bool HasElementImpl(Object* receiver,
JSObject* holder,
uint32_t key,
- SeededNumberDictionary* backing_store) {
- return backing_store->FindEntry(key) !=
+ FixedArrayBase* backing_store) {
+ return SeededNumberDictionary::cast(backing_store)->FindEntry(key) !=
SeededNumberDictionary::kNotFound;
}
- static uint32_t GetKeyForIndexImpl(SeededNumberDictionary* dict,
+ static uint32_t GetKeyForIndexImpl(FixedArrayBase* store,
uint32_t index) {
+ SeededNumberDictionary* dict = SeededNumberDictionary::cast(store);
Object* key = dict->KeyAt(index);
return Smi::cast(key)->value();
}
@@ -1507,7 +1720,8 @@ class NonStrictArgumentsElementsAccessor : public ElementsAccessorBase<
MUST_USE_RESULT static MaybeObject* GetImpl(Object* receiver,
JSObject* obj,
uint32_t key,
- FixedArray* parameter_map) {
+ FixedArrayBase* parameters) {
+ FixedArray* parameter_map = FixedArray::cast(parameters);
Object* probe = GetParameterMapArg(obj, parameter_map, key);
if (!probe->IsTheHole()) {
Context* context = Context::cast(parameter_map->get(0));
@@ -1538,7 +1752,8 @@ class NonStrictArgumentsElementsAccessor : public ElementsAccessorBase<
Object* receiver,
JSObject* obj,
uint32_t key,
- FixedArray* parameter_map) {
+ FixedArrayBase* backing_store) {
+ FixedArray* parameter_map = FixedArray::cast(backing_store);
Object* probe = GetParameterMapArg(obj, parameter_map, key);
if (!probe->IsTheHole()) {
return NONE;
@@ -1550,10 +1765,44 @@ class NonStrictArgumentsElementsAccessor : public ElementsAccessorBase<
}
}
+ MUST_USE_RESULT static PropertyType GetTypeImpl(
+ Object* receiver,
+ JSObject* obj,
+ uint32_t key,
+ FixedArrayBase* parameters) {
+ FixedArray* parameter_map = FixedArray::cast(parameters);
+ Object* probe = GetParameterMapArg(obj, parameter_map, key);
+ if (!probe->IsTheHole()) {
+ return FIELD;
+ } else {
+ // If not aliased, check the arguments.
+ FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
+ return ElementsAccessor::ForArray(arguments)->GetType(
+ receiver, obj, key, arguments);
+ }
+ }
+
+ MUST_USE_RESULT static AccessorPair* GetAccessorPairImpl(
+ Object* receiver,
+ JSObject* obj,
+ uint32_t key,
+ FixedArrayBase* parameters) {
+ FixedArray* parameter_map = FixedArray::cast(parameters);
+ Object* probe = GetParameterMapArg(obj, parameter_map, key);
+ if (!probe->IsTheHole()) {
+ return NULL;
+ } else {
+ // If not aliased, check the arguments.
+ FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
+ return ElementsAccessor::ForArray(arguments)->GetAccessorPair(
+ receiver, obj, key, arguments);
+ }
+ }
+
MUST_USE_RESULT static MaybeObject* SetLengthImpl(
JSObject* obj,
Object* length,
- FixedArray* parameter_map) {
+ FixedArrayBase* parameter_map) {
// TODO(mstarzinger): This was never implemented but will be used once we
// correctly implement [[DefineOwnProperty]] on arrays.
UNIMPLEMENTED();
@@ -1587,24 +1836,22 @@ class NonStrictArgumentsElementsAccessor : public ElementsAccessorBase<
MUST_USE_RESULT static MaybeObject* CopyElementsImpl(FixedArrayBase* from,
uint32_t from_start,
FixedArrayBase* to,
- ElementsKind to_kind,
+ ElementsKind from_kind,
uint32_t to_start,
int packed_size,
int copy_size) {
- FixedArray* parameter_map = FixedArray::cast(from);
- FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
- ElementsAccessor* accessor = ElementsAccessor::ForArray(arguments);
- return accessor->CopyElements(NULL, from_start, to, to_kind,
- to_start, copy_size, arguments);
+ UNREACHABLE();
+ return NULL;
}
- static uint32_t GetCapacityImpl(FixedArray* parameter_map) {
+ static uint32_t GetCapacityImpl(FixedArrayBase* backing_store) {
+ FixedArray* parameter_map = FixedArray::cast(backing_store);
FixedArrayBase* arguments = FixedArrayBase::cast(parameter_map->get(1));
return Max(static_cast<uint32_t>(parameter_map->length() - 2),
ForArray(arguments)->GetCapacity(arguments));
}
- static uint32_t GetKeyForIndexImpl(FixedArray* dict,
+ static uint32_t GetKeyForIndexImpl(FixedArrayBase* dict,
uint32_t index) {
return index;
}
@@ -1612,12 +1859,14 @@ class NonStrictArgumentsElementsAccessor : public ElementsAccessorBase<
static bool HasElementImpl(Object* receiver,
JSObject* holder,
uint32_t key,
- FixedArray* parameter_map) {
+ FixedArrayBase* parameters) {
+ FixedArray* parameter_map = FixedArray::cast(parameters);
Object* probe = GetParameterMapArg(holder, parameter_map, key);
if (!probe->IsTheHole()) {
return true;
} else {
- FixedArrayBase* arguments = FixedArrayBase::cast(parameter_map->get(1));
+      FixedArrayBase* arguments =
+          FixedArrayBase::cast(parameter_map->get(1));
ElementsAccessor* accessor = ElementsAccessor::ForArray(arguments);
return !accessor->Get(receiver, holder, key, arguments)->IsTheHole();
}
@@ -1630,7 +1879,7 @@ class NonStrictArgumentsElementsAccessor : public ElementsAccessorBase<
uint32_t length = holder->IsJSArray()
? Smi::cast(JSArray::cast(holder)->length())->value()
: parameter_map->length();
- return key < (length - 2 )
+ return key < (length - 2)
? parameter_map->get(key + 2)
: parameter_map->GetHeap()->the_hole_value();
}
@@ -1638,35 +1887,7 @@ class NonStrictArgumentsElementsAccessor : public ElementsAccessorBase<
ElementsAccessor* ElementsAccessor::ForArray(FixedArrayBase* array) {
- switch (array->map()->instance_type()) {
- case FIXED_ARRAY_TYPE:
- if (array->IsDictionary()) {
- return elements_accessors_[DICTIONARY_ELEMENTS];
- } else {
- return elements_accessors_[FAST_HOLEY_ELEMENTS];
- }
- case EXTERNAL_BYTE_ARRAY_TYPE:
- return elements_accessors_[EXTERNAL_BYTE_ELEMENTS];
- case EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE:
- return elements_accessors_[EXTERNAL_UNSIGNED_BYTE_ELEMENTS];
- case EXTERNAL_SHORT_ARRAY_TYPE:
- return elements_accessors_[EXTERNAL_SHORT_ELEMENTS];
- case EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE:
- return elements_accessors_[EXTERNAL_UNSIGNED_SHORT_ELEMENTS];
- case EXTERNAL_INT_ARRAY_TYPE:
- return elements_accessors_[EXTERNAL_INT_ELEMENTS];
- case EXTERNAL_UNSIGNED_INT_ARRAY_TYPE:
- return elements_accessors_[EXTERNAL_UNSIGNED_INT_ELEMENTS];
- case EXTERNAL_FLOAT_ARRAY_TYPE:
- return elements_accessors_[EXTERNAL_FLOAT_ELEMENTS];
- case EXTERNAL_DOUBLE_ARRAY_TYPE:
- return elements_accessors_[EXTERNAL_DOUBLE_ELEMENTS];
- case EXTERNAL_PIXEL_ARRAY_TYPE:
- return elements_accessors_[EXTERNAL_PIXEL_ELEMENTS];
- default:
- UNREACHABLE();
- return NULL;
- }
+ return elements_accessors_[ElementsKindForArray(array)];
}
@@ -1697,7 +1918,7 @@ MUST_USE_RESULT MaybeObject* ElementsAccessorBase<ElementsAccessorSubclass,
ElementsKindTraits>::
SetLengthImpl(JSObject* obj,
Object* length,
- typename ElementsKindTraits::BackingStore* backing_store) {
+ FixedArrayBase* backing_store) {
JSArray* array = JSArray::cast(obj);
// Fast case: The new length fits into a Smi.
@@ -1753,4 +1974,100 @@ MUST_USE_RESULT MaybeObject* ElementsAccessorBase<ElementsAccessorSubclass,
}
+MUST_USE_RESULT MaybeObject* ArrayConstructInitializeElements(
+ JSArray* array, Arguments* args) {
+ Heap* heap = array->GetIsolate()->heap();
+
+ // Optimize the case where there is one argument and the argument is a
+ // small smi.
+ if (args->length() == 1) {
+ Object* obj = (*args)[0];
+ if (obj->IsSmi()) {
+ int len = Smi::cast(obj)->value();
+ if (len > 0 && len < JSObject::kInitialMaxFastElementArray) {
+ ElementsKind elements_kind = array->GetElementsKind();
+ MaybeObject* maybe_array = array->Initialize(len, len);
+ if (maybe_array->IsFailure()) return maybe_array;
+
+ if (!IsFastHoleyElementsKind(elements_kind)) {
+ elements_kind = GetHoleyElementsKind(elements_kind);
+ maybe_array = array->TransitionElementsKind(elements_kind);
+ if (maybe_array->IsFailure()) return maybe_array;
+ }
+
+ return array;
+ } else if (len == 0) {
+ return array->Initialize(JSArray::kPreallocatedArrayElements);
+ }
+ }
+
+ // Take the argument as the length.
+ MaybeObject* maybe_obj = array->Initialize(0);
+ if (!maybe_obj->To(&obj)) return maybe_obj;
+
+ return array->SetElementsLength((*args)[0]);
+ }
+
+ // Optimize the case where there are no parameters passed.
+ if (args->length() == 0) {
+ return array->Initialize(JSArray::kPreallocatedArrayElements);
+ }
+
+ // Set length and elements on the array.
+ int number_of_elements = args->length();
+ MaybeObject* maybe_object =
+ array->EnsureCanContainElements(args, 0, number_of_elements,
+ ALLOW_CONVERTED_DOUBLE_ELEMENTS);
+ if (maybe_object->IsFailure()) return maybe_object;
+
+ // Allocate an appropriately typed elements array.
+ MaybeObject* maybe_elms;
+ ElementsKind elements_kind = array->GetElementsKind();
+ if (IsFastDoubleElementsKind(elements_kind)) {
+ maybe_elms = heap->AllocateUninitializedFixedDoubleArray(
+ number_of_elements);
+ } else {
+ maybe_elms = heap->AllocateFixedArrayWithHoles(number_of_elements);
+ }
+ FixedArrayBase* elms;
+ if (!maybe_elms->To(&elms)) return maybe_elms;
+
+ // Fill in the content
+ switch (array->GetElementsKind()) {
+ case FAST_HOLEY_SMI_ELEMENTS:
+ case FAST_SMI_ELEMENTS: {
+ FixedArray* smi_elms = FixedArray::cast(elms);
+ for (int index = 0; index < number_of_elements; index++) {
+ smi_elms->set(index, (*args)[index], SKIP_WRITE_BARRIER);
+ }
+ break;
+ }
+ case FAST_HOLEY_ELEMENTS:
+ case FAST_ELEMENTS: {
+ AssertNoAllocation no_gc;
+ WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
+ FixedArray* object_elms = FixedArray::cast(elms);
+ for (int index = 0; index < number_of_elements; index++) {
+ object_elms->set(index, (*args)[index], mode);
+ }
+ break;
+ }
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
+ case FAST_DOUBLE_ELEMENTS: {
+ FixedDoubleArray* double_elms = FixedDoubleArray::cast(elms);
+ for (int index = 0; index < number_of_elements; index++) {
+ double_elms->set(index, (*args)[index]->Number());
+ }
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
+ }
+
+ array->set_elements(elms);
+ array->set_length(Smi::FromInt(number_of_elements));
+ return array;
+}
+
} } // namespace v8::internal
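A minimal standalone sketch (plain C++, no V8 types) of the dispatch order implemented by ArrayConstructInitializeElements above; 'max_fast' stands in for JSObject::kInitialMaxFastElementArray and every name is illustrative.

    #include <cstdio>

    // Illustrative sketch, not V8 code.
    enum ArrayInitPath {
      kPreallocate,    // no arguments, or a single Smi 0
      kLengthOnly,     // a single small positive Smi: allocate that many slots
      kSlowSetLength,  // any other single argument: Initialize(0) then
                       // SetElementsLength
      kFromArguments   // two or more arguments: copy them into fresh elements
    };

    static ArrayInitPath Classify(int argc, bool is_smi, int smi_value,
                                  int max_fast) {
      if (argc == 0) return kPreallocate;
      if (argc == 1) {
        if (is_smi && smi_value == 0) return kPreallocate;
        if (is_smi && smi_value > 0 && smi_value < max_fast) return kLengthOnly;
        return kSlowSetLength;
      }
      return kFromArguments;
    }

    int main() {
      std::printf("%d %d %d %d\n",
                  Classify(0, false, 0, 10000),     // kPreallocate
                  Classify(1, true, 5, 10000),      // kLengthOnly
                  Classify(1, true, 50000, 10000),  // kSlowSetLength
                  Classify(3, false, 0, 10000));    // kFromArguments
      return 0;
    }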
diff --git a/src/3rdparty/v8/src/elements.h b/src/3rdparty/v8/src/elements.h
index 8a83f0f..6353aae 100644
--- a/src/3rdparty/v8/src/elements.h
+++ b/src/3rdparty/v8/src/elements.h
@@ -82,6 +82,28 @@ class ElementsAccessor {
uint32_t key,
FixedArrayBase* backing_store = NULL) = 0;
+ // Returns an element's type, or NONEXISTENT if there is no such
+ // element. This method doesn't iterate up the prototype chain. The caller
+ // can optionally pass in the backing store to use for the check, which must
+ // be compatible with the ElementsKind of the ElementsAccessor. If
+  // backing_store is NULL, holder->elements() is used as the backing store.
+ MUST_USE_RESULT virtual PropertyType GetType(
+ Object* receiver,
+ JSObject* holder,
+ uint32_t key,
+ FixedArrayBase* backing_store = NULL) = 0;
+
+  // Returns an element's accessors, or NULL if the element does not exist or
+  // is a plain data property. This method doesn't iterate up the prototype
+  // chain. The caller can optionally pass in the backing store to use for the
+  // check, which must be compatible with the ElementsKind of the
+  // ElementsAccessor. If backing_store is NULL, holder->elements() is used as
+  // the backing store.
+ MUST_USE_RESULT virtual AccessorPair* GetAccessorPair(
+ Object* receiver,
+ JSObject* holder,
+ uint32_t key,
+ FixedArrayBase* backing_store = NULL) = 0;
+
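A hypothetical call site for the two new queries (a sketch only; it assumes an accessor obtained for the holder's kind, e.g. via ElementsAccessor::ForKind(), with 'receiver', 'holder' and 'key' in scope):

    // Hypothetical call site; not part of this patch.
    ElementsAccessor* accessor =
        ElementsAccessor::ForKind(holder->GetElementsKind());
    if (accessor->GetType(receiver, holder, key) == NONEXISTENT) {
      // No own element at 'key'; a full lookup would continue up the
      // prototype chain.
    }
    AccessorPair* pair = accessor->GetAccessorPair(receiver, holder, key);
    if (pair != NULL) {
      // Element 'key' is defined by a getter/setter pair.
    }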
// Modifies the length data property as specified for JSArrays and resizes the
// underlying backing store accordingly. The method honors the semantics of
// changing array sizes as defined in EcmaScript 5.1 15.4.5.2, i.e. array that
@@ -121,17 +143,17 @@ class ElementsAccessor {
MUST_USE_RESULT virtual MaybeObject* CopyElements(
JSObject* source_holder,
uint32_t source_start,
+ ElementsKind source_kind,
FixedArrayBase* destination,
- ElementsKind destination_kind,
uint32_t destination_start,
int copy_size,
FixedArrayBase* source = NULL) = 0;
MUST_USE_RESULT MaybeObject* CopyElements(JSObject* from_holder,
FixedArrayBase* to,
- ElementsKind to_kind,
+ ElementsKind from_kind,
FixedArrayBase* from = NULL) {
- return CopyElements(from_holder, 0, to, to_kind, 0,
+ return CopyElements(from_holder, 0, from_kind, to, 0,
kCopyToEndAndInitializeToHole, from);
}
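Note that the kind parameter now describes the source rather than the destination and moves ahead of the destination array, so call sites must reorder their arguments. A sketch (all names are placeholders):

    // before: accessor->CopyElements(holder, 0, to_elms, to_kind, 0, size);
    // after:  accessor->CopyElements(holder, 0, from_kind, to_elms, 0, size);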
@@ -175,15 +197,11 @@ class ElementsAccessor {
DISALLOW_COPY_AND_ASSIGN(ElementsAccessor);
};
+void CheckArrayAbuse(JSObject* obj, const char* op, uint32_t key,
+ bool allow_appending = false);
-void CopyObjectToObjectElements(FixedArray* from_obj,
- ElementsKind from_kind,
- uint32_t from_start,
- FixedArray* to_obj,
- ElementsKind to_kind,
- uint32_t to_start,
- int copy_size);
-
+MUST_USE_RESULT MaybeObject* ArrayConstructInitializeElements(
+ JSArray* array, Arguments* args);
} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/execution.cc b/src/3rdparty/v8/src/execution.cc
index 913bf64..fc153b4 100644
--- a/src/3rdparty/v8/src/execution.cc
+++ b/src/3rdparty/v8/src/execution.cc
@@ -113,7 +113,7 @@ static Handle<Object> Invoke(bool is_construct,
// Save and restore context around invocation and block the
// allocation of handles without explicit handle scopes.
SaveContext save(isolate);
- NoHandleAllocation na;
+ NoHandleAllocation na(isolate);
JSEntryFunction stub_entry = FUNCTION_CAST<JSEntryFunction>(code->entry());
// Call the function through the right JS entry stub.
@@ -134,10 +134,10 @@ static Handle<Object> Invoke(bool is_construct,
// Update the pending exception flag and return the value.
*has_pending_exception = value->IsException();
- ASSERT(*has_pending_exception == Isolate::Current()->has_pending_exception());
+ ASSERT(*has_pending_exception == isolate->has_pending_exception());
if (*has_pending_exception) {
isolate->ReportPendingMessages();
- if (isolate->pending_exception() == Failure::OutOfMemoryException()) {
+ if (isolate->pending_exception()->IsOutOfMemory()) {
if (!isolate->ignore_out_of_memory()) {
V8::FatalProcessOutOfMemory("JS", true);
}
@@ -162,8 +162,7 @@ Handle<Object> Execution::Call(Handle<Object> callable,
int argc,
Handle<Object> argv[],
bool* pending_exception,
- bool convert_receiver)
-{
+ bool convert_receiver) {
return Call(callable, receiver, argc, argv, pending_exception,
convert_receiver, Handle<Object>());
}
@@ -191,7 +190,9 @@ Handle<Object> Execution::Call(Handle<Object> callable,
      // Under some circumstances, 'global' can be the JSBuiltinsObject.
// In that case, don't rewrite. (FWIW, the same holds for
// GetIsolate()->global_object()->global_receiver().)
- if (!global->IsJSBuiltinsObject()) receiver = Handle<Object>(global);
+ if (!global->IsJSBuiltinsObject()) {
+ receiver = Handle<Object>(global, func->GetIsolate());
+ }
} else {
receiver = ToObject(receiver, pending_exception);
}
@@ -206,7 +207,7 @@ Handle<Object> Execution::New(Handle<JSFunction> func,
int argc,
Handle<Object> argv[],
bool* pending_exception) {
- return Invoke(true, func, Isolate::Current()->global_object(), argc, argv,
+ return Invoke(true, func, func->GetIsolate()->global_object(), argc, argv,
pending_exception, Handle<Object>());
}
@@ -228,11 +229,14 @@ Handle<Object> Execution::TryCall(Handle<JSFunction> func,
Handle<Object> result = Invoke(false, func, receiver, argc, args,
caught_exception, Handle<Object>());
+ Isolate* isolate = func->GetIsolate();
if (*caught_exception) {
ASSERT(catcher.HasCaught());
- Isolate* isolate = Isolate::Current();
ASSERT(isolate->has_pending_exception());
ASSERT(isolate->external_caught_exception());
+ if (isolate->is_out_of_memory() && !isolate->ignore_out_of_memory()) {
+ V8::FatalProcessOutOfMemory("OOM during Execution::TryCall");
+ }
if (isolate->pending_exception() ==
isolate->heap()->termination_exception()) {
result = isolate->factory()->termination_exception();
@@ -242,8 +246,8 @@ Handle<Object> Execution::TryCall(Handle<JSFunction> func,
isolate->OptionalRescheduleException(true);
}
- ASSERT(!Isolate::Current()->has_pending_exception());
- ASSERT(!Isolate::Current()->external_caught_exception());
+ ASSERT(!isolate->has_pending_exception());
+ ASSERT(!isolate->external_caught_exception());
return result;
}
@@ -261,7 +265,7 @@ Handle<Object> Execution::GetFunctionDelegate(Handle<Object> object) {
while (fun->IsJSFunctionProxy()) {
fun = JSFunctionProxy::cast(fun)->call_trap();
}
- if (fun->IsJSFunction()) return Handle<Object>(fun);
+ if (fun->IsJSFunction()) return Handle<Object>(fun, isolate);
// Objects created through the API can have an instance-call handler
// that should be used when calling the object as a function.
@@ -285,7 +289,7 @@ Handle<Object> Execution::TryGetFunctionDelegate(Handle<Object> object,
while (fun->IsJSFunctionProxy()) {
fun = JSFunctionProxy::cast(fun)->call_trap();
}
- if (fun->IsJSFunction()) return Handle<Object>(fun);
+ if (fun->IsJSFunction()) return Handle<Object>(fun, isolate);
// Objects created through the API can have an instance-call handler
// that should be used when calling the object as a function.
@@ -318,7 +322,7 @@ Handle<Object> Execution::GetConstructorDelegate(Handle<Object> object) {
while (fun->IsJSFunctionProxy()) {
fun = JSFunctionProxy::cast(fun)->call_trap();
}
- if (fun->IsJSFunction()) return Handle<Object>(fun);
+ if (fun->IsJSFunction()) return Handle<Object>(fun, isolate);
// Objects created through the API can have an instance-call handler
// that should be used when calling the object as a function.
@@ -346,7 +350,7 @@ Handle<Object> Execution::TryGetConstructorDelegate(
while (fun->IsJSFunctionProxy()) {
fun = JSFunctionProxy::cast(fun)->call_trap();
}
- if (fun->IsJSFunction()) return Handle<Object>(fun);
+ if (fun->IsJSFunction()) return Handle<Object>(fun, isolate);
// Objects created through the API can have an instance-call handler
// that should be used when calling the object as a function.
@@ -449,25 +453,6 @@ void StackGuard::TerminateExecution() {
}
-bool StackGuard::IsRuntimeProfilerTick() {
- ExecutionAccess access(isolate_);
- return (thread_local_.interrupt_flags_ & RUNTIME_PROFILER_TICK) != 0;
-}
-
-
-void StackGuard::RequestRuntimeProfilerTick() {
- // Ignore calls if we're not optimizing or if we can't get the lock.
- if (FLAG_opt && ExecutionAccess::TryLock(isolate_)) {
- thread_local_.interrupt_flags_ |= RUNTIME_PROFILER_TICK;
- if (thread_local_.postpone_interrupts_nesting_ == 0) {
- thread_local_.jslimit_ = thread_local_.climit_ = kInterruptLimit;
- isolate_->heap()->SetStackLimits();
- }
- ExecutionAccess::Unlock(isolate_);
- }
-}
-
-
void StackGuard::RequestCodeReadyEvent() {
ASSERT(FLAG_parallel_recompilation);
if (ExecutionAccess::TryLock(isolate_)) {
@@ -637,7 +622,7 @@ void StackGuard::InitThread(const ExecutionAccess& lock) {
} while (false)
-Handle<Object> Execution::ToBoolean(Handle<Object> obj) {
+Handle<Object> Execution::ToBoolean(Isolate* isolate, Handle<Object> obj) {
// See the similar code in runtime.js:ToBoolean.
if (obj->IsBoolean()) return obj;
bool result = true;
@@ -649,7 +634,7 @@ Handle<Object> Execution::ToBoolean(Handle<Object> obj) {
double value = obj->Number();
result = !((value == 0) || isnan(value));
}
- return Handle<Object>(HEAP->ToBoolean(result));
+ return Handle<Object>(isolate->heap()->ToBoolean(result), isolate);
}
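Callers of Execution::ToBoolean must now pass the isolate explicitly, matching the header change further below. A sketch ('isolate' and 'obj' are placeholders in scope at the caller):

    // before: Handle<Object> b = Execution::ToBoolean(obj);
    // after:  Handle<Object> b = Execution::ToBoolean(isolate, obj);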
@@ -719,9 +704,8 @@ Handle<Object> Execution::CharAt(Handle<String> string, uint32_t index) {
return factory->undefined_value();
}
- Handle<Object> char_at =
- GetProperty(isolate->js_builtins_object(),
- factory->char_at_symbol());
+ Handle<Object> char_at = GetProperty(
+ isolate, isolate->js_builtins_object(), factory->char_at_string());
if (!char_at->IsJSFunction()) {
return factory->undefined_value();
}
@@ -822,7 +806,7 @@ Handle<String> Execution::GetStackTraceLine(Handle<Object> recv,
args,
&caught_exception);
if (caught_exception || !result->IsString()) {
- return isolate->factory()->empty_symbol();
+ return isolate->factory()->empty_string();
}
return Handle<String>::cast(result);
@@ -959,18 +943,14 @@ MaybeObject* Execution::HandleStackGuardInterrupt(Isolate* isolate) {
}
stack_guard->Continue(CODE_READY);
}
- if (!stack_guard->IsTerminateExecution()) {
+ if (!stack_guard->IsTerminateExecution() &&
+ !FLAG_manual_parallel_recompilation) {
isolate->optimizing_compiler_thread()->InstallOptimizedFunctions();
}
isolate->counters()->stack_interrupts()->Increment();
- // If FLAG_count_based_interrupts, every interrupt is a profiler interrupt.
- if (FLAG_count_based_interrupts ||
- stack_guard->IsRuntimeProfilerTick()) {
- isolate->counters()->runtime_profiler_ticks()->Increment();
- stack_guard->Continue(RUNTIME_PROFILER_TICK);
- isolate->runtime_profiler()->OptimizeNow();
- }
+ isolate->counters()->runtime_profiler_ticks()->Increment();
+ isolate->runtime_profiler()->OptimizeNow();
#ifdef ENABLE_DEBUGGER_SUPPORT
if (stack_guard->IsDebugBreak() || stack_guard->IsDebugCommand()) {
DebugBreakHelper();
diff --git a/src/3rdparty/v8/src/execution.h b/src/3rdparty/v8/src/execution.h
index 90219f5..7fc822e 100644
--- a/src/3rdparty/v8/src/execution.h
+++ b/src/3rdparty/v8/src/execution.h
@@ -41,9 +41,8 @@ enum InterruptFlag {
DEBUGCOMMAND = 1 << 2,
PREEMPT = 1 << 3,
TERMINATE = 1 << 4,
- RUNTIME_PROFILER_TICK = 1 << 5,
- GC_REQUEST = 1 << 6,
- CODE_READY = 1 << 7
+ GC_REQUEST = 1 << 5,
+ CODE_READY = 1 << 6
};
@@ -101,7 +100,7 @@ class Execution : public AllStatic {
bool* caught_exception);
// ECMA-262 9.2
- static Handle<Object> ToBoolean(Handle<Object> obj);
+ static Handle<Object> ToBoolean(Isolate* isolate, Handle<Object> obj);
// ECMA-262 9.3
static Handle<Object> ToNumber(Handle<Object> obj, bool* exc);
@@ -202,8 +201,6 @@ class StackGuard {
void Interrupt();
bool IsTerminateExecution();
void TerminateExecution();
- bool IsRuntimeProfilerTick();
- void RequestRuntimeProfilerTick();
bool IsCodeReadyEvent();
void RequestCodeReadyEvent();
#ifdef ENABLE_DEBUGGER_SUPPORT
diff --git a/src/3rdparty/v8/src/extensions/externalize-string-extension.cc b/src/3rdparty/v8/src/extensions/externalize-string-extension.cc
index 50d8761..76d2030 100644
--- a/src/3rdparty/v8/src/extensions/externalize-string-extension.cc
+++ b/src/3rdparty/v8/src/extensions/externalize-string-extension.cc
@@ -93,13 +93,13 @@ v8::Handle<v8::Value> ExternalizeStringExtension::Externalize(
return v8::ThrowException(v8::String::New(
"externalizeString() can't externalize twice."));
}
- if (string->IsAsciiRepresentation() && !force_two_byte) {
- char* data = new char[string->length()];
+ if (string->IsOneByteRepresentation() && !force_two_byte) {
+ uint8_t* data = new uint8_t[string->length()];
String::WriteToFlat(*string, data, 0, string->length());
SimpleAsciiStringResource* resource = new SimpleAsciiStringResource(
- data, string->length());
+ reinterpret_cast<char*>(data), string->length());
result = string->MakeExternal(resource);
- if (result && !string->IsSymbol()) {
+ if (result && !string->IsInternalizedString()) {
HEAP->external_string_table()->AddString(*string);
}
if (!result) delete resource;
@@ -109,7 +109,7 @@ v8::Handle<v8::Value> ExternalizeStringExtension::Externalize(
SimpleTwoByteStringResource* resource = new SimpleTwoByteStringResource(
data, string->length());
result = string->MakeExternal(resource);
- if (result && !string->IsSymbol()) {
+ if (result && !string->IsInternalizedString()) {
HEAP->external_string_table()->AddString(*string);
}
if (!result) delete resource;
@@ -127,7 +127,8 @@ v8::Handle<v8::Value> ExternalizeStringExtension::IsAscii(
return v8::ThrowException(v8::String::New(
"isAsciiString() requires a single string argument."));
}
- return Utils::OpenHandle(*args[0].As<v8::String>())->IsAsciiRepresentation() ?
+ return
+ Utils::OpenHandle(*args[0].As<v8::String>())->IsOneByteRepresentation() ?
v8::True() : v8::False();
}
diff --git a/src/3rdparty/v8/src/factory.cc b/src/3rdparty/v8/src/factory.cc
index 703251f..4c6af40 100644
--- a/src/3rdparty/v8/src/factory.cc
+++ b/src/3rdparty/v8/src/factory.cc
@@ -157,50 +157,48 @@ Handle<TypeFeedbackInfo> Factory::NewTypeFeedbackInfo() {
}
-// Symbols are created in the old generation (data space).
-Handle<String> Factory::LookupSymbol(Vector<const char> string) {
+// Internalized strings are created in the old generation (data space).
+Handle<String> Factory::InternalizeUtf8String(Vector<const char> string) {
CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->LookupSymbol(string),
+ isolate()->heap()->InternalizeUtf8String(string),
String);
}
-// Symbols are created in the old generation (data space).
-Handle<String> Factory::LookupSymbol(Handle<String> string) {
+// Internalized strings are created in the old generation (data space).
+Handle<String> Factory::InternalizeString(Handle<String> string) {
CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->LookupSymbol(*string),
+ isolate()->heap()->InternalizeString(*string),
String);
}
-Handle<String> Factory::LookupAsciiSymbol(Vector<const char> string) {
+Handle<String> Factory::InternalizeOneByteString(Vector<const uint8_t> string) {
CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->LookupAsciiSymbol(string),
+ isolate()->heap()->InternalizeOneByteString(string),
String);
}
-Handle<String> Factory::LookupAsciiSymbol(Handle<SeqAsciiString> string,
- int from,
- int length) {
+Handle<String> Factory::InternalizeOneByteString(
+ Handle<SeqOneByteString> string, int from, int length) {
CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->LookupAsciiSymbol(string,
- from,
- length),
+ isolate()->heap()->InternalizeOneByteString(
+ string, from, length),
String);
}
-Handle<String> Factory::LookupTwoByteSymbol(Vector<const uc16> string) {
+Handle<String> Factory::InternalizeTwoByteString(Vector<const uc16> string) {
CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->LookupTwoByteSymbol(string),
+ isolate()->heap()->InternalizeTwoByteString(string),
String);
}
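A hedged migration sketch for factory callers, covering only the renames visible in this hunk ('str', 'handle', 'seq', 'n' and 'two_byte' are placeholders):

    // before                                 // after
    factory->LookupSymbol(str);               factory->InternalizeUtf8String(str);
    factory->LookupSymbol(handle);            factory->InternalizeString(handle);
    factory->LookupAsciiSymbol(seq, 0, n);    factory->InternalizeOneByteString(seq, 0, n);
    factory->LookupTwoByteSymbol(two_byte);   factory->InternalizeTwoByteString(two_byte);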
-Handle<String> Factory::NewStringFromAscii(Vector<const char> string,
- PretenureFlag pretenure) {
+Handle<String> Factory::NewStringFromOneByte(Vector<const uint8_t> string,
+ PretenureFlag pretenure) {
CALL_HEAP_FUNCTION(
isolate(),
- isolate()->heap()->AllocateStringFromAscii(string, pretenure),
+ isolate()->heap()->AllocateStringFromOneByte(string, pretenure),
String);
}
@@ -222,12 +220,12 @@ Handle<String> Factory::NewStringFromTwoByte(Vector<const uc16> string,
}
-Handle<SeqAsciiString> Factory::NewRawAsciiString(int length,
+Handle<SeqOneByteString> Factory::NewRawOneByteString(int length,
PretenureFlag pretenure) {
CALL_HEAP_FUNCTION(
isolate(),
- isolate()->heap()->AllocateRawAsciiString(length, pretenure),
- SeqAsciiString);
+ isolate()->heap()->AllocateRawOneByteString(length, pretenure),
+ SeqOneByteString);
}
@@ -285,6 +283,14 @@ Handle<String> Factory::NewExternalStringFromTwoByte(
}
+Handle<Symbol> Factory::NewSymbol() {
+ CALL_HEAP_FUNCTION(
+ isolate(),
+ isolate()->heap()->AllocateSymbol(),
+ Symbol);
+}
+
+
Handle<Context> Factory::NewNativeContext() {
CALL_HEAP_FUNCTION(
isolate(),
@@ -363,9 +369,19 @@ Handle<Struct> Factory::NewStruct(InstanceType type) {
}
-Handle<AccessorInfo> Factory::NewAccessorInfo() {
- Handle<AccessorInfo> info =
- Handle<AccessorInfo>::cast(NewStruct(ACCESSOR_INFO_TYPE));
+Handle<DeclaredAccessorInfo> Factory::NewDeclaredAccessorInfo() {
+ Handle<DeclaredAccessorInfo> info =
+ Handle<DeclaredAccessorInfo>::cast(
+ NewStruct(DECLARED_ACCESSOR_INFO_TYPE));
+  info->set_flag(0); // Must clear the flag; it was initialized as undefined.
+ return info;
+}
+
+
+Handle<ExecutableAccessorInfo> Factory::NewExecutableAccessorInfo() {
+ Handle<ExecutableAccessorInfo> info =
+ Handle<ExecutableAccessorInfo>::cast(
+ NewStruct(EXECUTABLE_ACCESSOR_INFO_TYPE));
  info->set_flag(0); // Must clear the flag; it was initialized as undefined.
return info;
}
@@ -525,6 +541,12 @@ Handle<FixedArray> Factory::CopyFixedArray(Handle<FixedArray> array) {
}
+Handle<FixedArray> Factory::CopySizeFixedArray(Handle<FixedArray> array,
+ int new_length) {
+ CALL_HEAP_FUNCTION(isolate(), array->CopySize(new_length), FixedArray);
+}
+
+
Handle<FixedDoubleArray> Factory::CopyFixedDoubleArray(
Handle<FixedDoubleArray> array) {
CALL_HEAP_FUNCTION(isolate(), array->Copy(), FixedDoubleArray);
@@ -735,16 +757,17 @@ Handle<String> Factory::EmergencyNewError(const char* type,
Handle<Object> Factory::NewError(const char* maker,
const char* type,
Handle<JSArray> args) {
- Handle<String> make_str = LookupAsciiSymbol(maker);
+ Handle<String> make_str = InternalizeUtf8String(maker);
Handle<Object> fun_obj(
- isolate()->js_builtins_object()->GetPropertyNoExceptionThrown(*make_str));
+ isolate()->js_builtins_object()->GetPropertyNoExceptionThrown(*make_str),
+ isolate());
// If the builtins haven't been properly configured yet this error
// constructor may not have been defined. Bail out.
if (!fun_obj->IsJSFunction()) {
return EmergencyNewError(type, args);
}
Handle<JSFunction> fun = Handle<JSFunction>::cast(fun_obj);
- Handle<Object> type_obj = LookupAsciiSymbol(type);
+ Handle<Object> type_obj = InternalizeUtf8String(type);
Handle<Object> argv[] = { type_obj, args };
// Invoke the JavaScript factory method. If an exception is thrown while
@@ -766,7 +789,7 @@ Handle<Object> Factory::NewError(Handle<String> message) {
Handle<Object> Factory::NewError(const char* constructor,
Handle<String> message) {
- Handle<String> constr = LookupAsciiSymbol(constructor);
+ Handle<String> constr = InternalizeUtf8String(constructor);
Handle<JSFunction> fun = Handle<JSFunction>(
JSFunction::cast(isolate()->js_builtins_object()->
GetPropertyNoExceptionThrown(*constr)));
@@ -844,7 +867,7 @@ Handle<JSFunction> Factory::NewFunctionWithPrototype(Handle<String> name,
// Currently safe because it is only invoked from Genesis.
CHECK_NOT_EMPTY_HANDLE(isolate(),
JSObject::SetLocalPropertyIgnoreAttributes(
- prototype, constructor_symbol(),
+ prototype, constructor_string(),
function, DONT_ENUM));
return function;
}
@@ -870,6 +893,13 @@ Handle<ScopeInfo> Factory::NewScopeInfo(int length) {
}
+Handle<JSObject> Factory::NewExternal(void* value) {
+ CALL_HEAP_FUNCTION(isolate(),
+ isolate()->heap()->AllocateExternal(value),
+ JSObject);
+}
+
+
Handle<Code> Factory::NewCode(const CodeDesc& desc,
Code::Flags flags,
Handle<Object> self_ref,
@@ -895,9 +925,9 @@ Handle<Code> Factory::CopyCode(Handle<Code> code, Vector<byte> reloc_info) {
}
-Handle<String> Factory::SymbolFromString(Handle<String> value) {
+Handle<String> Factory::InternalizedStringFromString(Handle<String> value) {
CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->LookupSymbol(*value), String);
+ isolate()->heap()->InternalizeString(*value), String);
}
@@ -926,10 +956,11 @@ Handle<GlobalObject> Factory::NewGlobalObject(
-Handle<JSObject> Factory::NewJSObjectFromMap(Handle<Map> map) {
+Handle<JSObject> Factory::NewJSObjectFromMap(Handle<Map> map,
+ PretenureFlag pretenure) {
CALL_HEAP_FUNCTION(
isolate(),
- isolate()->heap()->AllocateJSObjectFromMap(*map, NOT_TENURED),
+ isolate()->heap()->AllocateJSObjectFromMap(*map, pretenure),
JSObject);
}
@@ -937,6 +968,9 @@ Handle<JSObject> Factory::NewJSObjectFromMap(Handle<Map> map) {
Handle<JSArray> Factory::NewJSArray(int capacity,
ElementsKind elements_kind,
PretenureFlag pretenure) {
+ if (capacity != 0) {
+ elements_kind = GetHoleyElementsKind(elements_kind);
+ }
CALL_HEAP_FUNCTION(isolate(),
isolate()->heap()->AllocateJSArrayAndStorage(
elements_kind,
@@ -955,6 +989,7 @@ Handle<JSArray> Factory::NewJSArrayWithElements(Handle<FixedArrayBase> elements,
isolate(),
isolate()->heap()->AllocateJSArrayWithElements(*elements,
elements_kind,
+ elements->length(),
pretenure),
JSArray);
}
@@ -1247,13 +1282,17 @@ Handle<JSFunction> Factory::CreateApiFunction(
ASSERT(type != INVALID_TYPE);
Handle<JSFunction> result =
- NewFunction(Factory::empty_symbol(),
+ NewFunction(Factory::empty_string(),
type,
instance_size,
code,
true);
+
+ // Set length.
+ result->shared()->set_length(obj->length());
+
// Set class name.
- Handle<Object> class_name = Handle<Object>(obj->class_name());
+ Handle<Object> class_name = Handle<Object>(obj->class_name(), isolate());
if (class_name->IsString()) {
result->shared()->set_instance_class_name(*class_name);
result->shared()->set_name(*class_name);
@@ -1313,7 +1352,7 @@ Handle<JSFunction> Factory::CreateApiFunction(
while (true) {
Object* props = info->property_accessors();
if (!props->IsUndefined()) {
- Handle<Object> props_handle(props);
+ Handle<Object> props_handle(props, isolate());
NeanderArray props_array(props_handle);
max_number_of_additional_properties += props_array.length();
}
@@ -1325,11 +1364,12 @@ Handle<JSFunction> Factory::CreateApiFunction(
Map::EnsureDescriptorSlack(map, max_number_of_additional_properties);
while (true) {
- Handle<Object> props = Handle<Object>(obj->property_accessors());
+ Handle<Object> props = Handle<Object>(obj->property_accessors(),
+ isolate());
if (!props->IsUndefined()) {
Map::AppendCallbackDescriptors(map, props);
}
- Handle<Object> parent = Handle<Object>(obj->parent_template());
+ Handle<Object> parent = Handle<Object>(obj->parent_template(), isolate());
if (parent->IsUndefined()) break;
obj = Handle<FunctionTemplateInfo>::cast(parent);
}
@@ -1376,7 +1416,7 @@ Handle<Map> Factory::ObjectLiteralMapFromCache(Handle<Context> context,
// Check to see whether there is a matching element in the cache.
Handle<MapCache> cache =
Handle<MapCache>(MapCache::cast(context->map_cache()));
- Handle<Object> result = Handle<Object>(cache->Lookup(*keys));
+ Handle<Object> result = Handle<Object>(cache->Lookup(*keys), isolate());
if (result->IsMap()) return Handle<Map>::cast(result);
// Create a new map and add it to the cache.
Handle<Map> map =
@@ -1428,7 +1468,7 @@ void Factory::ConfigureInstance(Handle<FunctionTemplateInfo> desc,
bool* pending_exception) {
// Configure the instance by adding the properties specified by the
// instance template.
- Handle<Object> instance_template = Handle<Object>(desc->instance_template());
+ Handle<Object> instance_template(desc->instance_template(), isolate());
if (!instance_template->IsUndefined()) {
Execution::ConfigureInstance(instance,
instance_template,
@@ -1441,17 +1481,15 @@ void Factory::ConfigureInstance(Handle<FunctionTemplateInfo> desc,
Handle<Object> Factory::GlobalConstantFor(Handle<String> name) {
Heap* h = isolate()->heap();
- if (name->Equals(h->undefined_symbol())) return undefined_value();
- if (name->Equals(h->nan_symbol())) return nan_value();
- if (name->Equals(h->infinity_symbol())) return infinity_value();
+ if (name->Equals(h->undefined_string())) return undefined_value();
+ if (name->Equals(h->nan_string())) return nan_value();
+ if (name->Equals(h->infinity_string())) return infinity_value();
return Handle<Object>::null();
}
Handle<Object> Factory::ToBoolean(bool value) {
- return Handle<Object>(value
- ? isolate()->heap()->true_value()
- : isolate()->heap()->false_value());
+ return value ? true_value() : false_value();
}
diff --git a/src/3rdparty/v8/src/factory.h b/src/3rdparty/v8/src/factory.h
index 51065aa..3651d36 100644
--- a/src/3rdparty/v8/src/factory.h
+++ b/src/3rdparty/v8/src/factory.h
@@ -79,16 +79,16 @@ class Factory {
Handle<TypeFeedbackInfo> NewTypeFeedbackInfo();
- Handle<String> LookupSymbol(Vector<const char> str);
- Handle<String> LookupSymbol(Handle<String> str);
- Handle<String> LookupAsciiSymbol(Vector<const char> str);
- Handle<String> LookupAsciiSymbol(Handle<SeqAsciiString>,
+ Handle<String> InternalizeUtf8String(Vector<const char> str);
+ Handle<String> InternalizeUtf8String(const char* str) {
+ return InternalizeUtf8String(CStrVector(str));
+ }
+ Handle<String> InternalizeString(Handle<String> str);
+ Handle<String> InternalizeOneByteString(Vector<const uint8_t> str);
+ Handle<String> InternalizeOneByteString(Handle<SeqOneByteString>,
int from,
int length);
- Handle<String> LookupTwoByteSymbol(Vector<const uc16> str);
- Handle<String> LookupAsciiSymbol(const char* str) {
- return LookupSymbol(CStrVector(str));
- }
+ Handle<String> InternalizeTwoByteString(Vector<const uc16> str);
// String creation functions. Most of the string creation functions take
@@ -113,9 +113,15 @@ class Factory {
// two byte.
//
// ASCII strings are pretenured when used as keys in the SourceCodeCache.
- Handle<String> NewStringFromAscii(
- Vector<const char> str,
+ Handle<String> NewStringFromOneByte(
+ Vector<const uint8_t> str,
PretenureFlag pretenure = NOT_TENURED);
+ // TODO(dcarney): remove this function.
+ inline Handle<String> NewStringFromAscii(
+ Vector<const char> str,
+ PretenureFlag pretenure = NOT_TENURED) {
+ return NewStringFromOneByte(Vector<const uint8_t>::cast(str), pretenure);
+ }
// UTF8 strings are pretenured when used for regexp literal patterns and
// flags in the parser.
@@ -130,7 +136,7 @@ class Factory {
// Allocates and partially initializes an ASCII or TwoByte String. The
// characters of the string are uninitialized. Currently used in regexp code
// only, where they are pretenured.
- Handle<SeqAsciiString> NewRawAsciiString(
+ Handle<SeqOneByteString> NewRawOneByteString(
int length,
PretenureFlag pretenure = NOT_TENURED);
Handle<SeqTwoByteString> NewRawTwoByteString(
@@ -160,6 +166,9 @@ class Factory {
Handle<String> NewExternalStringFromTwoByte(
const ExternalTwoByteString::Resource* resource);
+ // Create a symbol.
+ Handle<Symbol> NewSymbol();
+
// Create a global (but otherwise uninitialized) context.
Handle<Context> NewNativeContext();
@@ -189,14 +198,16 @@ class Factory {
Handle<Context> previous,
Handle<ScopeInfo> scope_info);
- // Return the Symbol matching the passed in string.
- Handle<String> SymbolFromString(Handle<String> value);
+ // Return the internalized version of the passed in string.
+ Handle<String> InternalizedStringFromString(Handle<String> value);
// Allocate a new struct. The struct is pretenured (allocated directly in
// the old generation).
Handle<Struct> NewStruct(InstanceType type);
- Handle<AccessorInfo> NewAccessorInfo();
+ Handle<DeclaredAccessorInfo> NewDeclaredAccessorInfo();
+
+ Handle<ExecutableAccessorInfo> NewExecutableAccessorInfo();
Handle<Script> NewScript(Handle<String> source);
@@ -239,6 +250,9 @@ class Factory {
Handle<FixedArray> CopyFixedArray(Handle<FixedArray> array);
+ Handle<FixedArray> CopySizeFixedArray(Handle<FixedArray> array,
+ int new_length);
+
Handle<FixedDoubleArray> CopyFixedDoubleArray(
Handle<FixedDoubleArray> array);
@@ -267,7 +281,8 @@ class Factory {
// JS objects are pretenured when allocated by the bootstrapper and
// runtime.
- Handle<JSObject> NewJSObjectFromMap(Handle<Map> map);
+ Handle<JSObject> NewJSObjectFromMap(Handle<Map> map,
+ PretenureFlag pretenure = NOT_TENURED);
// JS modules are pretenured.
Handle<JSModule> NewJSModule(Handle<Context> context,
@@ -325,6 +340,8 @@ class Factory {
Handle<ScopeInfo> NewScopeInfo(int length);
+ Handle<JSObject> NewExternal(void* value);
+
Handle<Code> NewCode(const CodeDesc& desc,
Code::Flags flags,
Handle<Object> self_reference,
@@ -420,16 +437,16 @@ class Factory {
ROOT_LIST(ROOT_ACCESSOR)
#undef ROOT_ACCESSOR
-#define SYMBOL_ACCESSOR(name, str) \
+#define STRING_ACCESSOR(name, str) \
inline Handle<String> name() { \
return Handle<String>(BitCast<String**>( \
&isolate()->heap()->roots_[Heap::k##name##RootIndex])); \
}
- SYMBOL_LIST(SYMBOL_ACCESSOR)
-#undef SYMBOL_ACCESSOR
+ INTERNALIZED_STRING_LIST(STRING_ACCESSOR)
+#undef STRING_ACCESSOR
- Handle<String> hidden_symbol() {
- return Handle<String>(&isolate()->heap()->hidden_symbol_);
+ Handle<String> hidden_string() {
+ return Handle<String>(&isolate()->heap()->hidden_string_);
}
Handle<SharedFunctionInfo> NewSharedFunctionInfo(
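
The STRING_ACCESSOR change above is an instance of the X-macro idiom V8 uses throughout: one list macro (here INTERNALIZED_STRING_LIST) is expanded with different per-entry macros to generate indices and accessors. A minimal sketch with an invented three-entry list:

// One list macro, expanded several times with different per-entry
// macros. The entries here are illustrative, not V8's real root list.
#define DEMO_STRING_LIST(V)        \
  V(undefined_string, "undefined") \
  V(nan_string, "NaN")             \
  V(infinity_string, "Infinity")

// Expand once into an enum of root indices...
enum RootIndex {
#define DEMO_INDEX(name, str) k_##name,
  DEMO_STRING_LIST(DEMO_INDEX)
#undef DEMO_INDEX
  kRootCount
};

// ...once into a root table...
static const char* roots[kRootCount] = {
#define DEMO_VALUE(name, str) str,
  DEMO_STRING_LIST(DEMO_VALUE)
#undef DEMO_VALUE
};

// ...and once into accessor functions over that table.
#define DEMO_ACCESSOR(name, str) \
  inline const char* name() { return roots[k_##name]; }
DEMO_STRING_LIST(DEMO_ACCESSOR)
#undef DEMO_ACCESSOR

// nan_string() now returns "NaN". Renaming the per-entry macro (as the
// patch does with SYMBOL_ACCESSOR -> STRING_ACCESSOR) regenerates all
// accessors without touching the list itself.
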
diff --git a/src/3rdparty/v8/src/flag-definitions.h b/src/3rdparty/v8/src/flag-definitions.h
index 96d03fa..b4184ff 100644
--- a/src/3rdparty/v8/src/flag-definitions.h
+++ b/src/3rdparty/v8/src/flag-definitions.h
@@ -141,6 +141,8 @@ DEFINE_bool(harmony_typeof, false, "enable harmony semantics for typeof")
DEFINE_bool(harmony_scoping, false, "enable harmony block scoping")
DEFINE_bool(harmony_modules, false,
"enable harmony modules (implies block scoping)")
+DEFINE_bool(harmony_symbols, false,
+ "enable harmony symbols (a.k.a. private names)")
DEFINE_bool(harmony_proxies, false, "enable harmony proxies")
DEFINE_bool(harmony_collections, false,
"enable harmony collections (sets, maps, and weak maps)")
@@ -149,6 +151,7 @@ DEFINE_bool(harmony_observation, false,
DEFINE_bool(harmony, false, "enable all harmony features (except typeof)")
DEFINE_implication(harmony, harmony_scoping)
DEFINE_implication(harmony, harmony_modules)
+DEFINE_implication(harmony, harmony_symbols)
DEFINE_implication(harmony, harmony_proxies)
DEFINE_implication(harmony, harmony_collections)
DEFINE_implication(harmony, harmony_observation)
@@ -158,6 +161,8 @@ DEFINE_implication(harmony_observation, harmony_collections)
// Flags for experimental implementation features.
DEFINE_bool(packed_arrays, true, "optimizes arrays that have no holes")
DEFINE_bool(smi_only_arrays, true, "tracks arrays with only smi values")
+DEFINE_bool(compiled_transitions, false, "use optimizing compiler to "
+ "generate array elements transition stubs")
DEFINE_bool(clever_optimizations,
true,
"Optimize object size, Array shift, DOM strings and string +")
@@ -181,6 +186,7 @@ DEFINE_int(max_inlined_nodes, 196,
DEFINE_int(max_inlined_nodes_cumulative, 196,
"maximum cumulative number of AST nodes considered for inlining")
DEFINE_bool(loop_invariant_code_motion, true, "loop invariant code motion")
+DEFINE_bool(fast_math, true, "faster (but maybe less accurate) math functions")
DEFINE_bool(collect_megamorphic_maps_from_stub_cache,
true,
"crankshaft harvests type feedback from stub cache")
@@ -193,6 +199,8 @@ DEFINE_bool(trace_all_uses, false, "trace all use positions")
DEFINE_bool(trace_range, false, "trace range analysis")
DEFINE_bool(trace_gvn, false, "trace global value numbering")
DEFINE_bool(trace_representation, false, "trace representation types")
+DEFINE_bool(trace_track_allocation_sites, false,
+ "trace the tracking of allocation sites")
DEFINE_bool(stress_pointer_maps, false, "pointer map for every instruction")
DEFINE_bool(stress_environments, false, "environment for every instruction")
DEFINE_int(deopt_every_n_times,
@@ -202,13 +210,20 @@ DEFINE_bool(trap_on_deopt, false, "put a break point before deoptimizing")
DEFINE_bool(deoptimize_uncommon_cases, true, "deoptimize uncommon cases")
DEFINE_bool(polymorphic_inlining, true, "polymorphic inlining")
DEFINE_bool(use_osr, true, "use on-stack replacement")
+DEFINE_bool(idefs, false, "use informative definitions")
DEFINE_bool(array_bounds_checks_elimination, true,
"perform array bounds checks elimination")
DEFINE_bool(array_index_dehoisting, true,
"perform array index dehoisting")
DEFINE_bool(dead_code_elimination, true, "use dead code elimination")
+DEFINE_bool(fold_constants, true, "use constant folding")
DEFINE_bool(trace_dead_code_elimination, false, "trace dead code elimination")
-
+DEFINE_bool(unreachable_code_elimination, false,
+ "eliminate unreachable code (hidden behind soft deopts)")
+DEFINE_bool(track_allocation_sites, true,
+ "Use allocation site info to reduce transitions")
+DEFINE_bool(optimize_constructed_arrays, false,
+ "Use allocation site info on constructed arrays")
DEFINE_bool(trace_osr, false, "trace on-stack replacement")
DEFINE_int(stress_runs, 0, "number of stress runs")
DEFINE_bool(optimize_closures, true, "optimize closures")
@@ -225,7 +240,7 @@ DEFINE_int(loop_weight, 1, "loop weight for representation inference")
DEFINE_bool(optimize_for_in, true,
"optimize functions containing for-in loops")
DEFINE_bool(opt_safe_uint32_operations, true,
- "allow uint32 values on optimize frames if they are used only in"
+ "allow uint32 values on optimize frames if they are used only in "
"safe operations")
DEFINE_bool(parallel_recompilation, false,
@@ -233,6 +248,12 @@ DEFINE_bool(parallel_recompilation, false,
DEFINE_bool(trace_parallel_recompilation, false, "track parallel recompilation")
DEFINE_int(parallel_recompilation_queue_length, 2,
"the length of the parallel compilation queue")
+DEFINE_bool(manual_parallel_recompilation, false,
+ "disable automatic optimization")
+DEFINE_implication(manual_parallel_recompilation, parallel_recompilation)
+DEFINE_bool(omit_prototype_checks_for_leaf_maps, true,
+ "do not emit prototype checks if all prototypes have leaf maps, "
+ "deoptimize the optimized code if the layout of the maps changes.")
// Experimental profiler changes.
DEFINE_bool(experimental_profiler, true, "enable all profiler experiments")
@@ -243,8 +264,6 @@ DEFINE_bool(self_optimization, false,
DEFINE_bool(direct_self_opt, false,
"call recompile stub directly when self-optimizing")
DEFINE_bool(retry_self_opt, false, "re-try self-optimization if it failed")
-DEFINE_bool(count_based_interrupts, false,
- "trigger profiler ticks based on counting instead of timing")
DEFINE_bool(interrupt_at_exit, false,
"insert an interrupt check at function exit")
DEFINE_bool(weighted_back_edges, false,
@@ -260,7 +279,6 @@ DEFINE_implication(experimental_profiler, watch_ic_patching)
DEFINE_implication(experimental_profiler, self_optimization)
// Not implying direct_self_opt here because it seems to be a bad idea.
DEFINE_implication(experimental_profiler, retry_self_opt)
-DEFINE_implication(experimental_profiler, count_based_interrupts)
DEFINE_implication(experimental_profiler, interrupt_at_exit)
DEFINE_implication(experimental_profiler, weighted_back_edges)
@@ -297,8 +315,12 @@ DEFINE_bool(enable_movw_movt, false,
"instruction pairs (ARM only)")
DEFINE_bool(enable_unaligned_accesses, true,
"enable unaligned accesses for ARMv7 (ARM only)")
+DEFINE_bool(enable_32dregs, true,
+ "enable use of d16-d31 registers on ARM - this requires VFP3")
DEFINE_bool(enable_fpu, true,
"enable use of MIPS FPU instructions if available (MIPS only)")
+DEFINE_bool(enable_vldr_imm, false,
+ "enable use of constant pools for double immediate (ARM only)")
// bootstrapper.cc
DEFINE_string(expose_natives_as, NULL, "expose natives in global object")
@@ -331,7 +353,9 @@ DEFINE_bool(trace_opt_stats, false, "trace lazy optimization statistics")
DEFINE_bool(opt, true, "use adaptive optimizations")
DEFINE_bool(always_opt, false, "always try to optimize functions")
DEFINE_bool(prepare_always_opt, false, "prepare for turning on always opt")
-DEFINE_bool(trace_deopt, false, "trace deoptimization")
+DEFINE_bool(trace_deopt, false, "trace optimized function deoptimization")
+DEFINE_bool(trace_stub_failures, false,
+ "trace deoptimization of generated code stubs")
// compiler.cc
DEFINE_int(min_preparse_length, 1024,
@@ -346,8 +370,20 @@ DEFINE_bool(compilation_cache, true, "enable compilation cache")
DEFINE_bool(cache_prototype_transitions, true, "cache prototype transitions")
+// cpu-profiler.cc
+DEFINE_int(cpu_profiler_sampling_period, 1000,
+ "CPU profiler sampling period in microseconds")
+
// debug.cc
DEFINE_bool(trace_debug_json, false, "trace debugging JSON request/response")
+DEFINE_bool(trace_js_array_abuse, false,
+ "trace out-of-bounds accesses to JS arrays")
+DEFINE_bool(trace_external_array_abuse, false,
+ "trace out-of-bounds-accesses to external arrays")
+DEFINE_bool(trace_array_abuse, false,
+ "trace out-of-bounds accesses to all arrays")
+DEFINE_implication(trace_array_abuse, trace_js_array_abuse)
+DEFINE_implication(trace_array_abuse, trace_external_array_abuse)
DEFINE_bool(debugger_auto_break, true,
"automatically set the debug break flag when debugger commands are "
"in the queue")
@@ -393,11 +429,13 @@ DEFINE_bool(trace_external_memory, false,
"it is adjusted.")
DEFINE_bool(collect_maps, true,
"garbage collect maps from which no objects can be reached")
+DEFINE_bool(weak_embedded_maps_in_optimized_code, true,
+ "make maps embedded in optimized code weak")
DEFINE_bool(flush_code, true,
"flush code that we expect not to use again (during full gc)")
-DEFINE_bool(flush_code_incrementally, false,
+DEFINE_bool(flush_code_incrementally, true,
"flush code that we expect not to use again (incrementally)")
-DEFINE_bool(age_code, false,
+DEFINE_bool(age_code, true,
"track un-executed functions to age code and flush only "
"old code")
DEFINE_bool(incremental_marking, true, "use incremental marking")
@@ -406,6 +444,12 @@ DEFINE_bool(trace_incremental_marking, false,
"trace progress of the incremental marking")
DEFINE_bool(track_gc_object_stats, false,
"track object counts and memory usage")
+DEFINE_bool(parallel_sweeping, true, "enable parallel sweeping")
+DEFINE_bool(concurrent_sweeping, false, "enable concurrent sweeping")
+DEFINE_int(sweeper_threads, 0,
+ "number of parallel and concurrent sweeping threads")
+DEFINE_bool(parallel_marking, false, "enable parallel marking")
+DEFINE_int(marking_threads, 0, "number of parallel marking threads")
#ifdef VERIFY_HEAP
DEFINE_bool(verify_heap, false, "verify heap pointers before and after GC")
#endif
@@ -416,12 +460,6 @@ DEFINE_bool(use_idle_notification, true,
// ic.cc
DEFINE_bool(use_ic, true, "use inline caching")
-#ifdef LIVE_OBJECT_LIST
-// liveobjectlist.cc
-DEFINE_string(lol_workdir, NULL, "path for lol temp files")
-DEFINE_bool(verify_lol, false, "perform debugging verification for lol")
-#endif
-
// macro-assembler-ia32.cc
DEFINE_bool(native_code_counters, false,
"generate extra code for manipulating stats counters")
@@ -439,6 +477,9 @@ DEFINE_bool(incremental_code_compaction, true,
DEFINE_bool(cleanup_code_caches_at_gc, true,
"Flush inline caches prior to mark compact collection and "
"flush code caches in maps during mark compact cycle.")
+DEFINE_bool(use_marking_progress_bar, true,
+ "Use a progress bar to scan large objects in increments when "
+ "incremental marking is active.")
DEFINE_int(random_seed, 0,
"Default seed for initializing random generator "
"(0, the default, means to use system random).")
@@ -652,12 +693,14 @@ DEFINE_bool(prof_lazy, false,
DEFINE_bool(prof_browser_mode, true,
"Used with --prof, turns on browser-compatible mode for profiling.")
DEFINE_bool(log_regexp, false, "Log regular expression execution.")
-DEFINE_bool(sliding_state_window, false,
- "Update sliding state window counters.")
DEFINE_string(logfile, "v8.log", "Specify the name of the log file.")
DEFINE_bool(ll_prof, false, "Enable low-level linux profiler.")
DEFINE_string(gc_fake_mmap, "/tmp/__v8_gc__",
"Specify the name of the file for fake gc mmap used in ll_prof")
+DEFINE_bool(log_internal_timer_events, false, "Time internal events.")
+DEFINE_bool(log_timer_events, false,
+ "Time events including external callbacks.")
+DEFINE_implication(log_timer_events, log_internal_timer_events)
//
// Disassembler only flags
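
Several of the new flags above come paired with DEFINE_implication lines (harmony implies harmony_symbols, log_timer_events implies log_internal_timer_events, and so on). A hedged sketch of the shape such machinery can take (plain boolean globals plus a fix-up pass), not V8's actual flag parser:

// Flags are plain globals; implications are applied after parsing.
struct Flags {
  bool harmony = false;
  bool harmony_symbols = false;
  bool log_timer_events = false;
  bool log_internal_timer_events = false;
};

// Each DEFINE_implication(from, to) becomes one "if from, force to".
void ApplyImplications(Flags* f) {
  if (f->harmony) f->harmony_symbols = true;
  if (f->log_timer_events) f->log_internal_timer_events = true;
}

// Usage:
//   Flags f;
//   f.harmony = true;
//   ApplyImplications(&f);   // f.harmony_symbols is now true as well
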
diff --git a/src/3rdparty/v8/src/frames-inl.h b/src/3rdparty/v8/src/frames-inl.h
index 27a526c..83b37a5 100644
--- a/src/3rdparty/v8/src/frames-inl.h
+++ b/src/3rdparty/v8/src/frames-inl.h
@@ -235,6 +235,11 @@ inline Object* JavaScriptFrame::function() const {
}
+inline StubFrame::StubFrame(StackFrameIterator* iterator)
+ : StandardFrame(iterator) {
+}
+
+
inline OptimizedFrame::OptimizedFrame(StackFrameIterator* iterator)
: JavaScriptFrame(iterator) {
}
@@ -250,6 +255,11 @@ inline InternalFrame::InternalFrame(StackFrameIterator* iterator)
}
+inline StubFailureTrampolineFrame::StubFailureTrampolineFrame(
+ StackFrameIterator* iterator) : StandardFrame(iterator) {
+}
+
+
inline ConstructFrame::ConstructFrame(StackFrameIterator* iterator)
: InternalFrame(iterator) {
}
diff --git a/src/3rdparty/v8/src/frames.cc b/src/3rdparty/v8/src/frames.cc
index 6342852..7dcf540 100644
--- a/src/3rdparty/v8/src/frames.cc
+++ b/src/3rdparty/v8/src/frames.cc
@@ -88,14 +88,6 @@ class StackHandlerIterator BASE_EMBEDDED {
#define INITIALIZE_SINGLETON(type, field) field##_(this),
-StackFrameIterator::StackFrameIterator()
- : isolate_(Isolate::Current()),
- STACK_FRAME_TYPE_LIST(INITIALIZE_SINGLETON)
- frame_(NULL), handler_(NULL),
- thread_(isolate_->thread_local_top()),
- fp_(NULL), sp_(NULL), advance_(&StackFrameIterator::AdvanceWithHandler) {
- Reset();
-}
StackFrameIterator::StackFrameIterator(Isolate* isolate)
: isolate_(isolate),
STACK_FRAME_TYPE_LIST(INITIALIZE_SINGLETON)
@@ -210,11 +202,6 @@ StackFrame* StackFrameIterator::SingletonFor(StackFrame::Type type) {
// -------------------------------------------------------------------------
-StackTraceFrameIterator::StackTraceFrameIterator() {
- if (!done() && !IsValidFrame()) Advance();
-}
-
-
StackTraceFrameIterator::StackTraceFrameIterator(Isolate* isolate)
: JavaScriptFrameIterator(isolate) {
if (!done() && !IsValidFrame()) Advance();
@@ -617,13 +604,7 @@ bool StandardFrame::IsExpressionInsideHandler(int n) const {
}
-void OptimizedFrame::Iterate(ObjectVisitor* v) const {
-#ifdef DEBUG
- // Make sure that optimized frames do not contain any stack handlers.
- StackHandlerIterator it(this, top_handler());
- ASSERT(it.done());
-#endif
-
+void StandardFrame::IterateCompiledFrame(ObjectVisitor* v) const {
// Make sure that we're not doing "safe" stack frame iteration. We cannot
// possibly find pointers in optimized frames in that state.
ASSERT(!SafeStackFrameIterator::is_active(isolate()));
@@ -649,7 +630,9 @@ void OptimizedFrame::Iterate(ObjectVisitor* v) const {
// Skip saved double registers.
if (safepoint_entry.has_doubles()) {
- parameters_base += DoubleRegister::kNumAllocatableRegisters *
+ // Number of doubles not known at snapshot time.
+ ASSERT(!Serializer::enabled());
+ parameters_base += DoubleRegister::NumAllocatableRegisters() *
kDoubleSize / kPointerSize;
}
@@ -681,14 +664,51 @@ void OptimizedFrame::Iterate(ObjectVisitor* v) const {
}
}
- // Visit the context and the function.
+ // Visit the return address in the callee and incoming arguments.
+ IteratePc(v, pc_address(), code);
+
+ // Visit the context in stub frame and JavaScript frame.
+ // Visit the function in JavaScript frame.
Object** fixed_base = &Memory::Object_at(
- fp() + JavaScriptFrameConstants::kFunctionOffset);
+ fp() + StandardFrameConstants::kMarkerOffset);
Object** fixed_limit = &Memory::Object_at(fp());
v->VisitPointers(fixed_base, fixed_limit);
+}
- // Visit the return address in the callee and incoming arguments.
- IteratePc(v, pc_address(), code);
+
+void StubFrame::Iterate(ObjectVisitor* v) const {
+ IterateCompiledFrame(v);
+}
+
+
+Code* StubFrame::unchecked_code() const {
+ return static_cast<Code*>(isolate()->heap()->FindCodeObject(pc()));
+}
+
+
+Address StubFrame::GetCallerStackPointer() const {
+ return fp() + ExitFrameConstants::kCallerSPDisplacement;
+}
+
+
+int StubFrame::GetNumberOfIncomingArguments() const {
+ return 0;
+}
+
+
+void OptimizedFrame::Iterate(ObjectVisitor* v) const {
+#ifdef DEBUG
+ // Make sure that optimized frames do not contain any stack handlers.
+ StackHandlerIterator it(this, top_handler());
+ ASSERT(it.done());
+#endif
+
+ IterateCompiledFrame(v);
+}
+
+
+void JavaScriptFrame::SetParameterValue(int index, Object* value) const {
+ Memory::Object_at(GetParameterSlot(index)) = value;
}
@@ -751,13 +771,14 @@ void JavaScriptFrame::Summarize(List<FrameSummary>* functions) {
}
-void JavaScriptFrame::PrintTop(FILE* file,
+void JavaScriptFrame::PrintTop(Isolate* isolate,
+ FILE* file,
bool print_args,
bool print_line_number) {
// constructor calls
- HandleScope scope;
+ HandleScope scope(isolate);
AssertNoAllocation no_allocation;
- JavaScriptFrameIterator it;
+ JavaScriptFrameIterator it(isolate);
while (!it.done()) {
if (it.frame()->is_java_script()) {
JavaScriptFrame* frame = it.frame();
@@ -1052,7 +1073,7 @@ void StackFrame::PrintIndex(StringStream* accumulator,
void JavaScriptFrame::Print(StringStream* accumulator,
PrintMode mode,
int index) const {
- HandleScope scope;
+ HandleScope scope(isolate());
Object* receiver = this->receiver();
Object* function = this->function();
@@ -1066,7 +1087,7 @@ void JavaScriptFrame::Print(StringStream* accumulator,
// doesn't contain scope info, scope_info will return 0 for the number of
// parameters, stack local variables, context local variables, stack slots,
// or context slots.
- Handle<ScopeInfo> scope_info(ScopeInfo::Empty());
+ Handle<ScopeInfo> scope_info(ScopeInfo::Empty(isolate()));
if (function->IsJSFunction()) {
Handle<SharedFunctionInfo> shared(JSFunction::cast(function)->shared());
@@ -1271,6 +1292,42 @@ void InternalFrame::Iterate(ObjectVisitor* v) const {
}
+void StubFailureTrampolineFrame::Iterate(ObjectVisitor* v) const {
+ Object** base = &Memory::Object_at(sp());
+ Object** limit = &Memory::Object_at(fp() +
+ kFirstRegisterParameterFrameOffset);
+ v->VisitPointers(base, limit);
+ base = &Memory::Object_at(fp() + StandardFrameConstants::kMarkerOffset);
+ const int offset = StandardFrameConstants::kContextOffset;
+ limit = &Memory::Object_at(fp() + offset) + 1;
+ v->VisitPointers(base, limit);
+ IteratePc(v, pc_address(), LookupCode());
+}
+
+
+Address StubFailureTrampolineFrame::GetCallerStackPointer() const {
+ return fp() + StandardFrameConstants::kCallerSPOffset;
+}
+
+
+Code* StubFailureTrampolineFrame::unchecked_code() const {
+ int i = 0;
+ for (; i <= StubFailureTrampolineStub::kMaxExtraExpressionStackCount; ++i) {
+ Code* trampoline;
+ StubFailureTrampolineStub(i).FindCodeInCache(&trampoline, isolate());
+ ASSERT(trampoline != NULL);
+ Address current_pc = pc();
+ Address code_start = trampoline->instruction_start();
+ Address code_end = code_start + trampoline->instruction_size();
+ if (code_start <= current_pc && current_pc < code_end) {
+ return trampoline;
+ }
+ }
+ UNREACHABLE();
+ return NULL;
+}
+
+
// -------------------------------------------------------------------------
@@ -1432,9 +1489,9 @@ static StackFrame* AllocateFrameCopy(StackFrame* frame, Zone* zone) {
return NULL;
}
-Vector<StackFrame*> CreateStackMap(Zone* zone) {
+Vector<StackFrame*> CreateStackMap(Isolate* isolate, Zone* zone) {
ZoneList<StackFrame*> list(10, zone);
- for (StackFrameIterator it; !it.done(); it.Advance()) {
+ for (StackFrameIterator it(isolate); !it.done(); it.Advance()) {
StackFrame* frame = AllocateFrameCopy(it.frame(), zone);
list.Add(frame, zone);
}
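
The IterateCompiledFrame refactoring above factors the common GC work out of OptimizedFrame::Iterate so the new StubFrame can reuse it: compute base and limit slot addresses, then hand each range to an ObjectVisitor. A minimal sketch of that visitor protocol, with invented types:

struct Object;  // opaque heap object, stands in for v8::internal::Object

// The visitor receives half-open ranges [start, end) of pointer slots.
struct ObjectVisitor {
  virtual ~ObjectVisitor() {}
  virtual void VisitPointers(Object** start, Object** end) = 0;
};

// Example visitor: counts the non-null slots in each visited range
// (a real GC visitor would mark or relocate the pointees instead).
struct CountingVisitor : ObjectVisitor {
  int count = 0;
  void VisitPointers(Object** start, Object** end) override {
    for (Object** p = start; p < end; ++p) {
      if (*p != nullptr) ++count;
    }
  }
};

// Usage: treat a small array as a frame's slot area.
//   Object* slots[4] = {nullptr, nullptr, nullptr, nullptr};
//   CountingVisitor v;
//   v.VisitPointers(&slots[0], &slots[4]);   // v.count == 0
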
diff --git a/src/3rdparty/v8/src/frames.h b/src/3rdparty/v8/src/frames.h
index 30f7e1f..a91d004 100644
--- a/src/3rdparty/v8/src/frames.h
+++ b/src/3rdparty/v8/src/frames.h
@@ -130,15 +130,33 @@ class StackHandler BASE_EMBEDDED {
};
-#define STACK_FRAME_TYPE_LIST(V) \
- V(ENTRY, EntryFrame) \
- V(ENTRY_CONSTRUCT, EntryConstructFrame) \
- V(EXIT, ExitFrame) \
- V(JAVA_SCRIPT, JavaScriptFrame) \
- V(OPTIMIZED, OptimizedFrame) \
- V(INTERNAL, InternalFrame) \
- V(CONSTRUCT, ConstructFrame) \
- V(ARGUMENTS_ADAPTOR, ArgumentsAdaptorFrame)
+#define STACK_FRAME_TYPE_LIST(V) \
+ V(ENTRY, EntryFrame) \
+ V(ENTRY_CONSTRUCT, EntryConstructFrame) \
+ V(EXIT, ExitFrame) \
+ V(JAVA_SCRIPT, JavaScriptFrame) \
+ V(OPTIMIZED, OptimizedFrame) \
+ V(STUB, StubFrame) \
+ V(STUB_FAILURE_TRAMPOLINE, StubFailureTrampolineFrame) \
+ V(INTERNAL, InternalFrame) \
+ V(CONSTRUCT, ConstructFrame) \
+ V(ARGUMENTS_ADAPTOR, ArgumentsAdaptorFrame)
+
+
+class StandardFrameConstants : public AllStatic {
+ public:
+ // Fixed part of the frame consists of return address, caller fp,
+ // context and function.
+ // StandardFrame::IterateExpressions assumes that kContextOffset is the last
+ // object pointer.
+ static const int kFixedFrameSize = 4 * kPointerSize;
+ static const int kExpressionsOffset = -3 * kPointerSize;
+ static const int kMarkerOffset = -2 * kPointerSize;
+ static const int kContextOffset = -1 * kPointerSize;
+ static const int kCallerFPOffset = 0 * kPointerSize;
+ static const int kCallerPCOffset = +1 * kPointerSize;
+ static const int kCallerSPOffset = +2 * kPointerSize;
+};
// Abstract base class for all stack frames.
@@ -193,6 +211,9 @@ class StackFrame BASE_EMBEDDED {
bool is_optimized() const { return type() == OPTIMIZED; }
bool is_arguments_adaptor() const { return type() == ARGUMENTS_ADAPTOR; }
bool is_internal() const { return type() == INTERNAL; }
+ bool is_stub_failure_trampoline() const {
+ return type() == STUB_FAILURE_TRAMPOLINE;
+ }
bool is_construct() const { return type() == CONSTRUCT; }
virtual bool is_standard() const { return false; }
@@ -263,12 +284,12 @@ class StackFrame BASE_EMBEDDED {
PrintMode mode,
int index) const { }
+ Isolate* isolate() const { return isolate_; }
+
protected:
inline explicit StackFrame(StackFrameIterator* iterator);
virtual ~StackFrame() { }
- Isolate* isolate() const { return isolate_; }
-
// Compute the stack pointer for the calling frame.
virtual Address GetCallerStackPointer() const = 0;
@@ -448,6 +469,9 @@ class StandardFrame: public StackFrame {
// construct frame.
static inline bool IsConstructFrame(Address fp);
+ // Used by OptimizedFrames and StubFrames.
+ void IterateCompiledFrame(ObjectVisitor* v) const;
+
private:
friend class StackFrame;
friend class StackFrameIterator;
@@ -461,7 +485,7 @@ class FrameSummary BASE_EMBEDDED {
Code* code,
int offset,
bool is_constructor)
- : receiver_(receiver),
+ : receiver_(receiver, function->GetIsolate()),
function_(function),
code_(code),
offset_(offset),
@@ -500,6 +524,9 @@ class JavaScriptFrame: public StandardFrame {
return GetNumberOfIncomingArguments();
}
+ // Debugger access.
+ void SetParameterValue(int index, Object* value) const;
+
// Check if this frame is a constructor frame invoked through 'new'.
bool IsConstructor() const;
@@ -534,7 +561,10 @@ class JavaScriptFrame: public StandardFrame {
return static_cast<JavaScriptFrame*>(frame);
}
- static void PrintTop(FILE* file, bool print_args, bool print_line_number);
+ static void PrintTop(Isolate* isolate,
+ FILE* file,
+ bool print_args,
+ bool print_line_number);
protected:
inline explicit JavaScriptFrame(StackFrameIterator* iterator);
@@ -555,6 +585,27 @@ class JavaScriptFrame: public StandardFrame {
};
+class StubFrame : public StandardFrame {
+ public:
+ virtual Type type() const { return STUB; }
+
+ // GC support.
+ virtual void Iterate(ObjectVisitor* v) const;
+
+ // Determine the code for the frame.
+ virtual Code* unchecked_code() const;
+
+ protected:
+ inline explicit StubFrame(StackFrameIterator* iterator);
+
+ virtual Address GetCallerStackPointer() const;
+
+ virtual int GetNumberOfIncomingArguments() const;
+
+ friend class StackFrameIterator;
+};
+
+
class OptimizedFrame : public JavaScriptFrame {
public:
virtual Type type() const { return OPTIMIZED; }
@@ -640,6 +691,35 @@ class InternalFrame: public StandardFrame {
};
+class StubFailureTrampolineFrame: public StandardFrame {
+ public:
+ // sizeof(Arguments) - sizeof(Arguments*) is 3 * kPointerSize, but the
+ // presubmit script complains about using sizeof() on a type.
+ static const int kFirstRegisterParameterFrameOffset =
+ StandardFrameConstants::kMarkerOffset - 3 * kPointerSize;
+
+ static const int kCallerStackParameterCountFrameOffset =
+ StandardFrameConstants::kMarkerOffset - 2 * kPointerSize;
+
+ virtual Type type() const { return STUB_FAILURE_TRAMPOLINE; }
+
+ // Get the code associated with this frame.
+ // This method could be called during marking phase of GC.
+ virtual Code* unchecked_code() const;
+
+ virtual void Iterate(ObjectVisitor* v) const;
+
+ protected:
+ inline explicit StubFailureTrampolineFrame(
+ StackFrameIterator* iterator);
+
+ virtual Address GetCallerStackPointer() const;
+
+ private:
+ friend class StackFrameIterator;
+};
+
+
// Construct frames are special trampoline frames introduced to handle
// function invocations through 'new'.
class ConstructFrame: public InternalFrame {
@@ -661,10 +741,6 @@ class ConstructFrame: public InternalFrame {
class StackFrameIterator BASE_EMBEDDED {
public:
- // An iterator that iterates over the current thread's stack,
- // and uses current isolate.
- StackFrameIterator();
-
// An iterator that iterates over the isolate's current thread's stack.
explicit StackFrameIterator(Isolate* isolate);
@@ -724,8 +800,6 @@ class StackFrameIterator BASE_EMBEDDED {
template<typename Iterator>
class JavaScriptFrameIteratorTemp BASE_EMBEDDED {
public:
- JavaScriptFrameIteratorTemp() { if (!done()) Advance(); }
-
inline explicit JavaScriptFrameIteratorTemp(Isolate* isolate);
inline JavaScriptFrameIteratorTemp(Isolate* isolate, ThreadLocalTop* top);
@@ -884,6 +958,8 @@ class SafeStackTraceFrameIterator: public SafeJavaScriptFrameIterator {
class StackFrameLocator BASE_EMBEDDED {
public:
+ explicit StackFrameLocator(Isolate* isolate) : iterator_(isolate) {}
+
// Find the nth JavaScript frame on the stack. The caller must
// guarantee that such a frame exists.
JavaScriptFrame* FindJavaScriptFrame(int n);
@@ -895,7 +971,7 @@ class StackFrameLocator BASE_EMBEDDED {
// Reads all frames on the current stack and copies them into the current
// zone memory.
-Vector<StackFrame*> CreateStackMap(Zone* zone);
+Vector<StackFrame*> CreateStackMap(Isolate* isolate, Zone* zone);
} } // namespace v8::internal
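
The new StandardFrameConstants above fix the fp-relative slot layout: expressions at fp-3, marker at fp-2, context at fp-1, caller fp at fp+0, return pc at fp+1, caller sp at fp+2, all in pointer-sized slots. A sketch that reads and writes such slots through a simulated frame pointer; only the offsets come from the patch, the buffer and values are invented:

#include <cstdint>

const int kPointerSize = sizeof(void*);

// Offsets exactly as in the patch, in bytes relative to fp.
const int kExpressionsOffset = -3 * kPointerSize;
const int kMarkerOffset      = -2 * kPointerSize;
const int kContextOffset     = -1 * kPointerSize;
const int kCallerFPOffset    =  0 * kPointerSize;
const int kCallerPCOffset    = +1 * kPointerSize;
const int kCallerSPOffset    = +2 * kPointerSize;

// Read or write the pointer-sized slot at fp + byte_offset.
inline intptr_t& SlotAt(uint8_t* fp, int byte_offset) {
  return *reinterpret_cast<intptr_t*>(fp + byte_offset);
}

// Usage: simulate a frame inside a buffer and touch its context slot.
//   alignas(sizeof(intptr_t)) uint8_t stack[8 * sizeof(void*)] = {0};
//   uint8_t* fp = stack + 4 * sizeof(void*);  // fp points mid-buffer
//   SlotAt(fp, kContextOffset) = 42;          // "store" a context
//   // SlotAt(fp, kContextOffset) now reads back 42
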
diff --git a/src/3rdparty/v8/src/full-codegen.cc b/src/3rdparty/v8/src/full-codegen.cc
index 9592e0a..a43f674 100644
--- a/src/3rdparty/v8/src/full-codegen.cc
+++ b/src/3rdparty/v8/src/full-codegen.cc
@@ -86,6 +86,10 @@ void BreakableStatementChecker::VisitModuleUrl(ModuleUrl* module) {
}
+void BreakableStatementChecker::VisitModuleStatement(ModuleStatement* stmt) {
+}
+
+
void BreakableStatementChecker::VisitBlock(Block* stmt) {
}
@@ -303,6 +307,8 @@ bool FullCodeGenerator::MakeCode(CompilationInfo* info) {
#ifdef ENABLE_GDB_JIT_INTERFACE
masm.positions_recorder()->StartGDBJITLineInfoRecording();
#endif
+ LOG_CODE_EVENT(isolate,
+ CodeStartLinePosInfoRecordEvent(masm.positions_recorder()));
FullCodeGenerator cgen(&masm, info);
cgen.Generate();
@@ -340,6 +346,11 @@ bool FullCodeGenerator::MakeCode(CompilationInfo* info) {
GDBJIT(RegisterDetailedLineInfo(*code, lineinfo));
}
#endif
+ if (!code.is_null()) {
+ void* line_info =
+ masm.positions_recorder()->DetachJITHandlerData();
+ LOG_CODE_EVENT(isolate, CodeEndLinePosInfoRecordEvent(*code, line_info));
+ }
return !code.is_null();
}
@@ -394,6 +405,7 @@ void FullCodeGenerator::Initialize() {
!Snapshot::HaveASnapshotToStartFrom();
masm_->set_emit_debug_code(generate_debug_code_);
masm_->set_predictable_code_size(true);
+ InitializeAstVisitor();
}
@@ -443,18 +455,8 @@ void FullCodeGenerator::PrepareForBailoutForId(BailoutId id, State state) {
StateField::encode(state) | PcField::encode(masm_->pc_offset());
ASSERT(Smi::IsValid(pc_and_state));
BailoutEntry entry = { id, pc_and_state };
-#ifdef DEBUG
- if (FLAG_enable_slow_asserts) {
- // Assert that we don't have multiple bailout entries for the same node.
- for (int i = 0; i < bailout_entries_.length(); i++) {
- if (bailout_entries_.at(i).id == entry.id) {
- AstPrinter printer;
- PrintF("%s", printer.PrintProgram(info_->function()));
- UNREACHABLE();
- }
- }
- }
-#endif // DEBUG
+ ASSERT(!prepared_bailout_ids_.Contains(id.ToInt()));
+ prepared_bailout_ids_.Add(id.ToInt(), zone());
bailout_entries_.Add(entry, zone());
}
@@ -466,9 +468,8 @@ void FullCodeGenerator::RecordTypeFeedbackCell(
}
-void FullCodeGenerator::RecordStackCheck(BailoutId ast_id) {
- // The pc offset does not need to be encoded and packed together with a
- // state.
+void FullCodeGenerator::RecordBackEdge(BailoutId ast_id) {
+ // The pc offset does not need to be encoded and packed together with a state.
ASSERT(masm_->pc_offset() > 0);
BailoutEntry entry = { ast_id, static_cast<unsigned>(masm_->pc_offset()) };
stack_checks_.Add(entry, zone());
@@ -582,16 +583,137 @@ void FullCodeGenerator::DoTest(const TestContext* context) {
}
+void FullCodeGenerator::AllocateModules(ZoneList<Declaration*>* declarations) {
+ ASSERT(scope_->is_global_scope());
+
+ for (int i = 0; i < declarations->length(); i++) {
+ ModuleDeclaration* declaration = declarations->at(i)->AsModuleDeclaration();
+ if (declaration != NULL) {
+ ModuleLiteral* module = declaration->module()->AsModuleLiteral();
+ if (module != NULL) {
+ Comment cmnt(masm_, "[ Link nested modules");
+ Scope* scope = module->body()->scope();
+ Interface* interface = scope->interface();
+ ASSERT(interface->IsModule() && interface->IsFrozen());
+
+ interface->Allocate(scope->module_var()->index());
+
+ // Set up module context.
+ ASSERT(scope->interface()->Index() >= 0);
+ __ Push(Smi::FromInt(scope->interface()->Index()));
+ __ Push(scope->GetScopeInfo());
+ __ CallRuntime(Runtime::kPushModuleContext, 2);
+ StoreToFrameField(StandardFrameConstants::kContextOffset,
+ context_register());
+
+ AllocateModules(scope->declarations());
+
+ // Pop module context.
+ LoadContextField(context_register(), Context::PREVIOUS_INDEX);
+ // Update local stack frame context field.
+ StoreToFrameField(StandardFrameConstants::kContextOffset,
+ context_register());
+ }
+ }
+ }
+}
+
+
+// Modules have their own local scope, represented by their own context.
+// Module instance objects have an accessor for every export that forwards
+// access to the respective slot from the module's context. (Exports that are
+// modules themselves, however, are simple data properties.)
+//
+// All modules have a _hosting_ scope/context, which (currently) is the
+// (innermost) enclosing global scope. To deal with recursion, nested modules
+// are hosted by the same scope as global ones.
+//
+// For every (global or nested) module literal, the hosting context has an
+// internal slot that points directly to the respective module context. This
+// enables quick access to (statically resolved) module members by 2-dimensional
+// access through the hosting context. For example,
+//
+// module A {
+// let x;
+// module B { let y; }
+// }
+// module C { let z; }
+//
+// allocates contexts as follows:
+//
+// [header| .A | .B | .C | A | C ] (global)
+// | | |
+// | | +-- [header| z ] (module)
+// | |
+// | +------- [header| y ] (module)
+// |
+// +------------ [header| x | B ] (module)
+//
+// Here, .A, .B, .C are the internal slots pointing to the hosted module
+// contexts, whereas A, B, C hold the actual instance objects (note that every
+// module context also points to the respective instance object through its
+// extension slot in the header).
+//
+// To deal with arbitrary recursion and aliases between modules,
+// they are created and initialized in several stages. Each stage applies to
+// all modules in the hosting global scope, including nested ones.
+//
+// 1. Allocate: for each module _literal_, allocate the module contexts and
+// respective instance object and wire them up. This happens in the
+// PushModuleContext runtime function, as generated by AllocateModules
+// (invoked by VisitDeclarations in the hosting scope).
+//
+// 2. Bind: for each module _declaration_ (i.e. literals as well as aliases),
+// assign the respective instance object to respective local variables. This
+// happens in VisitModuleDeclaration, and uses the instance objects created
+// in the previous stage.
+// For each module _literal_, this phase also constructs a module descriptor
+// for the next stage. This happens in VisitModuleLiteral.
+//
+// 3. Populate: invoke the DeclareModules runtime function to populate each
+// _instance_ object with accessors for its exports. This is generated by
+// DeclareModules (invoked by VisitDeclarations in the hosting scope again),
+// and uses the descriptors generated in the previous stage.
+//
+// 4. Initialize: execute the module bodies (and other code) in sequence. This
+// happens by the separate statements generated for module bodies. To reenter
+// the module scopes properly, the parser inserted ModuleStatements.
+
void FullCodeGenerator::VisitDeclarations(
ZoneList<Declaration*>* declarations) {
+ Handle<FixedArray> saved_modules = modules_;
+ int saved_module_index = module_index_;
ZoneList<Handle<Object> >* saved_globals = globals_;
ZoneList<Handle<Object> > inner_globals(10, zone());
globals_ = &inner_globals;
+ if (scope_->num_modules() != 0) {
+ // This is a scope hosting modules. Allocate a descriptor array to pass
+ // to the runtime for initialization.
+ Comment cmnt(masm_, "[ Allocate modules");
+ ASSERT(scope_->is_global_scope());
+ modules_ =
+ isolate()->factory()->NewFixedArray(scope_->num_modules(), TENURED);
+ module_index_ = 0;
+
+ // Generate code for allocating all modules, including nested ones.
+ // The allocated contexts are stored in internal variables in this scope.
+ AllocateModules(declarations);
+ }
+
AstVisitor::VisitDeclarations(declarations);
+
+ if (scope_->num_modules() != 0) {
+ // Initialize modules from descriptor array.
+ ASSERT(module_index_ == modules_->length());
+ DeclareModules(modules_);
+ modules_ = saved_modules;
+ module_index_ = saved_module_index;
+ }
+
if (!globals_->is_empty()) {
// Invoke the platform-dependent code generator to do the actual
- // declaration the global functions and variables.
+ // declaration of the global functions and variables.
Handle<FixedArray> array =
isolate()->factory()->NewFixedArray(globals_->length(), TENURED);
for (int i = 0; i < globals_->length(); ++i)
@@ -604,19 +726,23 @@ void FullCodeGenerator::VisitDeclarations(
void FullCodeGenerator::VisitModuleLiteral(ModuleLiteral* module) {
- // Allocate a module context statically.
Block* block = module->body();
Scope* saved_scope = scope();
scope_ = block->scope();
- Interface* interface = module->interface();
- Handle<JSModule> instance = interface->Instance();
+ Interface* interface = scope_->interface();
Comment cmnt(masm_, "[ ModuleLiteral");
SetStatementPosition(block);
+ ASSERT(!modules_.is_null());
+ ASSERT(module_index_ < modules_->length());
+ int index = module_index_++;
+
// Set up module context.
- __ Push(instance);
- __ CallRuntime(Runtime::kPushModuleContext, 1);
+ ASSERT(interface->Index() >= 0);
+ __ Push(Smi::FromInt(interface->Index()));
+ __ Push(Smi::FromInt(0));
+ __ CallRuntime(Runtime::kPushModuleContext, 2);
StoreToFrameField(StandardFrameConstants::kContextOffset, context_register());
{
@@ -624,6 +750,11 @@ void FullCodeGenerator::VisitModuleLiteral(ModuleLiteral* module) {
VisitDeclarations(scope_->declarations());
}
+ // Populate the module description.
+ Handle<ModuleInfo> description =
+ ModuleInfo::Create(isolate(), interface, scope_);
+ modules_->set(index, *description);
+
scope_ = saved_scope;
// Pop module context.
LoadContextField(context_register(), Context::PREVIOUS_INDEX);
@@ -644,8 +775,20 @@ void FullCodeGenerator::VisitModulePath(ModulePath* module) {
}
-void FullCodeGenerator::VisitModuleUrl(ModuleUrl* decl) {
- // TODO(rossberg)
+void FullCodeGenerator::VisitModuleUrl(ModuleUrl* module) {
+ // TODO(rossberg): dummy allocation for now.
+ Scope* scope = module->body()->scope();
+ Interface* interface = scope_->interface();
+
+ ASSERT(interface->IsModule() && interface->IsFrozen());
+ ASSERT(!modules_.is_null());
+ ASSERT(module_index_ < modules_->length());
+ interface->Allocate(scope->module_var()->index());
+ int index = module_index_++;
+
+ Handle<ModuleInfo> description =
+ ModuleInfo::Create(isolate(), interface, scope_);
+ modules_->set(index, *description);
}
@@ -904,37 +1047,28 @@ void FullCodeGenerator::VisitBlock(Block* stmt) {
// Push a block context when entering a block with block scoped variables.
if (stmt->scope() != NULL) {
scope_ = stmt->scope();
- if (scope_->is_module_scope()) {
- // If this block is a module body, then we have already allocated and
- // initialized the declarations earlier. Just push the context.
- ASSERT(!scope_->interface()->Instance().is_null());
- __ Push(scope_->interface()->Instance());
- __ CallRuntime(Runtime::kPushModuleContext, 1);
- StoreToFrameField(
- StandardFrameConstants::kContextOffset, context_register());
- } else {
- { Comment cmnt(masm_, "[ Extend block context");
- Handle<ScopeInfo> scope_info = scope_->GetScopeInfo();
- int heap_slots =
- scope_info->ContextLength() - Context::MIN_CONTEXT_SLOTS;
- __ Push(scope_info);
- PushFunctionArgumentForContextAllocation();
- if (heap_slots <= FastNewBlockContextStub::kMaximumSlots) {
- FastNewBlockContextStub stub(heap_slots);
- __ CallStub(&stub);
- } else {
- __ CallRuntime(Runtime::kPushBlockContext, 2);
- }
-
- // Replace the context stored in the frame.
- StoreToFrameField(StandardFrameConstants::kContextOffset,
- context_register());
- }
- { Comment cmnt(masm_, "[ Declarations");
- VisitDeclarations(scope_->declarations());
+ ASSERT(!scope_->is_module_scope());
+ { Comment cmnt(masm_, "[ Extend block context");
+ Handle<ScopeInfo> scope_info = scope_->GetScopeInfo();
+ int heap_slots = scope_info->ContextLength() - Context::MIN_CONTEXT_SLOTS;
+ __ Push(scope_info);
+ PushFunctionArgumentForContextAllocation();
+ if (heap_slots <= FastNewBlockContextStub::kMaximumSlots) {
+ FastNewBlockContextStub stub(heap_slots);
+ __ CallStub(&stub);
+ } else {
+ __ CallRuntime(Runtime::kPushBlockContext, 2);
}
+
+ // Replace the context stored in the frame.
+ StoreToFrameField(StandardFrameConstants::kContextOffset,
+ context_register());
+ }
+ { Comment cmnt(masm_, "[ Declarations");
+ VisitDeclarations(scope_->declarations());
}
}
+
PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
VisitStatements(stmt->statements());
scope_ = saved_scope;
@@ -951,6 +1085,26 @@ void FullCodeGenerator::VisitBlock(Block* stmt) {
}
+void FullCodeGenerator::VisitModuleStatement(ModuleStatement* stmt) {
+ Comment cmnt(masm_, "[ Module context");
+
+ __ Push(Smi::FromInt(stmt->proxy()->interface()->Index()));
+ __ Push(Smi::FromInt(0));
+ __ CallRuntime(Runtime::kPushModuleContext, 2);
+ StoreToFrameField(
+ StandardFrameConstants::kContextOffset, context_register());
+
+ Scope* saved_scope = scope_;
+ scope_ = stmt->body()->scope();
+ VisitStatements(stmt->body()->statements());
+ scope_ = saved_scope;
+ LoadContextField(context_register(), Context::PREVIOUS_INDEX);
+ // Update local stack frame context field.
+ StoreToFrameField(StandardFrameConstants::kContextOffset,
+ context_register());
+}
+
+
void FullCodeGenerator::VisitExpressionStatement(ExpressionStatement* stmt) {
Comment cmnt(masm_, "[ ExpressionStatement");
SetStatementPosition(stmt);
@@ -1111,7 +1265,7 @@ void FullCodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) {
// Check stack before looping.
PrepareForBailoutForId(stmt->BackEdgeId(), NO_REGISTERS);
__ bind(&stack_check);
- EmitStackCheck(stmt, &body);
+ EmitBackEdgeBookkeeping(stmt, &body);
__ jmp(&body);
PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
@@ -1140,7 +1294,7 @@ void FullCodeGenerator::VisitWhileStatement(WhileStatement* stmt) {
SetStatementPosition(stmt);
// Check stack before looping.
- EmitStackCheck(stmt, &body);
+ EmitBackEdgeBookkeeping(stmt, &body);
__ bind(&test);
VisitForControl(stmt->cond(),
@@ -1186,7 +1340,7 @@ void FullCodeGenerator::VisitForStatement(ForStatement* stmt) {
SetStatementPosition(stmt);
// Check stack before looping.
- EmitStackCheck(stmt, &body);
+ EmitBackEdgeBookkeeping(stmt, &body);
__ bind(&test);
if (stmt->cond() != NULL) {
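
PrepareForBailoutForId above swaps a DEBUG-only linear scan over bailout_entries_ for a GrowableBitVector (prepared_bailout_ids_), making the duplicate-id assertion O(1) per entry. A minimal sketch of such a bit vector, using std::vector instead of V8's zone allocator:

#include <cstdint>
#include <vector>

// Sketch of a growable bit vector: Add() grows on demand, Contains()
// answers in O(1). New bits start cleared.
class GrowableBitVector {
 public:
  void Add(int i) {
    if (i >= static_cast<int>(bits_.size()) * 64) {
      bits_.resize(i / 64 + 1, 0);
    }
    bits_[i / 64] |= uint64_t{1} << (i % 64);
  }
  bool Contains(int i) const {
    if (i >= static_cast<int>(bits_.size()) * 64) return false;
    return (bits_[i / 64] >> (i % 64)) & 1;
  }

 private:
  std::vector<uint64_t> bits_;
};

// Usage mirrors PrepareForBailoutForId:
//   GrowableBitVector ids;
//   // ids.Contains(7) is false: the id has not been prepared yet
//   ids.Add(7);
//   // ids.Contains(7) is now true; a duplicate would trip the ASSERT
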
diff --git a/src/3rdparty/v8/src/full-codegen.h b/src/3rdparty/v8/src/full-codegen.h
index 972839e..b9285c7 100644
--- a/src/3rdparty/v8/src/full-codegen.h
+++ b/src/3rdparty/v8/src/full-codegen.h
@@ -35,6 +35,7 @@
#include "code-stubs.h"
#include "codegen.h"
#include "compiler.h"
+#include "data-flow.h"
namespace v8 {
namespace internal {
@@ -48,7 +49,9 @@ class JumpPatchSite;
// debugger to piggyback on.
class BreakableStatementChecker: public AstVisitor {
public:
- BreakableStatementChecker() : is_breakable_(false) {}
+ BreakableStatementChecker() : is_breakable_(false) {
+ InitializeAstVisitor();
+ }
void Check(Statement* stmt);
void Check(Expression* stmt);
@@ -63,6 +66,7 @@ class BreakableStatementChecker: public AstVisitor {
bool is_breakable_;
+ DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
DISALLOW_COPY_AND_ASSIGN(BreakableStatementChecker);
};
@@ -396,9 +400,15 @@ class FullCodeGenerator: public AstVisitor {
void VisitInDuplicateContext(Expression* expr);
void VisitDeclarations(ZoneList<Declaration*>* declarations);
+ void DeclareModules(Handle<FixedArray> descriptions);
void DeclareGlobals(Handle<FixedArray> pairs);
int DeclareGlobalsFlags();
+ // Generate code to allocate all (including nested) modules and contexts.
+ // Because of recursive linking and the presence of module alias declarations,
+ // this has to be a separate pass _before_ populating or executing any module.
+ void AllocateModules(ZoneList<Declaration*>* declarations);
+
// Try to perform a comparison as a fast inlined literal compare if
// the operands allow it. Returns true if the compare operations
// has been matched and all code generated; false otherwise.
@@ -442,14 +452,13 @@ class FullCodeGenerator: public AstVisitor {
// neither a with nor a catch context.
void EmitDebugCheckDeclarationContext(Variable* variable);
- // Platform-specific code for checking the stack limit at the back edge of
- // a loop.
// This is meant to be called at loop back edges, |back_edge_target| is
// the jump target of the back edge and is used to approximate the amount
// of code inside the loop.
- void EmitStackCheck(IterationStatement* stmt, Label* back_edge_target);
- // Record the OSR AST id corresponding to a stack check in the code.
- void RecordStackCheck(BailoutId osr_ast_id);
+ void EmitBackEdgeBookkeeping(IterationStatement* stmt,
+ Label* back_edge_target);
+ // Record the OSR AST id corresponding to a back edge in the code.
+ void RecordBackEdge(BailoutId osr_ast_id);
// Emit a table of stack check ids and pcs into the code stream. Return
// the offset of the start of the table.
unsigned EmitStackCheckTable();
@@ -805,8 +814,13 @@ class FullCodeGenerator: public AstVisitor {
NestedStatement* nesting_stack_;
int loop_depth_;
ZoneList<Handle<Object> >* globals_;
+ Handle<FixedArray> modules_;
+ int module_index_;
const ExpressionContext* context_;
ZoneList<BailoutEntry> bailout_entries_;
+ GrowableBitVector prepared_bailout_ids_;
+ // TODO(svenpanne) Rename this to something like back_edges_ and rename
+ // related functions accordingly.
ZoneList<BailoutEntry> stack_checks_;
ZoneList<TypeFeedbackCellEntry> type_feedback_cells_;
int ic_total_count_;
@@ -817,6 +831,7 @@ class FullCodeGenerator: public AstVisitor {
friend class NestedStatement;
+ DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
DISALLOW_COPY_AND_ASSIGN(FullCodeGenerator);
};
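
DEFINE_AST_VISITOR_SUBCLASS_MEMBERS and InitializeAstVisitor above move per-visitor state out of the AstVisitor base class and stamp it into each subclass via a macro. A sketch of the idiom; the member set here (a stack-overflow flag) is an assumption chosen for illustration:

struct AstNode;  // opaque placeholder for a real AST node type

// Macro that injects the shared members into any visitor subclass.
#define DEFINE_DEMO_VISITOR_MEMBERS()                        \
  bool stack_overflow_ = false;                              \
  bool CheckStackOverflow() {                                \
    /* a real check compares the stack pointer to a limit */ \
    return stack_overflow_;                                  \
  }                                                          \
  void InitializeAstVisitor() { stack_overflow_ = false; }

class DemoVisitor {
 public:
  DemoVisitor() { InitializeAstVisitor(); }
  void Visit(AstNode* node) {
    (void)node;                        // dispatch on node type here
    if (CheckStackOverflow()) return;  // bail out instead of recursing
  }

 private:
  DEFINE_DEMO_VISITOR_MEMBERS()
};
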
diff --git a/src/3rdparty/v8/src/func-name-inferrer.cc b/src/3rdparty/v8/src/func-name-inferrer.cc
index 2dd0bbc..84d3bf0 100644
--- a/src/3rdparty/v8/src/func-name-inferrer.cc
+++ b/src/3rdparty/v8/src/func-name-inferrer.cc
@@ -55,14 +55,14 @@ void FuncNameInferrer::PushEnclosingName(Handle<String> name) {
void FuncNameInferrer::PushLiteralName(Handle<String> name) {
- if (IsOpen() && !isolate()->heap()->prototype_symbol()->Equals(*name)) {
+ if (IsOpen() && !isolate()->heap()->prototype_string()->Equals(*name)) {
names_stack_.Add(Name(name, kLiteralName), zone());
}
}
void FuncNameInferrer::PushVariableName(Handle<String> name) {
- if (IsOpen() && !isolate()->heap()->result_symbol()->Equals(*name)) {
+ if (IsOpen() && !isolate()->heap()->result_string()->Equals(*name)) {
names_stack_.Add(Name(name, kVariableName), zone());
}
}
@@ -85,7 +85,7 @@ Handle<String> FuncNameInferrer::MakeNameFromStackHelper(int pos,
if (prev->length() > 0) {
Factory* factory = isolate()->factory();
Handle<String> curr = factory->NewConsString(
- factory->dot_symbol(), names_stack_.at(pos).name);
+ factory->dot_string(), names_stack_.at(pos).name);
return MakeNameFromStackHelper(pos + 1,
factory->NewConsString(prev, curr));
} else {
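
MakeNameFromStackHelper above folds the collected name stack into a dotted function name (e.g. "outer.inner") using cons strings from the factory. The same fold with plain std::string, as a sketch:

#include <string>
#include <vector>

// Join the name stack into a dotted path, skipping empty segments.
std::string MakeNameFromStack(const std::vector<std::string>& names) {
  std::string result;
  for (const std::string& name : names) {
    if (name.empty()) continue;
    if (!result.empty()) result += ".";
    result += name;
  }
  return result;
}

// MakeNameFromStack({"module", "Klass", "method"}) yields
// "module.Klass.method", analogous to the cons-string chain built above.
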
diff --git a/src/3rdparty/v8/src/global-handles.cc b/src/3rdparty/v8/src/global-handles.cc
index 0006f8e..299449a 100644
--- a/src/3rdparty/v8/src/global-handles.cc
+++ b/src/3rdparty/v8/src/global-handles.cc
@@ -46,7 +46,7 @@ class GlobalHandles::Node {
// State transition diagram:
// FREE -> NORMAL <-> WEAK -> PENDING -> NEAR_DEATH -> { NORMAL, WEAK, FREE }
enum State {
- FREE,
+ FREE = 0,
NORMAL, // Normal global handle.
WEAK, // Flagged as weak but not yet finalized.
PENDING, // Has been recognized as only reachable by weak handles.
@@ -59,53 +59,70 @@ class GlobalHandles::Node {
return reinterpret_cast<Node*>(location);
}
- Node() {}
+ Node() {
+ ASSERT(OFFSET_OF(Node, class_id_) == Internals::kNodeClassIdOffset);
+ ASSERT(OFFSET_OF(Node, flags_) == Internals::kNodeFlagsOffset);
+ STATIC_ASSERT(static_cast<int>(NodeState::kMask) ==
+ Internals::kNodeStateMask);
+ STATIC_ASSERT(WEAK == Internals::kNodeStateIsWeakValue);
+ STATIC_ASSERT(NEAR_DEATH == Internals::kNodeStateIsNearDeathValue);
+ STATIC_ASSERT(static_cast<int>(IsIndependent::kShift) ==
+ Internals::kNodeIsIndependentShift);
+ STATIC_ASSERT(static_cast<int>(IsPartiallyDependent::kShift) ==
+ Internals::kNodeIsPartiallyDependentShift);
+ }
#ifdef DEBUG
~Node() {
// TODO(1428): if it's a weak handle we should have invoked its callback.
// Zap the values for eager trapping.
- object_ = NULL;
+ object_ = reinterpret_cast<Object*>(kGlobalHandleZapValue);
class_id_ = v8::HeapProfiler::kPersistentHandleNoClassId;
index_ = 0;
- independent_ = false;
- partially_dependent_ = false;
- in_new_space_list_ = false;
+ set_independent(false);
+ set_partially_dependent(false);
+ set_in_new_space_list(false);
parameter_or_next_free_.next_free = NULL;
- callback_ = NULL;
+ weak_reference_callback_ = NULL;
+ near_death_callback_ = NULL;
}
#endif
void Initialize(int index, Node** first_free) {
index_ = static_cast<uint8_t>(index);
ASSERT(static_cast<int>(index_) == index);
- state_ = FREE;
- in_new_space_list_ = false;
+ set_state(FREE);
+ set_in_new_space_list(false);
parameter_or_next_free_.next_free = *first_free;
*first_free = this;
}
void Acquire(Object* object, GlobalHandles* global_handles) {
- ASSERT(state_ == FREE);
+ ASSERT(state() == FREE);
object_ = object;
class_id_ = v8::HeapProfiler::kPersistentHandleNoClassId;
- independent_ = false;
- partially_dependent_ = false;
- state_ = NORMAL;
+ set_independent(false);
+ set_partially_dependent(false);
+ set_state(NORMAL);
parameter_or_next_free_.parameter = NULL;
- callback_ = NULL;
+ weak_reference_callback_ = NULL;
+ near_death_callback_ = NULL;
IncreaseBlockUses(global_handles);
}
void Release(GlobalHandles* global_handles) {
- ASSERT(state_ != FREE);
- if (IsWeakRetainer()) {
- global_handles->number_of_weak_handles_--;
- if (object_->IsJSGlobalObject()) {
- global_handles->number_of_global_object_weak_handles_--;
- }
- }
- state_ = FREE;
+ ASSERT(state() != FREE);
+ set_state(FREE);
+ // TODO(176056): Enable as soon as WebKit bindings are fixed.
+#ifdef DEBUG_TODO
+ // Zap the values for eager trapping.
+ object_ = reinterpret_cast<Object*>(kGlobalHandleZapValue);
+ class_id_ = v8::HeapProfiler::kPersistentHandleNoClassId;
+ set_independent(false);
+ set_partially_dependent(false);
+ weak_reference_callback_ = NULL;
+ near_death_callback_ = NULL;
+#endif
parameter_or_next_free_.next_free = global_handles->first_free_;
global_handles->first_free_ = this;
DecreaseBlockUses(global_handles);
@@ -120,115 +137,124 @@ class GlobalHandles::Node {
bool has_wrapper_class_id() const {
return class_id_ != v8::HeapProfiler::kPersistentHandleNoClassId;
}
+
uint16_t wrapper_class_id() const { return class_id_; }
- void set_wrapper_class_id(uint16_t class_id) {
- class_id_ = class_id;
+
+ // State and flag accessors.
+
+ State state() const {
+ return NodeState::decode(flags_);
+ }
+ void set_state(State state) {
+ flags_ = NodeState::update(flags_, state);
}
- // State accessors.
+ bool is_independent() {
+ return IsIndependent::decode(flags_);
+ }
+ void set_independent(bool v) {
+ flags_ = IsIndependent::update(flags_, v);
+ }
- State state() const { return state_; }
+ bool is_partially_dependent() {
+ return IsPartiallyDependent::decode(flags_);
+ }
+ void set_partially_dependent(bool v) {
+ flags_ = IsPartiallyDependent::update(flags_, v);
+ }
+
+ bool is_in_new_space_list() {
+ return IsInNewSpaceList::decode(flags_);
+ }
+ void set_in_new_space_list(bool v) {
+ flags_ = IsInNewSpaceList::update(flags_, v);
+ }
bool IsNearDeath() const {
// Check for PENDING to ensure correct answer when processing callbacks.
- return state_ == PENDING || state_ == NEAR_DEATH;
+ return state() == PENDING || state() == NEAR_DEATH;
}
- bool IsWeak() const { return state_ == WEAK; }
+ bool IsWeak() const { return state() == WEAK; }
- bool IsRetainer() const { return state_ != FREE; }
+ bool IsRetainer() const { return state() != FREE; }
- bool IsStrongRetainer() const { return state_ == NORMAL; }
+ bool IsStrongRetainer() const { return state() == NORMAL; }
bool IsWeakRetainer() const {
- return state_ == WEAK || state_ == PENDING || state_ == NEAR_DEATH;
+ return state() == WEAK || state() == PENDING || state() == NEAR_DEATH;
}
void MarkPending() {
- ASSERT(state_ == WEAK);
- state_ = PENDING;
+ ASSERT(state() == WEAK);
+ set_state(PENDING);
}
// Independent flag accessors.
void MarkIndependent() {
- ASSERT(state_ != FREE);
- independent_ = true;
+ ASSERT(state() != FREE);
+ set_independent(true);
}
- bool is_independent() const { return independent_; }
void MarkPartiallyDependent(GlobalHandles* global_handles) {
- ASSERT(state_ != FREE);
+ ASSERT(state() != FREE);
if (global_handles->isolate()->heap()->InNewSpace(object_)) {
- partially_dependent_ = true;
+ set_partially_dependent(true);
}
}
- bool is_partially_dependent() const { return partially_dependent_; }
- void clear_partially_dependent() { partially_dependent_ = false; }
-
- // In-new-space-list flag accessors.
- void set_in_new_space_list(bool v) { in_new_space_list_ = v; }
- bool is_in_new_space_list() const { return in_new_space_list_; }
+ void clear_partially_dependent() { set_partially_dependent(false); }
// Callback accessor.
- WeakReferenceCallback callback() { return callback_; }
+ // TODO(svenpanne) Re-enable or nuke later.
+ // WeakReferenceCallback callback() { return callback_; }
// Callback parameter accessors.
void set_parameter(void* parameter) {
- ASSERT(state_ != FREE);
+ ASSERT(state() != FREE);
parameter_or_next_free_.parameter = parameter;
}
void* parameter() const {
- ASSERT(state_ != FREE);
+ ASSERT(state() != FREE);
return parameter_or_next_free_.parameter;
}
// Accessors for next free node in the free list.
Node* next_free() {
- ASSERT(state_ == FREE);
+ ASSERT(state() == FREE);
return parameter_or_next_free_.next_free;
}
void set_next_free(Node* value) {
- ASSERT(state_ == FREE);
+ ASSERT(state() == FREE);
parameter_or_next_free_.next_free = value;
}
void MakeWeak(GlobalHandles* global_handles,
void* parameter,
- WeakReferenceCallback callback) {
- ASSERT(state_ != FREE);
- if (!IsWeakRetainer()) {
- global_handles->number_of_weak_handles_++;
- if (object_->IsJSGlobalObject()) {
- global_handles->number_of_global_object_weak_handles_++;
- }
- }
- state_ = WEAK;
+ WeakReferenceCallback weak_reference_callback,
+ NearDeathCallback near_death_callback) {
+ ASSERT(state() != FREE);
+ set_state(WEAK);
set_parameter(parameter);
- callback_ = callback;
+ weak_reference_callback_ = weak_reference_callback;
+ near_death_callback_ = near_death_callback;
}
void ClearWeakness(GlobalHandles* global_handles) {
- ASSERT(state_ != FREE);
- if (IsWeakRetainer()) {
- global_handles->number_of_weak_handles_--;
- if (object_->IsJSGlobalObject()) {
- global_handles->number_of_global_object_weak_handles_--;
- }
- }
- state_ = NORMAL;
+ ASSERT(state() != FREE);
+ set_state(NORMAL);
set_parameter(NULL);
}
bool PostGarbageCollectionProcessing(Isolate* isolate,
GlobalHandles* global_handles) {
- if (state_ != Node::PENDING) return false;
- WeakReferenceCallback func = callback();
- if (func == NULL) {
+ if (state() != Node::PENDING) return false;
+ if (weak_reference_callback_ == NULL &&
+ near_death_callback_ == NULL) {
Release(global_handles);
return false;
}
void* par = parameter();
- state_ = NEAR_DEATH;
+ set_state(NEAR_DEATH);
set_parameter(NULL);
v8::Persistent<v8::Object> object = ToApi<v8::Object>(handle());
@@ -241,11 +267,18 @@ class GlobalHandles::Node {
ExternalTwoByteString::cast(object_)->resource() != NULL);
// Leaving V8.
VMState state(isolate, EXTERNAL);
- func(object, par);
+ if (weak_reference_callback_ != NULL) {
+ weak_reference_callback_(object, par);
+ }
+ if (near_death_callback_ != NULL) {
+ near_death_callback_(reinterpret_cast<v8::Isolate*>(isolate),
+ object,
+ par);
+ }
}
// In most cases, absence of explicit cleanup or revival of the weak
// handle would lead to a memory leak.
- ASSERT(state_ != NEAR_DEATH);
+ ASSERT(state() != NEAR_DEATH);
return true;
}
@@ -267,15 +300,18 @@ class GlobalHandles::Node {
// Index in the containing handle block.
uint8_t index_;
- // Need one more bit for MSVC as it treats enums as signed.
- State state_ : 4;
+ // This stores three flags (independent, partially_dependent and
+ // in_new_space_list) and a State.
+ class NodeState: public BitField<State, 0, 4> {};
+ class IsIndependent: public BitField<bool, 4, 1> {};
+ class IsPartiallyDependent: public BitField<bool, 5, 1> {};
+ class IsInNewSpaceList: public BitField<bool, 6, 1> {};
- bool independent_ : 1;
- bool partially_dependent_ : 1;
- bool in_new_space_list_ : 1;
+ uint8_t flags_;
// Handle specific callback.
- WeakReferenceCallback callback_;
+ WeakReferenceCallback weak_reference_callback_;
+ NearDeathCallback near_death_callback_;
// Provided data for callback. In FREE state, this is used for
// the free list link.
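
The four bit-wide members are replaced here by a single uint8_t decoded through BitField helper classes. A minimal self-contained sketch of the same encode/decode/update scheme, using a simplified stand-in for V8's BitField template (not the real one):

#include <cassert>
#include <cstdint>

// Simplified stand-in for V8's BitField<T, shift, size> helper.
template <class T, int shift, int size>
class BitField {
 public:
  static const uint32_t kMask = ((1u << size) - 1) << shift;
  static uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << shift;
  }
  static T decode(uint32_t value) {
    return static_cast<T>((value & kMask) >> shift);
  }
  static uint32_t update(uint32_t previous, T value) {
    return (previous & ~kMask) | encode(value);
  }
};

enum State { FREE, NORMAL, WEAK, PENDING, NEAR_DEATH };
class NodeState: public BitField<State, 0, 4> {};
class IsIndependent: public BitField<bool, 4, 1> {};

int main() {
  uint8_t flags = 0;
  flags = NodeState::update(flags, WEAK);      // state occupies bits 0..3
  flags = IsIndependent::update(flags, true);  // flag occupies bit 4
  assert(NodeState::decode(flags) == WEAK);
  assert(IsIndependent::decode(flags));
  return 0;
}

Packing the state and the three flags into one byte helps keep each Node small, which matters since handle nodes are allocated in bulk blocks.
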
@@ -394,8 +430,6 @@ class GlobalHandles::NodeIterator {
GlobalHandles::GlobalHandles(Isolate* isolate)
: isolate_(isolate),
- number_of_weak_handles_(0),
- number_of_global_object_weak_handles_(0),
number_of_global_handles_(0),
first_block_(NULL),
first_used_block_(NULL),
@@ -443,10 +477,16 @@ void GlobalHandles::Destroy(Object** location) {
}
-void GlobalHandles::MakeWeak(Object** location, void* parameter,
- WeakReferenceCallback callback) {
- ASSERT(callback != NULL);
- Node::FromLocation(location)->MakeWeak(this, parameter, callback);
+void GlobalHandles::MakeWeak(Object** location,
+ void* parameter,
+ WeakReferenceCallback weak_reference_callback,
+ NearDeathCallback near_death_callback) {
+ ASSERT((weak_reference_callback != NULL) !=
+ (near_death_callback != NULL));
+ Node::FromLocation(location)->MakeWeak(this,
+ parameter,
+ weak_reference_callback,
+ near_death_callback);
}
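
The new ASSERT encodes "exactly one of the two callbacks is supplied": comparing the two NULL tests with != acts as a logical XOR. A tiny illustration with hypothetical callback types:

#include <cassert>
#include <cstddef>

typedef void (*OldStyleCallback)(void* parameter);
typedef void (*NewStyleCallback)(void* isolate, void* parameter);

// Exactly one callback must be non-NULL; both or neither trips the assert.
void RequireExactlyOne(OldStyleCallback old_cb, NewStyleCallback new_cb) {
  assert((old_cb != NULL) != (new_cb != NULL));
}

int main() {
  RequireExactlyOne(NULL, [](void*, void*) {});  // OK: exactly one supplied
  return 0;
}
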
@@ -480,14 +520,6 @@ bool GlobalHandles::IsWeak(Object** location) {
}
-void GlobalHandles::SetWrapperClassId(Object** location, uint16_t class_id) {
- Node::FromLocation(location)->set_wrapper_class_id(class_id);
-}
-
-uint16_t GlobalHandles::GetWrapperClassId(Object** location) {
- return Node::FromLocation(location)->wrapper_class_id();
-}
-
void GlobalHandles::IterateWeakRoots(ObjectVisitor* v) {
for (NodeIterator it(this); !it.done(); it.Advance()) {
if (it.node()->IsWeakRetainer()) v->VisitPointer(it.node()->location());
@@ -495,16 +527,6 @@ void GlobalHandles::IterateWeakRoots(ObjectVisitor* v) {
}
-void GlobalHandles::IterateWeakRoots(WeakReferenceGuest f,
- WeakReferenceCallback callback) {
- for (NodeIterator it(this); !it.done(); it.Advance()) {
- if (it.node()->IsWeak() && it.node()->callback() == callback) {
- f(it.node()->object(), it.node()->parameter());
- }
- }
-}
-
-
void GlobalHandles::IdentifyWeakHandles(WeakSlotCallback f) {
for (NodeIterator it(this); !it.done(); it.Advance()) {
if (it.node()->IsWeak() && f(it.node()->location())) {
@@ -551,8 +573,53 @@ void GlobalHandles::IterateNewSpaceWeakIndependentRoots(ObjectVisitor* v) {
}
+bool GlobalHandles::IterateObjectGroups(ObjectVisitor* v,
+ WeakSlotCallbackWithHeap can_skip) {
+ int last = 0;
+ bool any_group_was_visited = false;
+ for (int i = 0; i < object_groups_.length(); i++) {
+ ObjectGroup* entry = object_groups_.at(i);
+ ASSERT(entry != NULL);
+
+ Object*** objects = entry->objects_;
+ bool group_should_be_visited = false;
+ for (size_t j = 0; j < entry->length_; j++) {
+ Object* object = *objects[j];
+ if (object->IsHeapObject()) {
+ if (!can_skip(isolate_->heap(), &object)) {
+ group_should_be_visited = true;
+ break;
+ }
+ }
+ }
+
+ if (!group_should_be_visited) {
+ object_groups_[last++] = entry;
+ continue;
+ }
+
+ // An object in the group requires visiting, so iterate over all
+ // objects in the group.
+ for (size_t j = 0; j < entry->length_; ++j) {
+ Object* object = *objects[j];
+ if (object->IsHeapObject()) {
+ v->VisitPointer(&object);
+ any_group_was_visited = true;
+ }
+ }
+
+ // Once the entire group has been iterated over, set the object
+ // group to NULL so it won't be processed again.
+ entry->Dispose();
+ object_groups_.at(i) = NULL;
+ }
+ object_groups_.Rewind(last);
+ return any_group_was_visited;
+}
+
+
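IterateObjectGroups compacts the group list in place: surviving groups are slid to the front with the `last` cursor, visited groups are disposed, and the list is then truncated via Rewind(last). A sketch of the same keep-or-drop idiom over std::vector (assuming, as the code above suggests, that Rewind(n) truncates the list to its first n elements):

#include <vector>

// One-pass compaction: survivors move down to index `last`; the tail is
// cut off at the end (the Rewind(last) step above).
template <class T, class Pred>
void CompactKeep(std::vector<T>& items, Pred keep) {
  size_t last = 0;
  for (size_t i = 0; i < items.size(); ++i) {
    if (keep(items[i])) items[last++] = items[i];
  }
  items.resize(last);
}

The same cursor-and-Rewind shape reappears below when the new-space node list is pruned after GC.
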
bool GlobalHandles::PostGarbageCollectionProcessing(
- GarbageCollector collector) {
+ GarbageCollector collector, GCTracer* tracer) {
// Process weak global handle callbacks. This must be done after the
// GC is completely done, because the callbacks may invoke arbitrary
// API functions.
@@ -602,10 +669,17 @@ bool GlobalHandles::PostGarbageCollectionProcessing(
for (int i = 0; i < new_space_nodes_.length(); ++i) {
Node* node = new_space_nodes_[i];
ASSERT(node->is_in_new_space_list());
- if (node->IsRetainer() && isolate_->heap()->InNewSpace(node->object())) {
- new_space_nodes_[last++] = node;
+ if (node->IsRetainer()) {
+ if (isolate_->heap()->InNewSpace(node->object())) {
+ new_space_nodes_[last++] = node;
+ tracer->increment_nodes_copied_in_new_space();
+ } else {
+ node->set_in_new_space_list(false);
+ tracer->increment_nodes_promoted();
+ }
} else {
node->set_in_new_space_list(false);
+ tracer->increment_nodes_died_in_new_space();
}
}
new_space_nodes_.Rewind(last);
@@ -641,6 +715,40 @@ void GlobalHandles::IterateAllRootsWithClassIds(ObjectVisitor* v) {
}
+void GlobalHandles::IterateAllRootsInNewSpaceWithClassIds(ObjectVisitor* v) {
+ for (int i = 0; i < new_space_nodes_.length(); ++i) {
+ Node* node = new_space_nodes_[i];
+ if (node->IsRetainer() && node->has_wrapper_class_id()) {
+ v->VisitEmbedderReference(node->location(),
+ node->wrapper_class_id());
+ }
+ }
+}
+
+
+int GlobalHandles::NumberOfWeakHandles() {
+ int count = 0;
+ for (NodeIterator it(this); !it.done(); it.Advance()) {
+ if (it.node()->IsWeakRetainer()) {
+ count++;
+ }
+ }
+ return count;
+}
+
+
+int GlobalHandles::NumberOfGlobalObjectWeakHandles() {
+ int count = 0;
+ for (NodeIterator it(this); !it.done(); it.Advance()) {
+ if (it.node()->IsWeakRetainer() &&
+ it.node()->object()->IsJSGlobalObject()) {
+ count++;
+ }
+ }
+ return count;
+}
+
+
void GlobalHandles::RecordStats(HeapStats* stats) {
*stats->global_handle_count = 0;
*stats->weak_global_handle_count = 0;
diff --git a/src/3rdparty/v8/src/global-handles.h b/src/3rdparty/v8/src/global-handles.h
index 482baef..9900144 100644
--- a/src/3rdparty/v8/src/global-handles.h
+++ b/src/3rdparty/v8/src/global-handles.h
@@ -108,8 +108,6 @@ class ImplicitRefGroup {
};
-typedef void (*WeakReferenceGuest)(Object* object, void* parameter);
-
class GlobalHandles {
public:
~GlobalHandles();
@@ -128,21 +126,17 @@ class GlobalHandles {
// reason is that Smi::FromInt(0) does not change during garbage collection.
void MakeWeak(Object** location,
void* parameter,
- WeakReferenceCallback callback);
+ WeakReferenceCallback weak_reference_callback,
+ NearDeathCallback near_death_callback);
- static void SetWrapperClassId(Object** location, uint16_t class_id);
- static uint16_t GetWrapperClassId(Object** location);
+ void RecordStats(HeapStats* stats);
// Returns the current number of weak handles.
- int NumberOfWeakHandles() { return number_of_weak_handles_; }
-
- void RecordStats(HeapStats* stats);
+ int NumberOfWeakHandles();
// Returns the current number of weak handles to global objects.
// These handles are also included in NumberOfWeakHandles().
- int NumberOfGlobalObjectWeakHandles() {
- return number_of_global_object_weak_handles_;
- }
+ int NumberOfGlobalObjectWeakHandles();
// Returns the current number of handles to global objects.
int NumberOfGlobalHandles() {
@@ -168,7 +162,8 @@ class GlobalHandles {
// Process pending weak handles.
// Returns true if next major GC is likely to collect more garbage.
- bool PostGarbageCollectionProcessing(GarbageCollector collector);
+ bool PostGarbageCollectionProcessing(GarbageCollector collector,
+ GCTracer* tracer);
// Iterates over all strong handles.
void IterateStrongRoots(ObjectVisitor* v);
@@ -179,13 +174,13 @@ class GlobalHandles {
// Iterates over all handles that have embedder-assigned class ID.
void IterateAllRootsWithClassIds(ObjectVisitor* v);
+ // Iterates over all handles in the new space that have embedder-assigned
+ // class ID.
+ void IterateAllRootsInNewSpaceWithClassIds(ObjectVisitor* v);
+
// Iterates over all weak roots in heap.
void IterateWeakRoots(ObjectVisitor* v);
- // Iterates over weak roots that are bound to a given callback.
- void IterateWeakRoots(WeakReferenceGuest f,
- WeakReferenceCallback callback);
-
// Find all weak handles satisfying the callback predicate, mark
// them as pending.
void IdentifyWeakHandles(WeakSlotCallback f);
@@ -206,6 +201,11 @@ class GlobalHandles {
// See the note above.
void IterateNewSpaceWeakIndependentRoots(ObjectVisitor* v);
+ // Iterate over objects in object groups that have at least one object
+  // which requires visiting. The callback must return true if an object
+  // can be skipped, and false otherwise.
+ bool IterateObjectGroups(ObjectVisitor* v, WeakSlotCallbackWithHeap can_skip);
+
// Add an object group.
// Should be only used in GC callback function before a collection.
// All groups are destroyed after a garbage collection.
@@ -252,14 +252,6 @@ class GlobalHandles {
Isolate* isolate_;
- // Field always containing the number of weak and near-death handles.
- int number_of_weak_handles_;
-
- // Field always containing the number of weak and near-death handles
- // to global objects. These objects are also included in
- // number_of_weak_handles_.
- int number_of_global_object_weak_handles_;
-
// Field always containing the number of handles to global objects.
int number_of_global_handles_;
diff --git a/src/3rdparty/v8/src/globals.h b/src/3rdparty/v8/src/globals.h
index 74c12f8..7205361 100644
--- a/src/3rdparty/v8/src/globals.h
+++ b/src/3rdparty/v8/src/globals.h
@@ -253,15 +253,13 @@ const int kBinary32ExponentShift = 23;
// other bits set.
const uint64_t kQuietNaNMask = static_cast<uint64_t>(0xfff) << 51;
-// ASCII/UTF-16 constants
+// Latin1/UTF-16 constants
// Code-point values in Unicode 4.0 are 21 bits wide.
// Code units in UTF-16 are 16 bits wide.
typedef uint16_t uc16;
typedef int32_t uc32;
-const int kASCIISize = kCharSize;
+const int kOneByteSize = kCharSize;
const int kUC16Size = sizeof(uc16); // NOLINT
-const uc32 kMaxAsciiCharCode = 0x7f;
-const uint32_t kMaxAsciiCharCodeU = 0x7fu;
// The expression OFFSET_OF(type, field) computes the byte-offset
diff --git a/src/3rdparty/v8/src/handles-inl.h b/src/3rdparty/v8/src/handles-inl.h
index 1307986..c1daae2 100644
--- a/src/3rdparty/v8/src/handles-inl.h
+++ b/src/3rdparty/v8/src/handles-inl.h
@@ -37,25 +37,17 @@
namespace v8 {
namespace internal {
-inline Isolate* GetIsolateForHandle(Object* obj) {
- return Isolate::Current();
-}
-
-inline Isolate* GetIsolateForHandle(HeapObject* obj) {
- return obj->GetIsolate();
-}
-
template<typename T>
Handle<T>::Handle(T* obj) {
ASSERT(!obj->IsFailure());
- location_ = HandleScope::CreateHandle(obj, GetIsolateForHandle(obj));
+ location_ = HandleScope::CreateHandle(obj->GetIsolate(), obj);
}
template<typename T>
Handle<T>::Handle(T* obj, Isolate* isolate) {
ASSERT(!obj->IsFailure());
- location_ = HandleScope::CreateHandle(obj, isolate);
+ location_ = HandleScope::CreateHandle(isolate, obj);
}
@@ -63,23 +55,20 @@ template <typename T>
inline T* Handle<T>::operator*() const {
ASSERT(location_ != NULL);
ASSERT(reinterpret_cast<Address>(*location_) != kHandleZapValue);
+ SLOW_ASSERT(ISOLATE->allow_handle_deref());
return *BitCast<T**>(location_);
}
-
-HandleScope::HandleScope() {
- Isolate* isolate = Isolate::Current();
- v8::ImplementationUtilities::HandleScopeData* current =
- isolate->handle_scope_data();
- isolate_ = isolate;
- prev_next_ = current->next;
- prev_limit_ = current->limit;
- current->level++;
+template <typename T>
+inline T** Handle<T>::location() const {
+ ASSERT(location_ == NULL ||
+ reinterpret_cast<Address>(*location_) != kZapValue);
+ SLOW_ASSERT(ISOLATE->allow_handle_deref());
+ return location_;
}
HandleScope::HandleScope(Isolate* isolate) {
- ASSERT(isolate == Isolate::Current());
v8::ImplementationUtilities::HandleScopeData* current =
isolate->handle_scope_data();
isolate_ = isolate;
@@ -94,7 +83,6 @@ HandleScope::~HandleScope() {
}
void HandleScope::CloseScope() {
- ASSERT(isolate_ == Isolate::Current());
v8::ImplementationUtilities::HandleScopeData* current =
isolate_->handle_scope_data();
current->next = prev_next_;
@@ -118,7 +106,7 @@ Handle<T> HandleScope::CloseAndEscape(Handle<T> handle_value) {
isolate_->handle_scope_data();
// Allocate one handle in the parent scope.
ASSERT(current->level > 0);
- Handle<T> result(CreateHandle<T>(value, isolate_));
+ Handle<T> result(CreateHandle<T>(isolate_, value));
// Reinitialize the current scope (so that it's ready
// to be used or closed again).
prev_next_ = current->next;
@@ -129,13 +117,12 @@ Handle<T> HandleScope::CloseAndEscape(Handle<T> handle_value) {
template <typename T>
-T** HandleScope::CreateHandle(T* value, Isolate* isolate) {
- ASSERT(isolate == Isolate::Current());
+T** HandleScope::CreateHandle(Isolate* isolate, T* value) {
v8::ImplementationUtilities::HandleScopeData* current =
isolate->handle_scope_data();
internal::Object** cur = current->next;
- if (cur == current->limit) cur = Extend();
+ if (cur == current->limit) cur = Extend(isolate);
// Update the current next field, set the value in the created
// handle, and return the result.
ASSERT(cur < current->limit);
@@ -148,10 +135,10 @@ T** HandleScope::CreateHandle(T* value, Isolate* isolate) {
#ifdef DEBUG
-inline NoHandleAllocation::NoHandleAllocation() {
- Isolate* isolate = Isolate::Current();
+inline NoHandleAllocation::NoHandleAllocation(Isolate* isolate)
+ : isolate_(isolate) {
v8::ImplementationUtilities::HandleScopeData* current =
- isolate->handle_scope_data();
+ isolate_->handle_scope_data();
active_ = !isolate->optimizing_compiler_thread()->IsOptimizerThread();
if (active_) {
@@ -170,11 +157,43 @@ inline NoHandleAllocation::~NoHandleAllocation() {
// Restore state in current handle scope to re-enable handle
// allocations.
v8::ImplementationUtilities::HandleScopeData* data =
- Isolate::Current()->handle_scope_data();
+ isolate_->handle_scope_data();
ASSERT_EQ(0, data->level);
data->level = level_;
}
}
+
+
+NoHandleDereference::NoHandleDereference(Isolate* isolate)
+ : isolate_(isolate) {
+ // The guard is set on a per-isolate basis, so it affects all threads.
+ // That's why we can only use it when running without parallel recompilation.
+ if (FLAG_parallel_recompilation) return;
+ old_state_ = isolate->allow_handle_deref();
+ isolate_->set_allow_handle_deref(false);
+}
+
+
+NoHandleDereference::~NoHandleDereference() {
+ if (FLAG_parallel_recompilation) return;
+ isolate_->set_allow_handle_deref(old_state_);
+}
+
+
+AllowHandleDereference::AllowHandleDereference(Isolate* isolate)
+ : isolate_(isolate) {
+ // The guard is set on a per-isolate basis, so it affects all threads.
+ // That's why we can only use it when running without parallel recompilation.
+ if (FLAG_parallel_recompilation) return;
+ old_state_ = isolate->allow_handle_deref();
+ isolate_->set_allow_handle_deref(true);
+}
+
+
+AllowHandleDereference::~AllowHandleDereference() {
+ if (FLAG_parallel_recompilation) return;
+ isolate_->set_allow_handle_deref(old_state_);
+}
#endif
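
Both guards are plain save-set-restore RAII scopes. The shape, reduced to a self-contained sketch with a hypothetical stand-in for the isolate:

#include <cassert>

struct IsolateLike {
  bool allow_handle_deref = true;  // hypothetical stand-in field
};

// Forbids dereferencing for the lifetime of the scope, then restores
// whatever state was in effect before.
class DisallowDerefScope {
 public:
  explicit DisallowDerefScope(IsolateLike* isolate)
      : isolate_(isolate), old_state_(isolate->allow_handle_deref) {
    isolate_->allow_handle_deref = false;
  }
  ~DisallowDerefScope() { isolate_->allow_handle_deref = old_state_; }
 private:
  IsolateLike* isolate_;
  bool old_state_;
};

int main() {
  IsolateLike isolate;
  {
    DisallowDerefScope guard(&isolate);
    assert(!isolate.allow_handle_deref);
  }
  assert(isolate.allow_handle_deref);  // restored on scope exit
  return 0;
}

Because the flag is per-isolate rather than per-thread, the real guards bail out entirely when parallel recompilation is enabled, where another thread could observe the temporarily flipped value.
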
diff --git a/src/3rdparty/v8/src/handles.cc b/src/3rdparty/v8/src/handles.cc
index a6192d8..7496cc1 100644
--- a/src/3rdparty/v8/src/handles.cc
+++ b/src/3rdparty/v8/src/handles.cc
@@ -45,8 +45,7 @@ namespace v8 {
namespace internal {
-int HandleScope::NumberOfHandles() {
- Isolate* isolate = Isolate::Current();
+int HandleScope::NumberOfHandles(Isolate* isolate) {
HandleScopeImplementer* impl = isolate->handle_scope_implementer();
int n = impl->blocks()->length();
if (n == 0) return 0;
@@ -55,8 +54,7 @@ int HandleScope::NumberOfHandles() {
}
-Object** HandleScope::Extend() {
- Isolate* isolate = Isolate::Current();
+Object** HandleScope::Extend(Isolate* isolate) {
v8::ImplementationUtilities::HandleScopeData* current =
isolate->handle_scope_data();
@@ -97,7 +95,6 @@ Object** HandleScope::Extend() {
void HandleScope::DeleteExtensions(Isolate* isolate) {
- ASSERT(isolate == Isolate::Current());
v8::ImplementationUtilities::HandleScopeData* current =
isolate->handle_scope_data();
isolate->handle_scope_implementer()->DeleteExtensions(current->limit);
@@ -112,21 +109,18 @@ void HandleScope::ZapRange(Object** start, Object** end) {
}
-Address HandleScope::current_level_address() {
- return reinterpret_cast<Address>(
- &Isolate::Current()->handle_scope_data()->level);
+Address HandleScope::current_level_address(Isolate* isolate) {
+ return reinterpret_cast<Address>(&isolate->handle_scope_data()->level);
}
-Address HandleScope::current_next_address() {
- return reinterpret_cast<Address>(
- &Isolate::Current()->handle_scope_data()->next);
+Address HandleScope::current_next_address(Isolate* isolate) {
+ return reinterpret_cast<Address>(&isolate->handle_scope_data()->next);
}
-Address HandleScope::current_limit_address() {
- return reinterpret_cast<Address>(
- &Isolate::Current()->handle_scope_data()->limit);
+Address HandleScope::current_limit_address(Isolate* isolate) {
+ return reinterpret_cast<Address>(&isolate->handle_scope_data()->limit);
}
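
A recurring theme of this patch: helpers that silently consulted Isolate::Current() now take the isolate as an explicit parameter, making the dependency visible and typically avoiding a thread-local lookup. Schematically, with hypothetical types:

#include <cassert>

struct IsolateLike { int handle_count = 0; };

thread_local IsolateLike* current_isolate = nullptr;

// Before: a hidden thread-local lookup inside every call.
int NumberOfHandlesOld() { return current_isolate->handle_count; }

// After: the caller supplies the isolate explicitly.
int NumberOfHandlesNew(IsolateLike* isolate) { return isolate->handle_count; }

int main() {
  IsolateLike isolate;
  isolate.handle_count = 3;
  current_isolate = &isolate;
  assert(NumberOfHandlesOld() == NumberOfHandlesNew(&isolate));
  return 0;
}
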
@@ -229,12 +223,12 @@ Handle<Object> SetPrototype(Handle<JSFunction> function,
}
-Handle<Object> SetProperty(Handle<Object> object,
+Handle<Object> SetProperty(Isolate* isolate,
+ Handle<Object> object,
Handle<Object> key,
Handle<Object> value,
PropertyAttributes attributes,
StrictModeFlag strict_mode) {
- Isolate* isolate = Isolate::Current();
CALL_HEAP_FUNCTION(
isolate,
Runtime::SetObjectProperty(
@@ -282,14 +276,14 @@ Handle<Object> SetPropertyWithInterceptor(Handle<JSObject> object,
Handle<Object> GetProperty(Handle<JSReceiver> obj,
const char* name) {
Isolate* isolate = obj->GetIsolate();
- Handle<String> str = isolate->factory()->LookupAsciiSymbol(name);
+ Handle<String> str = isolate->factory()->InternalizeUtf8String(name);
CALL_HEAP_FUNCTION(isolate, obj->GetProperty(*str), Object);
}
-Handle<Object> GetProperty(Handle<Object> obj,
+Handle<Object> GetProperty(Isolate* isolate,
+ Handle<Object> obj,
Handle<Object> key) {
- Isolate* isolate = Isolate::Current();
CALL_HEAP_FUNCTION(isolate,
Runtime::GetObjectProperty(isolate, obj, key), Object);
}
@@ -315,8 +309,8 @@ Handle<Object> SetPrototype(Handle<JSObject> obj, Handle<Object> value) {
}
-Handle<Object> LookupSingleCharacterStringFromCode(uint32_t index) {
- Isolate* isolate = Isolate::Current();
+Handle<Object> LookupSingleCharacterStringFromCode(Isolate* isolate,
+ uint32_t index) {
CALL_HEAP_FUNCTION(
isolate,
isolate->heap()->LookupSingleCharacterStringFromCode(index), Object);
@@ -350,14 +344,16 @@ Handle<Object> SetAccessor(Handle<JSObject> obj, Handle<AccessorInfo> info) {
// collector will call the weak callback on the global handle
// associated with the wrapper and get rid of both the wrapper and the
// handle.
-static void ClearWrapperCache(Persistent<v8::Value> handle, void*) {
+static void ClearWrapperCache(v8::Isolate* v8_isolate,
+ Persistent<v8::Value> handle,
+ void*) {
Handle<Object> cache = Utils::OpenHandle(*handle);
JSValue* wrapper = JSValue::cast(*cache);
Foreign* foreign = Script::cast(wrapper->value())->wrapper();
ASSERT(foreign->foreign_address() ==
reinterpret_cast<Address>(cache.location()));
foreign->set_foreign_address(0);
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = reinterpret_cast<Isolate*>(v8_isolate);
isolate->global_handles()->Destroy(cache.location());
isolate->counters()->script_wrappers()->Decrement();
}
@@ -369,19 +365,30 @@ Handle<JSValue> GetScriptWrapper(Handle<Script> script) {
return Handle<JSValue>(
reinterpret_cast<JSValue**>(script->wrapper()->foreign_address()));
}
- Isolate* isolate = Isolate::Current();
+ Isolate* isolate = script->GetIsolate();
// Construct a new script wrapper.
isolate->counters()->script_wrappers()->Increment();
Handle<JSFunction> constructor = isolate->script_function();
Handle<JSValue> result =
Handle<JSValue>::cast(isolate->factory()->NewJSObject(constructor));
+
+  // The allocation might have triggered a GC, which could have called this
+  // function recursively; in that case a wrapper has already been created
+  // and cached, so simply return the cached wrapper.
+ if (script->wrapper()->foreign_address() != NULL) {
+ return Handle<JSValue>(
+ reinterpret_cast<JSValue**>(script->wrapper()->foreign_address()));
+ }
+
result->set_value(*script);
// Create a new weak global handle and use it to cache the wrapper
// for future use. The cache will automatically be cleared by the
// garbage collector when it is not used anymore.
Handle<Object> handle = isolate->global_handles()->Create(*result);
- isolate->global_handles()->MakeWeak(handle.location(), NULL,
+ isolate->global_handles()->MakeWeak(handle.location(),
+ NULL,
+ NULL,
&ClearWrapperCache);
script->wrapper()->set_foreign_address(
reinterpret_cast<Address>(handle.location()));
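
The re-check after NewJSObject guards against re-entrancy: the allocation can trigger a GC, a GC callback can run this very function, and by the time the allocation returns the cache may already be populated. The pattern in miniature, with hypothetical names:

#include <cassert>
#include <functional>

struct Wrapper { int payload; };

Wrapper* g_cache = nullptr;  // stands in for the script's wrapper slot

// Check the cache, allocate, then check again: the allocation itself may
// re-enter this function and win the race.
Wrapper* GetWrapper(const std::function<Wrapper*()>& allocate) {
  if (g_cache != nullptr) return g_cache;
  Wrapper* fresh = allocate();
  if (g_cache != nullptr) {  // re-entry already cached a wrapper
    delete fresh;
    return g_cache;
  }
  g_cache = fresh;
  return g_cache;
}

int main() {
  Wrapper* w = GetWrapper([] { return new Wrapper{7}; });
  assert(w == GetWrapper([] { return new Wrapper{8}; }));  // cached hit
  delete w;
  g_cache = nullptr;
  return 0;
}
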
@@ -423,7 +430,7 @@ static void CalculateLineEnds(Isolate* isolate,
Vector<const SourceChar> src,
bool with_last_line) {
const int src_len = src.length();
- StringSearch<char, SourceChar> search(isolate, CStrVector("\n"));
+ StringSearch<uint8_t, SourceChar> search(isolate, STATIC_ASCII_VECTOR("\n"));
// Find and record line ends.
int position = 0;
@@ -457,7 +464,7 @@ Handle<FixedArray> CalculateLineEnds(Handle<String> src,
if (content.IsAscii()) {
CalculateLineEnds(isolate,
&line_ends,
- content.ToAsciiVector(),
+ content.ToOneByteVector(),
with_last_line);
} else {
CalculateLineEnds(isolate,
@@ -596,9 +603,11 @@ v8::Handle<v8::Array> GetKeysForIndexedInterceptor(Handle<JSReceiver> receiver,
Handle<Object> GetScriptNameOrSourceURL(Handle<Script> script) {
Isolate* isolate = script->GetIsolate();
Handle<String> name_or_source_url_key =
- isolate->factory()->LookupAsciiSymbol("nameOrSourceURL");
+ isolate->factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("nameOrSourceURL"));
Handle<JSValue> script_wrapper = GetScriptWrapper(script);
- Handle<Object> property = GetProperty(script_wrapper,
+ Handle<Object> property = GetProperty(isolate,
+ script_wrapper,
name_or_source_url_key);
ASSERT(property->IsJSFunction());
Handle<JSFunction> method = Handle<JSFunction>::cast(property);
@@ -638,7 +647,7 @@ Handle<FixedArray> GetKeysInFixedArrayFor(Handle<JSReceiver> object,
// Only collect keys if access is permitted.
for (Handle<Object> p = object;
*p != isolate->heap()->null_value();
- p = Handle<Object>(p->GetPrototype(), isolate)) {
+ p = Handle<Object>(p->GetPrototype(isolate), isolate)) {
if (p->IsJSProxy()) {
Handle<JSProxy> proxy(JSProxy::cast(*p), isolate);
Handle<Object> args[] = { proxy };
@@ -882,168 +891,8 @@ Handle<ObjectHashTable> PutIntoObjectHashTable(Handle<ObjectHashTable> table,
}
-// This method determines the type of string involved and then gets the UTF8
-// length of the string. It doesn't flatten the string and has log(n) recursion
-// for a string of length n. If the failure flag gets set, then we have to
-// flatten the string and retry. Failures are caused by surrogate pairs in deep
-// cons strings.
-
-// Single surrogate characters that are encountered in the UTF-16 character
-// sequence of the input string get counted as 3 UTF-8 bytes, because that
-// is the way that WriteUtf8 will encode them. Surrogate pairs are counted and
-// encoded as one 4-byte UTF-8 sequence.
-
-// This function conceptually uses recursion on the two halves of cons strings.
-// However, in order to avoid the recursion going too deep it recurses on the
-// second string of the cons, but iterates on the first substring (by manually
-// eliminating it as a tail recursion). This means it counts the UTF-8 length
-// from the end to the start, which makes no difference to the total.
-
-// Surrogate pairs are recognized even if they are split across two sides of a
-// cons, which complicates the implementation somewhat. Therefore, too deep
-// recursion cannot always be avoided. This case is detected, and the failure
-// flag is set, a signal to the caller that the string should be flattened and
-// the operation retried.
-int Utf8LengthHelper(String* input,
- int from,
- int to,
- bool followed_by_surrogate,
- int max_recursion,
- bool* failure,
- bool* starts_with_surrogate) {
- if (from == to) return 0;
- int total = 0;
- bool dummy;
- while (true) {
- if (input->IsAsciiRepresentation()) {
- *starts_with_surrogate = false;
- return total + to - from;
- }
- switch (StringShape(input).representation_tag()) {
- case kConsStringTag: {
- ConsString* str = ConsString::cast(input);
- String* first = str->first();
- String* second = str->second();
- int first_length = first->length();
- if (first_length - from > to - first_length) {
- if (first_length < to) {
- // Right hand side is shorter. No need to check the recursion depth
- // since this can only happen log(n) times.
- bool right_starts_with_surrogate = false;
- total += Utf8LengthHelper(second,
- 0,
- to - first_length,
- followed_by_surrogate,
- max_recursion - 1,
- failure,
- &right_starts_with_surrogate);
- if (*failure) return 0;
- followed_by_surrogate = right_starts_with_surrogate;
- input = first;
- to = first_length;
- } else {
- // We only need the left hand side.
- input = first;
- }
- } else {
- if (first_length > from) {
- // Left hand side is shorter.
- if (first->IsAsciiRepresentation()) {
- total += first_length - from;
- *starts_with_surrogate = false;
- starts_with_surrogate = &dummy;
- input = second;
- from = 0;
- to -= first_length;
- } else if (second->IsAsciiRepresentation()) {
- followed_by_surrogate = false;
- total += to - first_length;
- input = first;
- to = first_length;
- } else if (max_recursion > 0) {
- bool right_starts_with_surrogate = false;
- // Recursing on the long one. This may fail.
- total += Utf8LengthHelper(second,
- 0,
- to - first_length,
- followed_by_surrogate,
- max_recursion - 1,
- failure,
- &right_starts_with_surrogate);
- if (*failure) return 0;
- input = first;
- to = first_length;
- followed_by_surrogate = right_starts_with_surrogate;
- } else {
- *failure = true;
- return 0;
- }
- } else {
- // We only need the right hand side.
- input = second;
- from = 0;
- to -= first_length;
- }
- }
- continue;
- }
- case kExternalStringTag:
- case kSeqStringTag: {
- Vector<const uc16> vector = input->GetFlatContent().ToUC16Vector();
- const uc16* p = vector.start();
- int previous = unibrow::Utf16::kNoPreviousCharacter;
- for (int i = from; i < to; i++) {
- uc16 c = p[i];
- total += unibrow::Utf8::Length(c, previous);
- previous = c;
- }
- if (to - from > 0) {
- if (unibrow::Utf16::IsLeadSurrogate(previous) &&
- followed_by_surrogate) {
- total -= unibrow::Utf8::kBytesSavedByCombiningSurrogates;
- }
- if (unibrow::Utf16::IsTrailSurrogate(p[from])) {
- *starts_with_surrogate = true;
- }
- }
- return total;
- }
- case kSlicedStringTag: {
- SlicedString* str = SlicedString::cast(input);
- int offset = str->offset();
- input = str->parent();
- from += offset;
- to += offset;
- continue;
- }
- default:
- break;
- }
- UNREACHABLE();
- return 0;
- }
- return 0;
-}
-
-
-int Utf8Length(Handle<String> str) {
- bool dummy;
- bool failure;
- int len;
- const int kRecursionBudget = 100;
- do {
- failure = false;
- len = Utf8LengthHelper(
- *str, 0, str->length(), false, kRecursionBudget, &failure, &dummy);
- if (failure) FlattenString(str);
- } while (failure);
- return len;
-}
-
-
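
For reference, the accounting the removed helper performed: ASCII code units cost one UTF-8 byte, other BMP characters two or three, a properly paired surrogate couple is written as one four-byte sequence, and a lone surrogate as three bytes. A flat sketch of that arithmetic, without the cons-string traversal (an illustration only, not the relocated V8 implementation):

#include <cassert>
#include <cstdint>
#include <vector>

// UTF-8 byte length of a UTF-16 code unit sequence.
size_t Utf8Length(const std::vector<uint16_t>& units) {
  size_t total = 0;
  for (size_t i = 0; i < units.size(); ++i) {
    uint16_t c = units[i];
    if (c < 0x80) {
      total += 1;                                // ASCII
    } else if (c < 0x800) {
      total += 2;                                // two-byte sequence
    } else if (c >= 0xD800 && c < 0xDC00 &&      // lead surrogate...
               i + 1 < units.size() &&
               units[i + 1] >= 0xDC00 && units[i + 1] < 0xE000) {
      total += 4;                                // ...paired: one 4-byte char
      ++i;
    } else {
      total += 3;                                // BMP char or lone surrogate
    }
  }
  return total;
}

int main() {
  // 'a', U+00E9, U+20AC, U+1D11E (surrogate pair): 1 + 2 + 3 + 4 bytes.
  std::vector<uint16_t> s = {0x0061, 0x00E9, 0x20AC, 0xD834, 0xDD1E};
  assert(Utf8Length(s) == 10);
  return 0;
}
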
DeferredHandleScope::DeferredHandleScope(Isolate* isolate)
: impl_(isolate->handle_scope_implementer()) {
- ASSERT(impl_->isolate() == Isolate::Current());
impl_->BeginDeferredScope();
v8::ImplementationUtilities::HandleScopeData* data =
impl_->isolate()->handle_scope_data();
diff --git a/src/3rdparty/v8/src/handles.h b/src/3rdparty/v8/src/handles.h
index b80dbe5..b0b271c 100644
--- a/src/3rdparty/v8/src/handles.h
+++ b/src/3rdparty/v8/src/handles.h
@@ -58,25 +58,21 @@ class Handle {
a = b; // Fake assignment to enforce type checks.
USE(a);
#endif
- location_ = reinterpret_cast<T**>(handle.location());
+ location_ = reinterpret_cast<T**>(handle.location_);
}
INLINE(T* operator ->() const) { return operator*(); }
// Check if this handle refers to the exact same object as the other handle.
bool is_identical_to(const Handle<T> other) const {
- return operator*() == *other;
+ return *location_ == *other.location_;
}
// Provides the C++ dereference operator.
INLINE(T* operator*() const);
// Returns the address to where the raw pointer is stored.
- T** location() const {
- ASSERT(location_ == NULL ||
- reinterpret_cast<Address>(*location_) != kZapValue);
- return location_;
- }
+ INLINE(T** location() const);
template <class S> static Handle<T> cast(Handle<S> that) {
T::cast(*that);
@@ -92,13 +88,16 @@ class Handle {
private:
T** location_;
+
+ // Handles of different classes are allowed to access each other's location_.
+ template<class S> friend class Handle;
};
// Convenience wrapper.
template<class T>
-inline Handle<T> handle(T* t) {
- return Handle<T>(t);
+inline Handle<T> handle(T* t, Isolate* isolate) {
+ return Handle<T>(t, isolate);
}
@@ -120,24 +119,23 @@ class HandleScopeImplementer;
// for which the handle scope has been deleted is undefined.
class HandleScope {
public:
- inline HandleScope();
explicit inline HandleScope(Isolate* isolate);
inline ~HandleScope();
// Counts the number of allocated handles.
- static int NumberOfHandles();
+ static int NumberOfHandles(Isolate* isolate);
// Creates a new handle with the given value.
template <typename T>
- static inline T** CreateHandle(T* value, Isolate* isolate);
+ static inline T** CreateHandle(Isolate* isolate, T* value);
// Deallocates any extensions used by the current scope.
static void DeleteExtensions(Isolate* isolate);
- static Address current_next_address();
- static Address current_limit_address();
- static Address current_level_address();
+ static Address current_next_address(Isolate* isolate);
+ static Address current_limit_address(Isolate* isolate);
+ static Address current_level_address(Isolate* isolate);
// Closes the HandleScope (invalidating all handles
// created in the scope of the HandleScope) and returns
@@ -162,7 +160,7 @@ class HandleScope {
Object** prev_limit_;
// Extend the handle scope making room for more handles.
- static internal::Object** Extend();
+ static internal::Object** Extend(Isolate* isolate);
// Zaps the handles in the half-open interval [start, end).
static void ZapRange(internal::Object** start, internal::Object** end);
@@ -214,9 +212,8 @@ void FlattenString(Handle<String> str);
// string.
Handle<String> FlattenGetString(Handle<String> str);
-int Utf8Length(Handle<String> str);
-
-Handle<Object> SetProperty(Handle<Object> object,
+Handle<Object> SetProperty(Isolate* isolate,
+ Handle<Object> object,
Handle<Object> key,
Handle<Object> value,
PropertyAttributes attributes,
@@ -233,7 +230,8 @@ Handle<Object> ForceDeleteProperty(Handle<JSObject> object,
Handle<Object> GetProperty(Handle<JSReceiver> obj,
const char* name);
-Handle<Object> GetProperty(Handle<Object> obj,
+Handle<Object> GetProperty(Isolate* isolate,
+ Handle<Object> obj,
Handle<Object> key);
Handle<Object> GetPropertyWithInterceptor(Handle<JSObject> receiver,
@@ -243,7 +241,8 @@ Handle<Object> GetPropertyWithInterceptor(Handle<JSObject> receiver,
Handle<Object> SetPrototype(Handle<JSObject> obj, Handle<Object> value);
-Handle<Object> LookupSingleCharacterStringFromCode(uint32_t index);
+Handle<Object> LookupSingleCharacterStringFromCode(Isolate* isolate,
+ uint32_t index);
Handle<JSObject> Copy(Handle<JSObject> obj);
@@ -329,17 +328,48 @@ Handle<ObjectHashTable> PutIntoObjectHashTable(Handle<ObjectHashTable> table,
class NoHandleAllocation BASE_EMBEDDED {
public:
#ifndef DEBUG
- NoHandleAllocation() {}
+ explicit NoHandleAllocation(Isolate* isolate) {}
~NoHandleAllocation() {}
#else
- inline NoHandleAllocation();
+ explicit inline NoHandleAllocation(Isolate* isolate);
inline ~NoHandleAllocation();
private:
+ Isolate* isolate_;
int level_;
bool active_;
#endif
};
+
+class NoHandleDereference BASE_EMBEDDED {
+ public:
+#ifndef DEBUG
+ explicit NoHandleDereference(Isolate* isolate) {}
+ ~NoHandleDereference() {}
+#else
+ explicit inline NoHandleDereference(Isolate* isolate);
+ inline ~NoHandleDereference();
+ private:
+ Isolate* isolate_;
+ bool old_state_;
+#endif
+};
+
+
+class AllowHandleDereference BASE_EMBEDDED {
+ public:
+#ifndef DEBUG
+ explicit AllowHandleDereference(Isolate* isolate) {}
+ ~AllowHandleDereference() {}
+#else
+ explicit inline AllowHandleDereference(Isolate* isolate);
+ inline ~AllowHandleDereference();
+ private:
+ Isolate* isolate_;
+ bool old_state_;
+#endif
+};
+
} } // namespace v8::internal
#endif // V8_HANDLES_H_
diff --git a/src/3rdparty/v8/src/heap-inl.h b/src/3rdparty/v8/src/heap-inl.h
index cb274cb..c3dff99 100644
--- a/src/3rdparty/v8/src/heap-inl.h
+++ b/src/3rdparty/v8/src/heap-inl.h
@@ -91,30 +91,55 @@ MaybeObject* Heap::AllocateStringFromUtf8(Vector<const char> str,
if (non_ascii_start >= length) {
// If the string is ASCII, we do not need to convert the characters
// since UTF8 is backwards compatible with ASCII.
- return AllocateStringFromAscii(str, pretenure);
+ return AllocateStringFromOneByte(str, pretenure);
}
// Non-ASCII and we need to decode.
return AllocateStringFromUtf8Slow(str, non_ascii_start, pretenure);
}
-MaybeObject* Heap::AllocateSymbol(Vector<const char> str,
- int chars,
- uint32_t hash_field) {
- unibrow::Utf8InputBuffer<> buffer(str.start(),
- static_cast<unsigned>(str.length()));
- return AllocateInternalSymbol(&buffer, chars, hash_field);
+template<>
+bool inline Heap::IsOneByte(Vector<const char> str, int chars) {
+ // TODO(dcarney): incorporate Latin-1 check when Latin-1 is supported?
+ // ASCII only check.
+ return chars == str.length();
}
-MaybeObject* Heap::AllocateAsciiSymbol(Vector<const char> str,
- uint32_t hash_field) {
- if (str.length() > SeqAsciiString::kMaxLength) {
- return Failure::OutOfMemoryException();
+template<>
+bool inline Heap::IsOneByte(String* str, int chars) {
+ return str->IsOneByteRepresentation();
+}
+
+
+MaybeObject* Heap::AllocateInternalizedStringFromUtf8(
+ Vector<const char> str, int chars, uint32_t hash_field) {
+ if (IsOneByte(str, chars)) {
+ return AllocateOneByteInternalizedString(
+ Vector<const uint8_t>::cast(str), hash_field);
+ }
+ return AllocateInternalizedStringImpl<false>(str, chars, hash_field);
+}
+
+
+template<typename T>
+MaybeObject* Heap::AllocateInternalizedStringImpl(
+ T t, int chars, uint32_t hash_field) {
+ if (IsOneByte(t, chars)) {
+ return AllocateInternalizedStringImpl<true>(t, chars, hash_field);
+ }
+ return AllocateInternalizedStringImpl<false>(t, chars, hash_field);
+}
+
+
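The Vector<const char> specialization relies on a basic UTF-8 property: the decoded character count equals the byte count exactly when every character is a single byte, i.e. pure ASCII (the TODO notes that a Latin-1 check is not yet incorporated). Illustrated:

#include <cassert>
#include <cstring>

// One-byte test for UTF-8 input: any non-ASCII character takes more than
// one byte, so chars == bytes implies all-ASCII.
bool IsOneByte(const char* utf8, int decoded_chars) {
  return decoded_chars == static_cast<int>(std::strlen(utf8));
}

int main() {
  assert(IsOneByte("abc", 3));        // 3 characters in 3 bytes
  assert(!IsOneByte("\xC3\xA9", 1));  // U+00E9: 1 character in 2 bytes
  return 0;
}
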
+MaybeObject* Heap::AllocateOneByteInternalizedString(Vector<const uint8_t> str,
+ uint32_t hash_field) {
+ if (str.length() > SeqOneByteString::kMaxLength) {
+ return Failure::OutOfMemoryException(0x2);
}
// Compute map and object size.
- Map* map = ascii_symbol_map();
- int size = SeqAsciiString::SizeFor(str.length());
+ Map* map = ascii_internalized_string_map();
+ int size = SeqOneByteString::SizeFor(str.length());
// Allocate string.
Object* result;
@@ -135,20 +160,20 @@ MaybeObject* Heap::AllocateAsciiSymbol(Vector<const char> str,
ASSERT_EQ(size, answer->Size());
// Fill in the characters.
- memcpy(answer->address() + SeqAsciiString::kHeaderSize,
+ memcpy(answer->address() + SeqOneByteString::kHeaderSize,
str.start(), str.length());
return answer;
}
-MaybeObject* Heap::AllocateTwoByteSymbol(Vector<const uc16> str,
- uint32_t hash_field) {
+MaybeObject* Heap::AllocateTwoByteInternalizedString(Vector<const uc16> str,
+ uint32_t hash_field) {
if (str.length() > SeqTwoByteString::kMaxLength) {
- return Failure::OutOfMemoryException();
+ return Failure::OutOfMemoryException(0x3);
}
// Compute map and object size.
- Map* map = symbol_map();
+ Map* map = internalized_string_map();
int size = SeqTwoByteString::SizeFor(str.length());
// Allocate string.
@@ -240,8 +265,9 @@ MaybeObject* Heap::NumberFromInt32(
MaybeObject* Heap::NumberFromUint32(
uint32_t value, PretenureFlag pretenure) {
- if ((int32_t)value >= 0 && Smi::IsValid((int32_t)value)) {
- return Smi::FromInt((int32_t)value);
+ if (static_cast<int32_t>(value) >= 0 &&
+ Smi::IsValid(static_cast<int32_t>(value))) {
+ return Smi::FromInt(static_cast<int32_t>(value));
}
// Bypass NumberFromDouble to avoid various redundant checks.
return AllocateHeapNumber(FastUI2D(value), pretenure);
@@ -266,14 +292,12 @@ void Heap::FinalizeExternalString(HeapObject* string) {
} else {
JSObject *object = JSObject::cast(string);
Object *value = object->GetExternalResourceObject();
- v8::Object::ExternalResource *resource = 0;
- if (value->IsSmi()) {
- resource = reinterpret_cast<v8::Object::ExternalResource*>(Internals::GetExternalPointerFromSmi(value));
- } else if (value->IsForeign()) {
- resource = reinterpret_cast<v8::Object::ExternalResource*>(Foreign::cast(value)->foreign_address());
- }
- if (resource) {
- resource->Dispose();
+ if (value->IsForeign()) {
+ v8::Object::ExternalResource* resource =
+ reinterpret_cast<v8::Object::ExternalResource*>(
+ Foreign::cast(value)->foreign_address());
+ ASSERT(resource != NULL);
+ resource->Dispose();
}
}
}
@@ -446,6 +470,15 @@ void Heap::ScavengeObject(HeapObject** p, HeapObject* object) {
}
+MaybeObject* Heap::AllocateEmptyJSArrayWithAllocationSite(
+ ElementsKind elements_kind,
+ Handle<Object> allocation_site_payload) {
+ return AllocateJSArrayAndStorageWithAllocationSite(elements_kind, 0, 0,
+ allocation_site_payload,
+ DONT_INITIALIZE_ARRAY_ELEMENTS);
+}
+
+
bool Heap::CollectGarbage(AllocationSpace space, const char* gc_reason) {
const char* collector_reason = NULL;
GarbageCollector collector = SelectGarbageCollector(space, &collector_reason);
@@ -476,7 +509,7 @@ intptr_t Heap::AdjustAmountOfExternalAllocatedMemory(
intptr_t change_in_bytes) {
ASSERT(HasBeenSetUp());
intptr_t amount = amount_of_external_allocated_memory_ + change_in_bytes;
- if (change_in_bytes >= 0) {
+ if (change_in_bytes > 0) {
// Avoid overflow.
if (amount > amount_of_external_allocated_memory_) {
amount_of_external_allocated_memory_ = amount;
@@ -634,20 +667,24 @@ void ExternalStringTable::Verify() {
// TODO(yangguo): check that the object is indeed an external string.
ASSERT(heap_->InNewSpace(obj));
ASSERT(obj != HEAP->the_hole_value());
+#ifndef ENABLE_LATIN_1
if (obj->IsExternalAsciiString()) {
ExternalAsciiString* string = ExternalAsciiString::cast(obj);
ASSERT(String::IsAscii(string->GetChars(), string->length()));
}
+#endif
}
for (int i = 0; i < old_space_strings_.length(); ++i) {
Object* obj = Object::cast(old_space_strings_[i]);
// TODO(yangguo): check that the object is indeed an external string.
ASSERT(!heap_->InNewSpace(obj));
ASSERT(obj != HEAP->the_hole_value());
+#ifndef ENABLE_LATIN_1
if (obj->IsExternalAsciiString()) {
ExternalAsciiString* string = ExternalAsciiString::cast(obj);
ASSERT(String::IsAscii(string->GetChars(), string->length()));
}
+#endif
}
#endif
}
@@ -670,6 +707,19 @@ void ExternalStringTable::ShrinkNewObjects(int position) {
}
+void ErrorObjectList::Add(JSObject* object) {
+ list_.Add(object);
+}
+
+
+void ErrorObjectList::Iterate(ObjectVisitor* v) {
+ if (!list_.is_empty()) {
+ Object** start = &list_[0];
+ v->VisitPointers(start, start + list_.length());
+ }
+}
+
+
void Heap::ClearInstanceofCache() {
set_instanceof_cache_function(the_hole_value());
}
@@ -765,6 +815,18 @@ AlwaysAllocateScope::~AlwaysAllocateScope() {
}
+#ifdef VERIFY_HEAP
+NoWeakEmbeddedMapsVerificationScope::NoWeakEmbeddedMapsVerificationScope() {
+ HEAP->no_weak_embedded_maps_verification_scope_depth_++;
+}
+
+
+NoWeakEmbeddedMapsVerificationScope::~NoWeakEmbeddedMapsVerificationScope() {
+ HEAP->no_weak_embedded_maps_verification_scope_depth_--;
+}
+#endif
+
+
void VerifyPointersVisitor::VisitPointers(Object** start, Object** end) {
for (Object** current = start; current < end; current++) {
if ((*current)->IsHeapObject()) {
diff --git a/src/3rdparty/v8/src/heap-profiler.cc b/src/3rdparty/v8/src/heap-profiler.cc
index 301b099..c9f1d50 100644
--- a/src/3rdparty/v8/src/heap-profiler.cc
+++ b/src/3rdparty/v8/src/heap-profiler.cc
@@ -28,13 +28,13 @@
#include "v8.h"
#include "heap-profiler.h"
-#include "profile-generator.h"
+#include "heap-snapshot-generator-inl.h"
namespace v8 {
namespace internal {
-HeapProfiler::HeapProfiler()
- : snapshots_(new HeapSnapshotsCollection()),
+HeapProfiler::HeapProfiler(Heap* heap)
+ : snapshots_(new HeapSnapshotsCollection(heap)),
next_snapshot_uid_(1) {
}
@@ -45,15 +45,16 @@ HeapProfiler::~HeapProfiler() {
void HeapProfiler::ResetSnapshots() {
+ Heap* the_heap = heap();
delete snapshots_;
- snapshots_ = new HeapSnapshotsCollection();
+ snapshots_ = new HeapSnapshotsCollection(the_heap);
}
void HeapProfiler::SetUp() {
Isolate* isolate = Isolate::Current();
if (isolate->heap_profiler() == NULL) {
- isolate->set_heap_profiler(new HeapProfiler());
+ isolate->set_heap_profiler(new HeapProfiler(isolate->heap()));
}
}
@@ -65,23 +66,29 @@ void HeapProfiler::TearDown() {
}
-HeapSnapshot* HeapProfiler::TakeSnapshot(const char* name,
- int type,
- v8::ActivityControl* control) {
+HeapSnapshot* HeapProfiler::TakeSnapshot(
+ const char* name,
+ int type,
+ v8::ActivityControl* control,
+ v8::HeapProfiler::ObjectNameResolver* resolver) {
ASSERT(Isolate::Current()->heap_profiler() != NULL);
return Isolate::Current()->heap_profiler()->TakeSnapshotImpl(name,
type,
- control);
+ control,
+ resolver);
}
-HeapSnapshot* HeapProfiler::TakeSnapshot(String* name,
- int type,
- v8::ActivityControl* control) {
+HeapSnapshot* HeapProfiler::TakeSnapshot(
+ String* name,
+ int type,
+ v8::ActivityControl* control,
+ v8::HeapProfiler::ObjectNameResolver* resolver) {
ASSERT(Isolate::Current()->heap_profiler() != NULL);
return Isolate::Current()->heap_profiler()->TakeSnapshotImpl(name,
type,
- control);
+ control,
+ resolver);
}
@@ -122,16 +129,18 @@ v8::RetainedObjectInfo* HeapProfiler::ExecuteWrapperClassCallback(
}
-HeapSnapshot* HeapProfiler::TakeSnapshotImpl(const char* name,
- int type,
- v8::ActivityControl* control) {
+HeapSnapshot* HeapProfiler::TakeSnapshotImpl(
+ const char* name,
+ int type,
+ v8::ActivityControl* control,
+ v8::HeapProfiler::ObjectNameResolver* resolver) {
HeapSnapshot::Type s_type = static_cast<HeapSnapshot::Type>(type);
HeapSnapshot* result =
snapshots_->NewSnapshot(s_type, name, next_snapshot_uid_++);
bool generation_completed = true;
switch (s_type) {
case HeapSnapshot::kFull: {
- HeapSnapshotGenerator generator(result, control);
+ HeapSnapshotGenerator generator(result, control, resolver, heap());
generation_completed = generator.GenerateSnapshot();
break;
}
@@ -147,10 +156,13 @@ HeapSnapshot* HeapProfiler::TakeSnapshotImpl(const char* name,
}
-HeapSnapshot* HeapProfiler::TakeSnapshotImpl(String* name,
- int type,
- v8::ActivityControl* control) {
- return TakeSnapshotImpl(snapshots_->names()->GetName(name), type, control);
+HeapSnapshot* HeapProfiler::TakeSnapshotImpl(
+ String* name,
+ int type,
+ v8::ActivityControl* control,
+ v8::HeapProfiler::ObjectNameResolver* resolver) {
+ return TakeSnapshotImpl(snapshots_->names()->GetName(name), type, control,
+ resolver);
}
void HeapProfiler::StartHeapObjectsTrackingImpl() {
diff --git a/src/3rdparty/v8/src/heap-profiler.h b/src/3rdparty/v8/src/heap-profiler.h
index 346177b..c8c94f5 100644
--- a/src/3rdparty/v8/src/heap-profiler.h
+++ b/src/3rdparty/v8/src/heap-profiler.h
@@ -51,12 +51,16 @@ class HeapProfiler {
static size_t GetMemorySizeUsedByProfiler();
- static HeapSnapshot* TakeSnapshot(const char* name,
- int type,
- v8::ActivityControl* control);
- static HeapSnapshot* TakeSnapshot(String* name,
- int type,
- v8::ActivityControl* control);
+ static HeapSnapshot* TakeSnapshot(
+ const char* name,
+ int type,
+ v8::ActivityControl* control,
+ v8::HeapProfiler::ObjectNameResolver* resolver);
+ static HeapSnapshot* TakeSnapshot(
+ String* name,
+ int type,
+ v8::ActivityControl* control,
+ v8::HeapProfiler::ObjectNameResolver* resolver);
static void StartHeapObjectsTracking();
static void StopHeapObjectsTracking();
@@ -79,20 +83,26 @@ class HeapProfiler {
}
private:
- HeapProfiler();
+ explicit HeapProfiler(Heap* heap);
~HeapProfiler();
- HeapSnapshot* TakeSnapshotImpl(const char* name,
- int type,
- v8::ActivityControl* control);
- HeapSnapshot* TakeSnapshotImpl(String* name,
- int type,
- v8::ActivityControl* control);
+ HeapSnapshot* TakeSnapshotImpl(
+ const char* name,
+ int type,
+ v8::ActivityControl* control,
+ v8::HeapProfiler::ObjectNameResolver* resolver);
+ HeapSnapshot* TakeSnapshotImpl(
+ String* name,
+ int type,
+ v8::ActivityControl* control,
+ v8::HeapProfiler::ObjectNameResolver* resolver);
void ResetSnapshots();
void StartHeapObjectsTrackingImpl();
void StopHeapObjectsTrackingImpl();
SnapshotObjectId PushHeapObjectsStatsImpl(OutputStream* stream);
+ Heap* heap() const { return snapshots_->heap(); }
+
HeapSnapshotsCollection* snapshots_;
unsigned next_snapshot_uid_;
List<v8::HeapProfiler::WrapperInfoCallback> wrapper_callbacks_;
diff --git a/src/3rdparty/v8/src/heap-snapshot-generator-inl.h b/src/3rdparty/v8/src/heap-snapshot-generator-inl.h
new file mode 100644
index 0000000..1a878c6
--- /dev/null
+++ b/src/3rdparty/v8/src/heap-snapshot-generator-inl.h
@@ -0,0 +1,88 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_HEAP_SNAPSHOT_GENERATOR_INL_H_
+#define V8_HEAP_SNAPSHOT_GENERATOR_INL_H_
+
+#include "heap-snapshot-generator.h"
+
+namespace v8 {
+namespace internal {
+
+
+HeapEntry* HeapGraphEdge::from() const {
+ return &snapshot()->entries()[from_index_];
+}
+
+
+HeapSnapshot* HeapGraphEdge::snapshot() const {
+ return to_entry_->snapshot();
+}
+
+
+int HeapEntry::index() const {
+ return static_cast<int>(this - &snapshot_->entries().first());
+}
+
+
+int HeapEntry::set_children_index(int index) {
+ children_index_ = index;
+ int next_index = index + children_count_;
+ children_count_ = 0;
+ return next_index;
+}
+
+
+HeapGraphEdge** HeapEntry::children_arr() {
+ ASSERT(children_index_ >= 0);
+ return &snapshot_->children()[children_index_];
+}
+
+
+SnapshotObjectId HeapObjectsMap::GetNthGcSubrootId(int delta) {
+ return kGcRootsFirstSubrootId + delta * kObjectIdStep;
+}
+
+
+HeapObject* V8HeapExplorer::GetNthGcSubrootObject(int delta) {
+ return reinterpret_cast<HeapObject*>(
+ reinterpret_cast<char*>(kFirstGcSubrootObject) +
+ delta * HeapObjectsMap::kObjectIdStep);
+}
+
+
+int V8HeapExplorer::GetGcSubrootOrder(HeapObject* subroot) {
+ return static_cast<int>(
+ (reinterpret_cast<char*>(subroot) -
+ reinterpret_cast<char*>(kFirstGcSubrootObject)) /
+ HeapObjectsMap::kObjectIdStep);
+}
+
+} } // namespace v8::internal
+
+#endif // V8_HEAP_SNAPSHOT_GENERATOR_INL_H_
+
diff --git a/src/3rdparty/v8/src/heap-snapshot-generator.cc b/src/3rdparty/v8/src/heap-snapshot-generator.cc
new file mode 100644
index 0000000..9e96685
--- /dev/null
+++ b/src/3rdparty/v8/src/heap-snapshot-generator.cc
@@ -0,0 +1,2703 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "heap-snapshot-generator-inl.h"
+
+#include "heap-profiler.h"
+#include "debug.h"
+
+namespace v8 {
+namespace internal {
+
+
+HeapGraphEdge::HeapGraphEdge(Type type, const char* name, int from, int to)
+ : type_(type),
+ from_index_(from),
+ to_index_(to),
+ name_(name) {
+ ASSERT(type == kContextVariable
+ || type == kProperty
+ || type == kInternal
+ || type == kShortcut);
+}
+
+
+HeapGraphEdge::HeapGraphEdge(Type type, int index, int from, int to)
+ : type_(type),
+ from_index_(from),
+ to_index_(to),
+ index_(index) {
+ ASSERT(type == kElement || type == kHidden || type == kWeak);
+}
+
+
+void HeapGraphEdge::ReplaceToIndexWithEntry(HeapSnapshot* snapshot) {
+ to_entry_ = &snapshot->entries()[to_index_];
+}
+
+
+const int HeapEntry::kNoEntry = -1;
+
+HeapEntry::HeapEntry(HeapSnapshot* snapshot,
+ Type type,
+ const char* name,
+ SnapshotObjectId id,
+ int self_size)
+ : type_(type),
+ children_count_(0),
+ children_index_(-1),
+ self_size_(self_size),
+ id_(id),
+ snapshot_(snapshot),
+ name_(name) { }
+
+
+void HeapEntry::SetNamedReference(HeapGraphEdge::Type type,
+ const char* name,
+ HeapEntry* entry) {
+ HeapGraphEdge edge(type, name, this->index(), entry->index());
+ snapshot_->edges().Add(edge);
+ ++children_count_;
+}
+
+
+void HeapEntry::SetIndexedReference(HeapGraphEdge::Type type,
+ int index,
+ HeapEntry* entry) {
+ HeapGraphEdge edge(type, index, this->index(), entry->index());
+ snapshot_->edges().Add(edge);
+ ++children_count_;
+}
+
+
+Handle<HeapObject> HeapEntry::GetHeapObject() {
+ return snapshot_->collection()->FindHeapObjectById(id());
+}
+
+
+void HeapEntry::Print(
+ const char* prefix, const char* edge_name, int max_depth, int indent) {
+ STATIC_CHECK(sizeof(unsigned) == sizeof(id()));
+ OS::Print("%6d @%6u %*c %s%s: ",
+ self_size(), id(), indent, ' ', prefix, edge_name);
+ if (type() != kString) {
+ OS::Print("%s %.40s\n", TypeAsString(), name_);
+ } else {
+ OS::Print("\"");
+ const char* c = name_;
+ while (*c && (c - name_) <= 40) {
+ if (*c != '\n')
+ OS::Print("%c", *c);
+ else
+ OS::Print("\\n");
+ ++c;
+ }
+ OS::Print("\"\n");
+ }
+ if (--max_depth == 0) return;
+ Vector<HeapGraphEdge*> ch = children();
+ for (int i = 0; i < ch.length(); ++i) {
+ HeapGraphEdge& edge = *ch[i];
+ const char* edge_prefix = "";
+ EmbeddedVector<char, 64> index;
+ const char* edge_name = index.start();
+ switch (edge.type()) {
+ case HeapGraphEdge::kContextVariable:
+ edge_prefix = "#";
+ edge_name = edge.name();
+ break;
+ case HeapGraphEdge::kElement:
+ OS::SNPrintF(index, "%d", edge.index());
+ break;
+ case HeapGraphEdge::kInternal:
+ edge_prefix = "$";
+ edge_name = edge.name();
+ break;
+ case HeapGraphEdge::kProperty:
+ edge_name = edge.name();
+ break;
+ case HeapGraphEdge::kHidden:
+ edge_prefix = "$";
+ OS::SNPrintF(index, "%d", edge.index());
+ break;
+ case HeapGraphEdge::kShortcut:
+ edge_prefix = "^";
+ edge_name = edge.name();
+ break;
+ case HeapGraphEdge::kWeak:
+ edge_prefix = "w";
+ OS::SNPrintF(index, "%d", edge.index());
+ break;
+ default:
+ OS::SNPrintF(index, "!!! unknown edge type: %d ", edge.type());
+ }
+ edge.to()->Print(edge_prefix, edge_name, max_depth, indent + 2);
+ }
+}
+
+
+const char* HeapEntry::TypeAsString() {
+ switch (type()) {
+ case kHidden: return "/hidden/";
+ case kObject: return "/object/";
+ case kClosure: return "/closure/";
+ case kString: return "/string/";
+ case kCode: return "/code/";
+ case kArray: return "/array/";
+ case kRegExp: return "/regexp/";
+ case kHeapNumber: return "/number/";
+ case kNative: return "/native/";
+ case kSynthetic: return "/synthetic/";
+ default: return "???";
+ }
+}
+
+
+// It is very important to keep objects that form a heap snapshot
+// as small as possible.
+namespace { // Avoid littering the global namespace.
+
+template <size_t ptr_size> struct SnapshotSizeConstants;
+
+template <> struct SnapshotSizeConstants<4> {
+ static const int kExpectedHeapGraphEdgeSize = 12;
+ static const int kExpectedHeapEntrySize = 24;
+ static const int kExpectedHeapSnapshotsCollectionSize = 100;
+ static const int kExpectedHeapSnapshotSize = 136;
+ static const size_t kMaxSerializableSnapshotRawSize = 256 * MB;
+};
+
+template <> struct SnapshotSizeConstants<8> {
+ static const int kExpectedHeapGraphEdgeSize = 24;
+ static const int kExpectedHeapEntrySize = 32;
+ static const int kExpectedHeapSnapshotsCollectionSize = 152;
+ static const int kExpectedHeapSnapshotSize = 168;
+ static const uint64_t kMaxSerializableSnapshotRawSize =
+ static_cast<uint64_t>(6000) * MB;
+};
+
+} // namespace
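
Selecting constants by pointer width through template specialization keeps the STATIC_CHECKs in the constructor below honest on both 32- and 64-bit builds. The pattern in miniature:

#include <cstddef>
#include <cstdint>

// Only the specialization matching this build's pointer size is instantiated.
template <size_t ptr_size> struct Limits;
template <> struct Limits<4> {
  static const uint64_t kMaxRawSize = 256ull * 1024 * 1024;     // 256 MB
};
template <> struct Limits<8> {
  static const uint64_t kMaxRawSize = 6000ull * 1024 * 1024;    // 6000 MB
};

static const uint64_t kMaxForThisBuild = Limits<sizeof(void*)>::kMaxRawSize;
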
+
+HeapSnapshot::HeapSnapshot(HeapSnapshotsCollection* collection,
+ HeapSnapshot::Type type,
+ const char* title,
+ unsigned uid)
+ : collection_(collection),
+ type_(type),
+ title_(title),
+ uid_(uid),
+ root_index_(HeapEntry::kNoEntry),
+ gc_roots_index_(HeapEntry::kNoEntry),
+ natives_root_index_(HeapEntry::kNoEntry),
+ max_snapshot_js_object_id_(0) {
+ STATIC_CHECK(
+ sizeof(HeapGraphEdge) ==
+ SnapshotSizeConstants<kPointerSize>::kExpectedHeapGraphEdgeSize);
+ STATIC_CHECK(
+ sizeof(HeapEntry) ==
+ SnapshotSizeConstants<kPointerSize>::kExpectedHeapEntrySize);
+ for (int i = 0; i < VisitorSynchronization::kNumberOfSyncTags; ++i) {
+ gc_subroot_indexes_[i] = HeapEntry::kNoEntry;
+ }
+}
+
+
+void HeapSnapshot::Delete() {
+ collection_->RemoveSnapshot(this);
+ delete this;
+}
+
+
+void HeapSnapshot::RememberLastJSObjectId() {
+ max_snapshot_js_object_id_ = collection_->last_assigned_id();
+}
+
+
+HeapEntry* HeapSnapshot::AddRootEntry() {
+ ASSERT(root_index_ == HeapEntry::kNoEntry);
+ ASSERT(entries_.is_empty()); // Root entry must be the first one.
+ HeapEntry* entry = AddEntry(HeapEntry::kObject,
+ "",
+ HeapObjectsMap::kInternalRootObjectId,
+ 0);
+ root_index_ = entry->index();
+ ASSERT(root_index_ == 0);
+ return entry;
+}
+
+
+HeapEntry* HeapSnapshot::AddGcRootsEntry() {
+ ASSERT(gc_roots_index_ == HeapEntry::kNoEntry);
+ HeapEntry* entry = AddEntry(HeapEntry::kObject,
+ "(GC roots)",
+ HeapObjectsMap::kGcRootsObjectId,
+ 0);
+ gc_roots_index_ = entry->index();
+ return entry;
+}
+
+
+HeapEntry* HeapSnapshot::AddGcSubrootEntry(int tag) {
+ ASSERT(gc_subroot_indexes_[tag] == HeapEntry::kNoEntry);
+ ASSERT(0 <= tag && tag < VisitorSynchronization::kNumberOfSyncTags);
+ HeapEntry* entry = AddEntry(
+ HeapEntry::kObject,
+ VisitorSynchronization::kTagNames[tag],
+ HeapObjectsMap::GetNthGcSubrootId(tag),
+ 0);
+ gc_subroot_indexes_[tag] = entry->index();
+ return entry;
+}
+
+
+HeapEntry* HeapSnapshot::AddEntry(HeapEntry::Type type,
+ const char* name,
+ SnapshotObjectId id,
+ int size) {
+ HeapEntry entry(this, type, name, id, size);
+ entries_.Add(entry);
+ return &entries_.last();
+}
+
+
+void HeapSnapshot::FillChildren() {
+ ASSERT(children().is_empty());
+ children().Allocate(edges().length());
+ int children_index = 0;
+ for (int i = 0; i < entries().length(); ++i) {
+ HeapEntry* entry = &entries()[i];
+ children_index = entry->set_children_index(children_index);
+ }
+ ASSERT(edges().length() == children_index);
+ for (int i = 0; i < edges().length(); ++i) {
+ HeapGraphEdge* edge = &edges()[i];
+ edge->ReplaceToIndexWithEntry(this);
+ edge->from()->add_child(edge);
+ }
+}
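+
+// set_children_index gives each entry a contiguous slice of the shared
+// children_ array sized by its children_count_, and the second loop then
+// files every edge into its source entry's slice; children() is thus a
+// cheap sub-vector view over one backing store rather than a per-entry list.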
+
+
+class FindEntryById {
+ public:
+ explicit FindEntryById(SnapshotObjectId id) : id_(id) { }
+ int operator()(HeapEntry* const* entry) {
+ if ((*entry)->id() == id_) return 0;
+ return (*entry)->id() < id_ ? -1 : 1;
+ }
+ private:
+ SnapshotObjectId id_;
+};
+
+
+HeapEntry* HeapSnapshot::GetEntryById(SnapshotObjectId id) {
+ List<HeapEntry*>* entries_by_id = GetSortedEntriesList();
+ // Perform a binary search by id.
+ int index = SortedListBSearch(*entries_by_id, FindEntryById(id));
+ if (index == -1)
+ return NULL;
+ return entries_by_id->at(index);
+}
+
+
+template<class T>
+static int SortByIds(const T* entry1_ptr,
+ const T* entry2_ptr) {
+ if ((*entry1_ptr)->id() == (*entry2_ptr)->id()) return 0;
+ return (*entry1_ptr)->id() < (*entry2_ptr)->id() ? -1 : 1;
+}
+
+
+List<HeapEntry*>* HeapSnapshot::GetSortedEntriesList() {
+ if (sorted_entries_.is_empty()) {
+ sorted_entries_.Allocate(entries_.length());
+ for (int i = 0; i < entries_.length(); ++i) {
+ sorted_entries_[i] = &entries_[i];
+ }
+ sorted_entries_.Sort(SortByIds);
+ }
+ return &sorted_entries_;
+}
+
+
+void HeapSnapshot::Print(int max_depth) {
+ root()->Print("", "", max_depth, 0);
+}
+
+
+template<typename T, class P>
+static size_t GetMemoryUsedByList(const List<T, P>& list) {
+ return list.length() * sizeof(T) + sizeof(list);
+}
+
+
+size_t HeapSnapshot::RawSnapshotSize() const {
+ STATIC_CHECK(SnapshotSizeConstants<kPointerSize>::kExpectedHeapSnapshotSize ==
+ sizeof(HeapSnapshot)); // NOLINT
+ return
+ sizeof(*this) +
+ GetMemoryUsedByList(entries_) +
+ GetMemoryUsedByList(edges_) +
+ GetMemoryUsedByList(children_) +
+ GetMemoryUsedByList(sorted_entries_);
+}
+
+
+// We split IDs: even ids are reserved for embedder (native) objects
+// (see HeapObjectsMap::GenerateId) and odd ids for V8 heap objects.
+const SnapshotObjectId HeapObjectsMap::kInternalRootObjectId = 1;
+const SnapshotObjectId HeapObjectsMap::kGcRootsObjectId =
+ HeapObjectsMap::kInternalRootObjectId + HeapObjectsMap::kObjectIdStep;
+const SnapshotObjectId HeapObjectsMap::kGcRootsFirstSubrootId =
+ HeapObjectsMap::kGcRootsObjectId + HeapObjectsMap::kObjectIdStep;
+const SnapshotObjectId HeapObjectsMap::kFirstAvailableObjectId =
+ HeapObjectsMap::kGcRootsFirstSubrootId +
+ VisitorSynchronization::kNumberOfSyncTags * HeapObjectsMap::kObjectIdStep;
+
+HeapObjectsMap::HeapObjectsMap(Heap* heap)
+ : next_id_(kFirstAvailableObjectId),
+ entries_map_(AddressesMatch),
+ heap_(heap) {
+ // This dummy element solves a problem with entries_map_.
+ // When we do a lookup in the HashMap we cannot distinguish two cases:
+ // the map already had an entry with NULL as the value, or it has just
+ // created a new entry on the fly with NULL as the default value.
+ // With such a dummy element we have a guarantee that all entries_map_
+ // entries will have a value field greater than 0.
+ // This fact is used in the MoveObject method.
+ entries_.Add(EntryInfo(0, NULL, 0));
+}
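+
+// Illustrative state (not executed code) after tracking two live objects at
+// addresses A and B, assuming no moves and an even kObjectIdStep:
+//   entries_     == { dummy,
+//                     EntryInfo(kFirstAvailableObjectId, A, sizeA),
+//                     EntryInfo(kFirstAvailableObjectId + kObjectIdStep, B, sizeB) }
+//   entries_map_ == { A -> 1, B -> 2 }  // indices into entries_, always > 0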
+
+
+void HeapObjectsMap::SnapshotGenerationFinished() {
+ RemoveDeadEntries();
+}
+
+
+void HeapObjectsMap::MoveObject(Address from, Address to) {
+ ASSERT(to != NULL);
+ ASSERT(from != NULL);
+ if (from == to) return;
+ void* from_value = entries_map_.Remove(from, AddressHash(from));
+ if (from_value == NULL) {
+ // It may occur that an untracked object moves to an address X at which
+ // a tracked object used to live. In this case we should remove the stale
+ // entry, as we know that the tracked object has died.
+ void* to_value = entries_map_.Remove(to, AddressHash(to));
+ if (to_value != NULL) {
+ int to_entry_info_index =
+ static_cast<int>(reinterpret_cast<intptr_t>(to_value));
+ entries_.at(to_entry_info_index).addr = NULL;
+ }
+ } else {
+ HashMap::Entry* to_entry = entries_map_.Lookup(to, AddressHash(to), true);
+ if (to_entry->value != NULL) {
+ // We found an existing entry at the 'to' address, belonging to an old
+ // object. Without this operation we would end up with two EntryInfos
+ // sharing the same addr value, and later RemoveDeadEntries would drop
+ // one of them together with the corresponding entries_map_ entry.
+ int to_entry_info_index =
+ static_cast<int>(reinterpret_cast<intptr_t>(to_entry->value));
+ entries_.at(to_entry_info_index).addr = NULL;
+ }
+ int from_entry_info_index =
+ static_cast<int>(reinterpret_cast<intptr_t>(from_value));
+ entries_.at(from_entry_info_index).addr = to;
+ to_entry->value = from_value;
+ }
+}
+
+
+SnapshotObjectId HeapObjectsMap::FindEntry(Address addr) {
+ HashMap::Entry* entry = entries_map_.Lookup(addr, AddressHash(addr), false);
+ if (entry == NULL) return 0;
+ int entry_index = static_cast<int>(reinterpret_cast<intptr_t>(entry->value));
+ EntryInfo& entry_info = entries_.at(entry_index);
+ ASSERT(static_cast<uint32_t>(entries_.length()) > entries_map_.occupancy());
+ return entry_info.id;
+}
+
+
+SnapshotObjectId HeapObjectsMap::FindOrAddEntry(Address addr,
+ unsigned int size) {
+ ASSERT(static_cast<uint32_t>(entries_.length()) > entries_map_.occupancy());
+ HashMap::Entry* entry = entries_map_.Lookup(addr, AddressHash(addr), true);
+ if (entry->value != NULL) {
+ int entry_index =
+ static_cast<int>(reinterpret_cast<intptr_t>(entry->value));
+ EntryInfo& entry_info = entries_.at(entry_index);
+ entry_info.accessed = true;
+ entry_info.size = size;
+ return entry_info.id;
+ }
+ entry->value = reinterpret_cast<void*>(entries_.length());
+ SnapshotObjectId id = next_id_;
+ next_id_ += kObjectIdStep;
+ entries_.Add(EntryInfo(id, addr, size));
+ ASSERT(static_cast<uint32_t>(entries_.length()) > entries_map_.occupancy());
+ return id;
+}
+
+
+void HeapObjectsMap::StopHeapObjectsTracking() {
+ time_intervals_.Clear();
+}
+
+void HeapObjectsMap::UpdateHeapObjectsMap() {
+ HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask,
+ "HeapSnapshotsCollection::UpdateHeapObjectsMap");
+ HeapIterator iterator(heap_);
+ for (HeapObject* obj = iterator.next();
+ obj != NULL;
+ obj = iterator.next()) {
+ FindOrAddEntry(obj->address(), obj->Size());
+ }
+ RemoveDeadEntries();
+}
+
+
+SnapshotObjectId HeapObjectsMap::PushHeapObjectsStats(OutputStream* stream) {
+ UpdateHeapObjectsMap();
+ time_intervals_.Add(TimeInterval(next_id_));
+ int preferred_chunk_size = stream->GetChunkSize();
+ List<v8::HeapStatsUpdate> stats_buffer;
+ ASSERT(!entries_.is_empty());
+ EntryInfo* entry_info = &entries_.first();
+ EntryInfo* end_entry_info = &entries_.last() + 1;
+ for (int time_interval_index = 0;
+ time_interval_index < time_intervals_.length();
+ ++time_interval_index) {
+ TimeInterval& time_interval = time_intervals_[time_interval_index];
+ SnapshotObjectId time_interval_id = time_interval.id;
+ uint32_t entries_size = 0;
+ EntryInfo* start_entry_info = entry_info;
+ while (entry_info < end_entry_info && entry_info->id < time_interval_id) {
+ entries_size += entry_info->size;
+ ++entry_info;
+ }
+ uint32_t entries_count =
+ static_cast<uint32_t>(entry_info - start_entry_info);
+ if (time_interval.count != entries_count ||
+ time_interval.size != entries_size) {
+ stats_buffer.Add(v8::HeapStatsUpdate(
+ time_interval_index,
+ time_interval.count = entries_count,
+ time_interval.size = entries_size));
+ if (stats_buffer.length() >= preferred_chunk_size) {
+ OutputStream::WriteResult result = stream->WriteHeapStatsChunk(
+ &stats_buffer.first(), stats_buffer.length());
+ if (result == OutputStream::kAbort) return last_assigned_id();
+ stats_buffer.Clear();
+ }
+ }
+ }
+ ASSERT(entry_info == end_entry_info);
+ if (!stats_buffer.is_empty()) {
+ OutputStream::WriteResult result = stream->WriteHeapStatsChunk(
+ &stats_buffer.first(), stats_buffer.length());
+ if (result == OutputStream::kAbort) return last_assigned_id();
+ }
+ stream->EndOfStream();
+ return last_assigned_id();
+}
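+
+// Each chunk pushed above is a flat array of v8::HeapStatsUpdate triples
+// (time interval index, object count, total size), emitted only for
+// intervals whose aggregates changed since the previous push; the embedder
+// can reconstruct the allocation timeline by applying the updates in order.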
+
+
+void HeapObjectsMap::RemoveDeadEntries() {
+ ASSERT(entries_.length() > 0 &&
+ entries_.at(0).id == 0 &&
+ entries_.at(0).addr == NULL);
+ int first_free_entry = 1;
+ for (int i = 1; i < entries_.length(); ++i) {
+ EntryInfo& entry_info = entries_.at(i);
+ if (entry_info.accessed) {
+ if (first_free_entry != i) {
+ entries_.at(first_free_entry) = entry_info;
+ }
+ entries_.at(first_free_entry).accessed = false;
+ HashMap::Entry* entry = entries_map_.Lookup(
+ entry_info.addr, AddressHash(entry_info.addr), false);
+ ASSERT(entry);
+ entry->value = reinterpret_cast<void*>(first_free_entry);
+ ++first_free_entry;
+ } else {
+ if (entry_info.addr) {
+ entries_map_.Remove(entry_info.addr, AddressHash(entry_info.addr));
+ }
+ }
+ }
+ entries_.Rewind(first_free_entry);
+ ASSERT(static_cast<uint32_t>(entries_.length()) - 1 ==
+ entries_map_.occupancy());
+}
+
+
+SnapshotObjectId HeapObjectsMap::GenerateId(v8::RetainedObjectInfo* info) {
+ SnapshotObjectId id = static_cast<SnapshotObjectId>(info->GetHash());
+ const char* label = info->GetLabel();
+ id ^= StringHasher::HashSequentialString(label,
+ static_cast<int>(strlen(label)),
+ HEAP->HashSeed());
+ intptr_t element_count = info->GetElementCount();
+ if (element_count != -1)
+ id ^= ComputeIntegerHash(static_cast<uint32_t>(element_count),
+ v8::internal::kZeroHashSeed);
+ return id << 1;
+}
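+
+// GenerateId shifts the hash left by one, so ids for embedder-provided
+// RetainedObjectInfo objects are always even, while ids handed out by
+// FindOrAddEntry start at the odd kFirstAvailableObjectId and advance by
+// the even kObjectIdStep, keeping V8 heap object ids odd. The two id
+// spaces therefore never collide.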
+
+
+size_t HeapObjectsMap::GetUsedMemorySize() const {
+ return
+ sizeof(*this) +
+ sizeof(HashMap::Entry) * entries_map_.capacity() +
+ GetMemoryUsedByList(entries_) +
+ GetMemoryUsedByList(time_intervals_);
+}
+
+
+HeapSnapshotsCollection::HeapSnapshotsCollection(Heap* heap)
+ : is_tracking_objects_(false),
+ snapshots_uids_(HeapSnapshotsMatch),
+ token_enumerator_(new TokenEnumerator()),
+ ids_(heap) {
+}
+
+
+static void DeleteHeapSnapshot(HeapSnapshot** snapshot_ptr) {
+ delete *snapshot_ptr;
+}
+
+
+HeapSnapshotsCollection::~HeapSnapshotsCollection() {
+ delete token_enumerator_;
+ snapshots_.Iterate(DeleteHeapSnapshot);
+}
+
+
+HeapSnapshot* HeapSnapshotsCollection::NewSnapshot(HeapSnapshot::Type type,
+ const char* name,
+ unsigned uid) {
+ is_tracking_objects_ = true; // Start watching for heap objects moves.
+ return new HeapSnapshot(this, type, name, uid);
+}
+
+
+void HeapSnapshotsCollection::SnapshotGenerationFinished(
+ HeapSnapshot* snapshot) {
+ ids_.SnapshotGenerationFinished();
+ if (snapshot != NULL) {
+ snapshots_.Add(snapshot);
+ HashMap::Entry* entry =
+ snapshots_uids_.Lookup(reinterpret_cast<void*>(snapshot->uid()),
+ static_cast<uint32_t>(snapshot->uid()),
+ true);
+ ASSERT(entry->value == NULL);
+ entry->value = snapshot;
+ }
+}
+
+
+HeapSnapshot* HeapSnapshotsCollection::GetSnapshot(unsigned uid) {
+ HashMap::Entry* entry = snapshots_uids_.Lookup(reinterpret_cast<void*>(uid),
+ static_cast<uint32_t>(uid),
+ false);
+ return entry != NULL ? reinterpret_cast<HeapSnapshot*>(entry->value) : NULL;
+}
+
+
+void HeapSnapshotsCollection::RemoveSnapshot(HeapSnapshot* snapshot) {
+ snapshots_.RemoveElement(snapshot);
+ unsigned uid = snapshot->uid();
+ snapshots_uids_.Remove(reinterpret_cast<void*>(uid),
+ static_cast<uint32_t>(uid));
+}
+
+
+Handle<HeapObject> HeapSnapshotsCollection::FindHeapObjectById(
+ SnapshotObjectId id) {
+ // First perform a full GC in order to avoid dead objects.
+ HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask,
+ "HeapSnapshotsCollection::FindHeapObjectById");
+ AssertNoAllocation no_allocation;
+ HeapObject* object = NULL;
+ HeapIterator iterator(heap(), HeapIterator::kFilterUnreachable);
+ // Make sure that the object with the given id is still reachable.
+ for (HeapObject* obj = iterator.next();
+ obj != NULL;
+ obj = iterator.next()) {
+ if (ids_.FindEntry(obj->address()) == id) {
+ ASSERT(object == NULL);
+ object = obj;
+ // Can't break -- kFilterUnreachable requires full heap traversal.
+ }
+ }
+ return object != NULL ? Handle<HeapObject>(object) : Handle<HeapObject>();
+}
+
+
+size_t HeapSnapshotsCollection::GetUsedMemorySize() const {
+ STATIC_CHECK(SnapshotSizeConstants<kPointerSize>::
+ kExpectedHeapSnapshotsCollectionSize ==
+ sizeof(HeapSnapshotsCollection)); // NOLINT
+ size_t size = sizeof(*this);
+ size += names_.GetUsedMemorySize();
+ size += ids_.GetUsedMemorySize();
+ size += sizeof(HashMap::Entry) * snapshots_uids_.capacity();
+ size += GetMemoryUsedByList(snapshots_);
+ for (int i = 0; i < snapshots_.length(); ++i) {
+ size += snapshots_[i]->RawSnapshotSize();
+ }
+ return size;
+}
+
+
+HeapEntriesMap::HeapEntriesMap()
+ : entries_(HeapThingsMatch) {
+}
+
+
+int HeapEntriesMap::Map(HeapThing thing) {
+ HashMap::Entry* cache_entry = entries_.Lookup(thing, Hash(thing), false);
+ if (cache_entry == NULL) return HeapEntry::kNoEntry;
+ return static_cast<int>(reinterpret_cast<intptr_t>(cache_entry->value));
+}
+
+
+void HeapEntriesMap::Pair(HeapThing thing, int entry) {
+ HashMap::Entry* cache_entry = entries_.Lookup(thing, Hash(thing), true);
+ ASSERT(cache_entry->value == NULL);
+ cache_entry->value = reinterpret_cast<void*>(static_cast<intptr_t>(entry));
+}
+
+
+HeapObjectsSet::HeapObjectsSet()
+ : entries_(HeapEntriesMap::HeapThingsMatch) {
+}
+
+
+void HeapObjectsSet::Clear() {
+ entries_.Clear();
+}
+
+
+bool HeapObjectsSet::Contains(Object* obj) {
+ if (!obj->IsHeapObject()) return false;
+ HeapObject* object = HeapObject::cast(obj);
+ return entries_.Lookup(object, HeapEntriesMap::Hash(object), false) != NULL;
+}
+
+
+void HeapObjectsSet::Insert(Object* obj) {
+ if (!obj->IsHeapObject()) return;
+ HeapObject* object = HeapObject::cast(obj);
+ entries_.Lookup(object, HeapEntriesMap::Hash(object), true);
+}
+
+
+const char* HeapObjectsSet::GetTag(Object* obj) {
+ HeapObject* object = HeapObject::cast(obj);
+ HashMap::Entry* cache_entry =
+ entries_.Lookup(object, HeapEntriesMap::Hash(object), false);
+ return cache_entry != NULL
+ ? reinterpret_cast<const char*>(cache_entry->value)
+ : NULL;
+}
+
+
+void HeapObjectsSet::SetTag(Object* obj, const char* tag) {
+ if (!obj->IsHeapObject()) return;
+ HeapObject* object = HeapObject::cast(obj);
+ HashMap::Entry* cache_entry =
+ entries_.Lookup(object, HeapEntriesMap::Hash(object), true);
+ cache_entry->value = const_cast<char*>(tag);
+}
+
+
+HeapObject* const V8HeapExplorer::kInternalRootObject =
+ reinterpret_cast<HeapObject*>(
+ static_cast<intptr_t>(HeapObjectsMap::kInternalRootObjectId));
+HeapObject* const V8HeapExplorer::kGcRootsObject =
+ reinterpret_cast<HeapObject*>(
+ static_cast<intptr_t>(HeapObjectsMap::kGcRootsObjectId));
+HeapObject* const V8HeapExplorer::kFirstGcSubrootObject =
+ reinterpret_cast<HeapObject*>(
+ static_cast<intptr_t>(HeapObjectsMap::kGcRootsFirstSubrootId));
+HeapObject* const V8HeapExplorer::kLastGcSubrootObject =
+ reinterpret_cast<HeapObject*>(
+ static_cast<intptr_t>(HeapObjectsMap::kFirstAvailableObjectId));
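+
+// Note: the four constants above are not real heap objects. Small object
+// ids are reinterpreted as pointer values so that the synthetic root,
+// GC-roots and GC-subroot entries can flow through the same HeapThing
+// allocation path as genuine HeapObjects (see AddEntry below).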
+
+
+V8HeapExplorer::V8HeapExplorer(
+ HeapSnapshot* snapshot,
+ SnapshottingProgressReportingInterface* progress,
+ v8::HeapProfiler::ObjectNameResolver* resolver)
+ : heap_(Isolate::Current()->heap()),
+ snapshot_(snapshot),
+ collection_(snapshot_->collection()),
+ progress_(progress),
+ filler_(NULL),
+ global_object_name_resolver_(resolver) {
+}
+
+
+V8HeapExplorer::~V8HeapExplorer() {
+}
+
+
+HeapEntry* V8HeapExplorer::AllocateEntry(HeapThing ptr) {
+ return AddEntry(reinterpret_cast<HeapObject*>(ptr));
+}
+
+
+HeapEntry* V8HeapExplorer::AddEntry(HeapObject* object) {
+ if (object == kInternalRootObject) {
+ snapshot_->AddRootEntry();
+ return snapshot_->root();
+ } else if (object == kGcRootsObject) {
+ HeapEntry* entry = snapshot_->AddGcRootsEntry();
+ return entry;
+ } else if (object >= kFirstGcSubrootObject && object < kLastGcSubrootObject) {
+ HeapEntry* entry = snapshot_->AddGcSubrootEntry(GetGcSubrootOrder(object));
+ return entry;
+ } else if (object->IsJSFunction()) {
+ JSFunction* func = JSFunction::cast(object);
+ SharedFunctionInfo* shared = func->shared();
+ const char* name = shared->bound() ? "native_bind" :
+ collection_->names()->GetName(String::cast(shared->name()));
+ return AddEntry(object, HeapEntry::kClosure, name);
+ } else if (object->IsJSRegExp()) {
+ JSRegExp* re = JSRegExp::cast(object);
+ return AddEntry(object,
+ HeapEntry::kRegExp,
+ collection_->names()->GetName(re->Pattern()));
+ } else if (object->IsJSObject()) {
+ const char* name = collection_->names()->GetName(
+ GetConstructorName(JSObject::cast(object)));
+ if (object->IsJSGlobalObject()) {
+ const char* tag = objects_tags_.GetTag(object);
+ if (tag != NULL) {
+ name = collection_->names()->GetFormatted("%s / %s", name, tag);
+ }
+ }
+ return AddEntry(object, HeapEntry::kObject, name);
+ } else if (object->IsString()) {
+ return AddEntry(object,
+ HeapEntry::kString,
+ collection_->names()->GetName(String::cast(object)));
+ } else if (object->IsCode()) {
+ return AddEntry(object, HeapEntry::kCode, "");
+ } else if (object->IsSharedFunctionInfo()) {
+ String* name = String::cast(SharedFunctionInfo::cast(object)->name());
+ return AddEntry(object,
+ HeapEntry::kCode,
+ collection_->names()->GetName(name));
+ } else if (object->IsScript()) {
+ Object* name = Script::cast(object)->name();
+ return AddEntry(object,
+ HeapEntry::kCode,
+ name->IsString()
+ ? collection_->names()->GetName(String::cast(name))
+ : "");
+ } else if (object->IsNativeContext()) {
+ return AddEntry(object, HeapEntry::kHidden, "system / NativeContext");
+ } else if (object->IsContext()) {
+ return AddEntry(object, HeapEntry::kHidden, "system / Context");
+ } else if (object->IsFixedArray() ||
+ object->IsFixedDoubleArray() ||
+ object->IsByteArray() ||
+ object->IsExternalArray()) {
+ return AddEntry(object, HeapEntry::kArray, "");
+ } else if (object->IsHeapNumber()) {
+ return AddEntry(object, HeapEntry::kHeapNumber, "number");
+ }
+ return AddEntry(object, HeapEntry::kHidden, GetSystemEntryName(object));
+}
+
+
+HeapEntry* V8HeapExplorer::AddEntry(HeapObject* object,
+ HeapEntry::Type type,
+ const char* name) {
+ int object_size = object->Size();
+ SnapshotObjectId object_id =
+ collection_->GetObjectId(object->address(), object_size);
+ return snapshot_->AddEntry(type, name, object_id, object_size);
+}
+
+
+class GcSubrootsEnumerator : public ObjectVisitor {
+ public:
+ GcSubrootsEnumerator(
+ SnapshotFillerInterface* filler, V8HeapExplorer* explorer)
+ : filler_(filler),
+ explorer_(explorer),
+ previous_object_count_(0),
+ object_count_(0) {
+ }
+ void VisitPointers(Object** start, Object** end) {
+ object_count_ += end - start;
+ }
+ void Synchronize(VisitorSynchronization::SyncTag tag) {
+ // Skip empty subroots.
+ if (previous_object_count_ != object_count_) {
+ previous_object_count_ = object_count_;
+ filler_->AddEntry(V8HeapExplorer::GetNthGcSubrootObject(tag), explorer_);
+ }
+ }
+ private:
+ SnapshotFillerInterface* filler_;
+ V8HeapExplorer* explorer_;
+ intptr_t previous_object_count_;
+ intptr_t object_count_;
+};
+
+
+void V8HeapExplorer::AddRootEntries(SnapshotFillerInterface* filler) {
+ filler->AddEntry(kInternalRootObject, this);
+ filler->AddEntry(kGcRootsObject, this);
+ GcSubrootsEnumerator enumerator(filler, this);
+ heap_->IterateRoots(&enumerator, VISIT_ALL);
+}
+
+
+const char* V8HeapExplorer::GetSystemEntryName(HeapObject* object) {
+ switch (object->map()->instance_type()) {
+ case MAP_TYPE:
+ switch (Map::cast(object)->instance_type()) {
+#define MAKE_STRING_MAP_CASE(instance_type, size, name, Name) \
+ case instance_type: return "system / Map (" #Name ")";
+ STRING_TYPE_LIST(MAKE_STRING_MAP_CASE)
+#undef MAKE_STRING_MAP_CASE
+ default: return "system / Map";
+ }
+ case JS_GLOBAL_PROPERTY_CELL_TYPE: return "system / JSGlobalPropertyCell";
+ case FOREIGN_TYPE: return "system / Foreign";
+ case ODDBALL_TYPE: return "system / Oddball";
+#define MAKE_STRUCT_CASE(NAME, Name, name) \
+ case NAME##_TYPE: return "system / "#Name;
+ STRUCT_LIST(MAKE_STRUCT_CASE)
+#undef MAKE_STRUCT_CASE
+ default: return "system";
+ }
+}
+
+
+int V8HeapExplorer::EstimateObjectsCount(HeapIterator* iterator) {
+ int objects_count = 0;
+ for (HeapObject* obj = iterator->next();
+ obj != NULL;
+ obj = iterator->next()) {
+ objects_count++;
+ }
+ return objects_count;
+}
+
+
+class IndexedReferencesExtractor : public ObjectVisitor {
+ public:
+ IndexedReferencesExtractor(V8HeapExplorer* generator,
+ HeapObject* parent_obj,
+ int parent)
+ : generator_(generator),
+ parent_obj_(parent_obj),
+ parent_(parent),
+ next_index_(1) {
+ }
+ void VisitPointers(Object** start, Object** end) {
+ for (Object** p = start; p < end; p++) {
+ if (CheckVisitedAndUnmark(p)) continue;
+ generator_->SetHiddenReference(parent_obj_, parent_, next_index_++, *p);
+ }
+ }
+ static void MarkVisitedField(HeapObject* obj, int offset) {
+ if (offset < 0) return;
+ Address field = obj->address() + offset;
+ ASSERT(!Memory::Object_at(field)->IsFailure());
+ ASSERT(Memory::Object_at(field)->IsHeapObject());
+ *field |= kFailureTag;
+ }
+
+ private:
+ bool CheckVisitedAndUnmark(Object** field) {
+ if ((*field)->IsFailure()) {
+ intptr_t untagged = reinterpret_cast<intptr_t>(*field) & ~kFailureTagMask;
+ *field = reinterpret_cast<Object*>(untagged | kHeapObjectTag);
+ ASSERT((*field)->IsHeapObject());
+ return true;
+ }
+ return false;
+ }
+ V8HeapExplorer* generator_;
+ HeapObject* parent_obj_;
+ int parent_;
+ int next_index_;
+};
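+
+// The named setters (e.g. SetInternalReference) tag each field they report
+// by OR-ing kFailureTag into the stored pointer via MarkVisitedField; when
+// IndexedReferencesExtractor later walks the same object,
+// CheckVisitedAndUnmark restores the pointer and skips the field, so only
+// fields without an explicit name become hidden (indexed) references.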
+
+
+void V8HeapExplorer::ExtractReferences(HeapObject* obj) {
+ HeapEntry* heap_entry = GetEntry(obj);
+ if (heap_entry == NULL) return; // No interest in this object.
+ int entry = heap_entry->index();
+
+ bool extract_indexed_refs = true;
+ if (obj->IsJSGlobalProxy()) {
+ ExtractJSGlobalProxyReferences(JSGlobalProxy::cast(obj));
+ } else if (obj->IsJSObject()) {
+ ExtractJSObjectReferences(entry, JSObject::cast(obj));
+ } else if (obj->IsString()) {
+ ExtractStringReferences(entry, String::cast(obj));
+ } else if (obj->IsContext()) {
+ ExtractContextReferences(entry, Context::cast(obj));
+ } else if (obj->IsMap()) {
+ ExtractMapReferences(entry, Map::cast(obj));
+ } else if (obj->IsSharedFunctionInfo()) {
+ ExtractSharedFunctionInfoReferences(entry, SharedFunctionInfo::cast(obj));
+ } else if (obj->IsScript()) {
+ ExtractScriptReferences(entry, Script::cast(obj));
+ } else if (obj->IsCodeCache()) {
+ ExtractCodeCacheReferences(entry, CodeCache::cast(obj));
+ } else if (obj->IsCode()) {
+ ExtractCodeReferences(entry, Code::cast(obj));
+ } else if (obj->IsJSGlobalPropertyCell()) {
+ ExtractJSGlobalPropertyCellReferences(
+ entry, JSGlobalPropertyCell::cast(obj));
+ extract_indexed_refs = false;
+ }
+ if (extract_indexed_refs) {
+ SetInternalReference(obj, entry, "map", obj->map(), HeapObject::kMapOffset);
+ IndexedReferencesExtractor refs_extractor(this, obj, entry);
+ obj->Iterate(&refs_extractor);
+ }
+}
+
+
+void V8HeapExplorer::ExtractJSGlobalProxyReferences(JSGlobalProxy* proxy) {
+ // We need to reference JS global objects from the snapshot's root.
+ // We use the JSGlobalProxy because this is what the embedder (e.g. a
+ // browser) uses for the global object.
+ Object* object = proxy->map()->prototype();
+ bool is_debug_object = false;
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ is_debug_object = object->IsGlobalObject() &&
+ Isolate::Current()->debug()->IsDebugGlobal(GlobalObject::cast(object));
+#endif
+ if (!is_debug_object) {
+ SetUserGlobalReference(object);
+ }
+}
+
+
+void V8HeapExplorer::ExtractJSObjectReferences(
+ int entry, JSObject* js_obj) {
+ HeapObject* obj = js_obj;
+ ExtractClosureReferences(js_obj, entry);
+ ExtractPropertyReferences(js_obj, entry);
+ ExtractElementReferences(js_obj, entry);
+ ExtractInternalReferences(js_obj, entry);
+ SetPropertyReference(
+ obj, entry, heap_->proto_string(), js_obj->GetPrototype());
+ if (obj->IsJSFunction()) {
+ JSFunction* js_fun = JSFunction::cast(js_obj);
+ Object* proto_or_map = js_fun->prototype_or_initial_map();
+ if (!proto_or_map->IsTheHole()) {
+ if (!proto_or_map->IsMap()) {
+ SetPropertyReference(
+ obj, entry,
+ heap_->prototype_string(), proto_or_map,
+ NULL,
+ JSFunction::kPrototypeOrInitialMapOffset);
+ } else {
+ SetPropertyReference(
+ obj, entry,
+ heap_->prototype_string(), js_fun->prototype());
+ }
+ }
+ SharedFunctionInfo* shared_info = js_fun->shared();
+ // JSFunction has either bindings or literals and never both.
+ bool bound = shared_info->bound();
+ TagObject(js_fun->literals_or_bindings(),
+ bound ? "(function bindings)" : "(function literals)");
+ SetInternalReference(js_fun, entry,
+ bound ? "bindings" : "literals",
+ js_fun->literals_or_bindings(),
+ JSFunction::kLiteralsOffset);
+ TagObject(shared_info, "(shared function info)");
+ SetInternalReference(js_fun, entry,
+ "shared", shared_info,
+ JSFunction::kSharedFunctionInfoOffset);
+ TagObject(js_fun->unchecked_context(), "(context)");
+ SetInternalReference(js_fun, entry,
+ "context", js_fun->unchecked_context(),
+ JSFunction::kContextOffset);
+ for (int i = JSFunction::kNonWeakFieldsEndOffset;
+ i < JSFunction::kSize;
+ i += kPointerSize) {
+ SetWeakReference(js_fun, entry, i, *HeapObject::RawField(js_fun, i), i);
+ }
+ } else if (obj->IsGlobalObject()) {
+ GlobalObject* global_obj = GlobalObject::cast(obj);
+ SetInternalReference(global_obj, entry,
+ "builtins", global_obj->builtins(),
+ GlobalObject::kBuiltinsOffset);
+ SetInternalReference(global_obj, entry,
+ "native_context", global_obj->native_context(),
+ GlobalObject::kNativeContextOffset);
+ SetInternalReference(global_obj, entry,
+ "global_receiver", global_obj->global_receiver(),
+ GlobalObject::kGlobalReceiverOffset);
+ }
+ TagObject(js_obj->properties(), "(object properties)");
+ SetInternalReference(obj, entry,
+ "properties", js_obj->properties(),
+ JSObject::kPropertiesOffset);
+ TagObject(js_obj->elements(), "(object elements)");
+ SetInternalReference(obj, entry,
+ "elements", js_obj->elements(),
+ JSObject::kElementsOffset);
+}
+
+
+void V8HeapExplorer::ExtractStringReferences(int entry, String* string) {
+ if (string->IsConsString()) {
+ ConsString* cs = ConsString::cast(string);
+ SetInternalReference(cs, entry, "first", cs->first(),
+ ConsString::kFirstOffset);
+ SetInternalReference(cs, entry, "second", cs->second(),
+ ConsString::kSecondOffset);
+ } else if (string->IsSlicedString()) {
+ SlicedString* ss = SlicedString::cast(string);
+ SetInternalReference(ss, entry, "parent", ss->parent(),
+ SlicedString::kParentOffset);
+ }
+}
+
+
+void V8HeapExplorer::ExtractContextReferences(int entry, Context* context) {
+#define EXTRACT_CONTEXT_FIELD(index, type, name) \
+ SetInternalReference(context, entry, #name, context->get(Context::index), \
+ FixedArray::OffsetOfElementAt(Context::index));
+ EXTRACT_CONTEXT_FIELD(CLOSURE_INDEX, JSFunction, closure);
+ EXTRACT_CONTEXT_FIELD(PREVIOUS_INDEX, Context, previous);
+ EXTRACT_CONTEXT_FIELD(EXTENSION_INDEX, Object, extension);
+ EXTRACT_CONTEXT_FIELD(GLOBAL_OBJECT_INDEX, GlobalObject, global);
+ if (context->IsNativeContext()) {
+ TagObject(context->jsfunction_result_caches(),
+ "(context func. result caches)");
+ TagObject(context->normalized_map_cache(), "(context norm. map cache)");
+ TagObject(context->runtime_context(), "(runtime context)");
+ TagObject(context->embedder_data(), "(context data)");
+ NATIVE_CONTEXT_FIELDS(EXTRACT_CONTEXT_FIELD);
+#undef EXTRACT_CONTEXT_FIELD
+ for (int i = Context::FIRST_WEAK_SLOT;
+ i < Context::NATIVE_CONTEXT_SLOTS;
+ ++i) {
+ SetWeakReference(context, entry, i, context->get(i),
+ FixedArray::OffsetOfElementAt(i));
+ }
+ }
+}
+
+
+void V8HeapExplorer::ExtractMapReferences(int entry, Map* map) {
+ SetInternalReference(map, entry,
+ "prototype", map->prototype(), Map::kPrototypeOffset);
+ SetInternalReference(map, entry,
+ "constructor", map->constructor(),
+ Map::kConstructorOffset);
+ if (map->HasTransitionArray()) {
+ TransitionArray* transitions = map->transitions();
+
+ Object* back_pointer = transitions->back_pointer_storage();
+ TagObject(transitions->back_pointer_storage(), "(back pointer)");
+ SetInternalReference(transitions, entry,
+ "backpointer", back_pointer,
+ TransitionArray::kBackPointerStorageOffset);
+ IndexedReferencesExtractor transitions_refs(this, transitions, entry);
+ transitions->Iterate(&transitions_refs);
+
+ TagObject(transitions, "(transition array)");
+ SetInternalReference(map, entry,
+ "transitions", transitions,
+ Map::kTransitionsOrBackPointerOffset);
+ } else {
+ Object* back_pointer = map->GetBackPointer();
+ TagObject(back_pointer, "(back pointer)");
+ SetInternalReference(map, entry,
+ "backpointer", back_pointer,
+ Map::kTransitionsOrBackPointerOffset);
+ }
+ DescriptorArray* descriptors = map->instance_descriptors();
+ TagObject(descriptors, "(map descriptors)");
+ SetInternalReference(map, entry,
+ "descriptors", descriptors,
+ Map::kDescriptorsOffset);
+
+ SetInternalReference(map, entry,
+ "code_cache", map->code_cache(),
+ Map::kCodeCacheOffset);
+}
+
+
+void V8HeapExplorer::ExtractSharedFunctionInfoReferences(
+ int entry, SharedFunctionInfo* shared) {
+ HeapObject* obj = shared;
+ SetInternalReference(obj, entry,
+ "name", shared->name(),
+ SharedFunctionInfo::kNameOffset);
+ TagObject(shared->code(), "(code)");
+ SetInternalReference(obj, entry,
+ "code", shared->code(),
+ SharedFunctionInfo::kCodeOffset);
+ TagObject(shared->scope_info(), "(function scope info)");
+ SetInternalReference(obj, entry,
+ "scope_info", shared->scope_info(),
+ SharedFunctionInfo::kScopeInfoOffset);
+ SetInternalReference(obj, entry,
+ "instance_class_name", shared->instance_class_name(),
+ SharedFunctionInfo::kInstanceClassNameOffset);
+ SetInternalReference(obj, entry,
+ "script", shared->script(),
+ SharedFunctionInfo::kScriptOffset);
+ TagObject(shared->construct_stub(), "(code)");
+ SetInternalReference(obj, entry,
+ "construct_stub", shared->construct_stub(),
+ SharedFunctionInfo::kConstructStubOffset);
+ SetInternalReference(obj, entry,
+ "function_data", shared->function_data(),
+ SharedFunctionInfo::kFunctionDataOffset);
+ SetInternalReference(obj, entry,
+ "debug_info", shared->debug_info(),
+ SharedFunctionInfo::kDebugInfoOffset);
+ SetInternalReference(obj, entry,
+ "inferred_name", shared->inferred_name(),
+ SharedFunctionInfo::kInferredNameOffset);
+ SetInternalReference(obj, entry,
+ "this_property_assignments",
+ shared->this_property_assignments(),
+ SharedFunctionInfo::kThisPropertyAssignmentsOffset);
+ SetWeakReference(obj, entry,
+ 1, shared->initial_map(),
+ SharedFunctionInfo::kInitialMapOffset);
+}
+
+
+void V8HeapExplorer::ExtractScriptReferences(int entry, Script* script) {
+ HeapObject* obj = script;
+ SetInternalReference(obj, entry,
+ "source", script->source(),
+ Script::kSourceOffset);
+ SetInternalReference(obj, entry,
+ "name", script->name(),
+ Script::kNameOffset);
+ SetInternalReference(obj, entry,
+ "data", script->data(),
+ Script::kDataOffset);
+ SetInternalReference(obj, entry,
+ "context_data", script->context_data(),
+ Script::kContextOffset);
+ TagObject(script->line_ends(), "(script line ends)");
+ SetInternalReference(obj, entry,
+ "line_ends", script->line_ends(),
+ Script::kLineEndsOffset);
+}
+
+
+void V8HeapExplorer::ExtractCodeCacheReferences(
+ int entry, CodeCache* code_cache) {
+ TagObject(code_cache->default_cache(), "(default code cache)");
+ SetInternalReference(code_cache, entry,
+ "default_cache", code_cache->default_cache(),
+ CodeCache::kDefaultCacheOffset);
+ TagObject(code_cache->normal_type_cache(), "(code type cache)");
+ SetInternalReference(code_cache, entry,
+ "type_cache", code_cache->normal_type_cache(),
+ CodeCache::kNormalTypeCacheOffset);
+}
+
+
+void V8HeapExplorer::ExtractCodeReferences(int entry, Code* code) {
+ TagObject(code->relocation_info(), "(code relocation info)");
+ SetInternalReference(code, entry,
+ "relocation_info", code->relocation_info(),
+ Code::kRelocationInfoOffset);
+ SetInternalReference(code, entry,
+ "handler_table", code->handler_table(),
+ Code::kHandlerTableOffset);
+ TagObject(code->deoptimization_data(), "(code deopt data)");
+ SetInternalReference(code, entry,
+ "deoptimization_data", code->deoptimization_data(),
+ Code::kDeoptimizationDataOffset);
+ if (code->kind() == Code::FUNCTION) {
+ SetInternalReference(code, entry,
+ "type_feedback_info", code->type_feedback_info(),
+ Code::kTypeFeedbackInfoOffset);
+ }
+ SetInternalReference(code, entry,
+ "gc_metadata", code->gc_metadata(),
+ Code::kGCMetadataOffset);
+}
+
+
+void V8HeapExplorer::ExtractJSGlobalPropertyCellReferences(
+ int entry, JSGlobalPropertyCell* cell) {
+ SetInternalReference(cell, entry, "value", cell->value());
+}
+
+
+void V8HeapExplorer::ExtractClosureReferences(JSObject* js_obj, int entry) {
+ if (!js_obj->IsJSFunction()) return;
+
+ JSFunction* func = JSFunction::cast(js_obj);
+ if (func->shared()->bound()) {
+ FixedArray* bindings = func->function_bindings();
+ SetNativeBindReference(js_obj, entry, "bound_this",
+ bindings->get(JSFunction::kBoundThisIndex));
+ SetNativeBindReference(js_obj, entry, "bound_function",
+ bindings->get(JSFunction::kBoundFunctionIndex));
+ for (int i = JSFunction::kBoundArgumentsStartIndex;
+ i < bindings->length(); i++) {
+ const char* reference_name = collection_->names()->GetFormatted(
+ "bound_argument_%d",
+ i - JSFunction::kBoundArgumentsStartIndex);
+ SetNativeBindReference(js_obj, entry, reference_name,
+ bindings->get(i));
+ }
+ } else {
+ Context* context = func->context()->declaration_context();
+ ScopeInfo* scope_info = context->closure()->shared()->scope_info();
+ // Add context-allocated locals.
+ int context_locals = scope_info->ContextLocalCount();
+ for (int i = 0; i < context_locals; ++i) {
+ String* local_name = scope_info->ContextLocalName(i);
+ int idx = Context::MIN_CONTEXT_SLOTS + i;
+ SetClosureReference(js_obj, entry, local_name, context->get(idx));
+ }
+
+ // Add function variable.
+ if (scope_info->HasFunctionName()) {
+ String* name = scope_info->FunctionName();
+ VariableMode mode;
+ int idx = scope_info->FunctionContextSlotIndex(name, &mode);
+ if (idx >= 0) {
+ SetClosureReference(js_obj, entry, name, context->get(idx));
+ }
+ }
+ }
+}
+
+
+void V8HeapExplorer::ExtractPropertyReferences(JSObject* js_obj, int entry) {
+ if (js_obj->HasFastProperties()) {
+ DescriptorArray* descs = js_obj->map()->instance_descriptors();
+ int real_size = js_obj->map()->NumberOfOwnDescriptors();
+ for (int i = 0; i < descs->number_of_descriptors(); i++) {
+ if (descs->GetDetails(i).descriptor_index() > real_size) continue;
+ switch (descs->GetType(i)) {
+ case FIELD: {
+ int index = descs->GetFieldIndex(i);
+
+ String* k = descs->GetKey(i);
+ if (index < js_obj->map()->inobject_properties()) {
+ Object* value = js_obj->InObjectPropertyAt(index);
+ if (k != heap_->hidden_string()) {
+ SetPropertyReference(
+ js_obj, entry,
+ k, value,
+ NULL,
+ js_obj->GetInObjectPropertyOffset(index));
+ } else {
+ TagObject(value, "(hidden properties)");
+ SetInternalReference(
+ js_obj, entry,
+ "hidden_properties", value,
+ js_obj->GetInObjectPropertyOffset(index));
+ }
+ } else {
+ Object* value = js_obj->FastPropertyAt(index);
+ if (k != heap_->hidden_string()) {
+ SetPropertyReference(js_obj, entry, k, value);
+ } else {
+ TagObject(value, "(hidden properties)");
+ SetInternalReference(js_obj, entry, "hidden_properties", value);
+ }
+ }
+ break;
+ }
+ case CONSTANT_FUNCTION:
+ SetPropertyReference(
+ js_obj, entry,
+ descs->GetKey(i), descs->GetConstantFunction(i));
+ break;
+ case CALLBACKS: {
+ Object* callback_obj = descs->GetValue(i);
+ if (callback_obj->IsAccessorPair()) {
+ AccessorPair* accessors = AccessorPair::cast(callback_obj);
+ if (Object* getter = accessors->getter()) {
+ SetPropertyReference(js_obj, entry, descs->GetKey(i),
+ getter, "get-%s");
+ }
+ if (Object* setter = accessors->setter()) {
+ SetPropertyReference(js_obj, entry, descs->GetKey(i),
+ setter, "set-%s");
+ }
+ }
+ break;
+ }
+ case NORMAL: // only in slow mode
+ case HANDLER: // only in lookup results, not in descriptors
+ case INTERCEPTOR: // only in lookup results, not in descriptors
+ break;
+ case TRANSITION:
+ case NONEXISTENT:
+ UNREACHABLE();
+ break;
+ }
+ }
+ } else {
+ StringDictionary* dictionary = js_obj->property_dictionary();
+ int length = dictionary->Capacity();
+ for (int i = 0; i < length; ++i) {
+ Object* k = dictionary->KeyAt(i);
+ if (dictionary->IsKey(k)) {
+ Object* target = dictionary->ValueAt(i);
+ // We assume that global objects can only have slow properties.
+ Object* value = target->IsJSGlobalPropertyCell()
+ ? JSGlobalPropertyCell::cast(target)->value()
+ : target;
+ if (k != heap_->hidden_string()) {
+ SetPropertyReference(js_obj, entry, String::cast(k), value);
+ } else {
+ TagObject(value, "(hidden properties)");
+ SetInternalReference(js_obj, entry, "hidden_properties", value);
+ }
+ }
+ }
+ }
+}
+
+
+void V8HeapExplorer::ExtractElementReferences(JSObject* js_obj, int entry) {
+ if (js_obj->HasFastObjectElements()) {
+ FixedArray* elements = FixedArray::cast(js_obj->elements());
+ int length = js_obj->IsJSArray() ?
+ Smi::cast(JSArray::cast(js_obj)->length())->value() :
+ elements->length();
+ for (int i = 0; i < length; ++i) {
+ if (!elements->get(i)->IsTheHole()) {
+ SetElementReference(js_obj, entry, i, elements->get(i));
+ }
+ }
+ } else if (js_obj->HasDictionaryElements()) {
+ SeededNumberDictionary* dictionary = js_obj->element_dictionary();
+ int length = dictionary->Capacity();
+ for (int i = 0; i < length; ++i) {
+ Object* k = dictionary->KeyAt(i);
+ if (dictionary->IsKey(k)) {
+ ASSERT(k->IsNumber());
+ uint32_t index = static_cast<uint32_t>(k->Number());
+ SetElementReference(js_obj, entry, index, dictionary->ValueAt(i));
+ }
+ }
+ }
+}
+
+
+void V8HeapExplorer::ExtractInternalReferences(JSObject* js_obj, int entry) {
+ int length = js_obj->GetInternalFieldCount();
+ for (int i = 0; i < length; ++i) {
+ Object* o = js_obj->GetInternalField(i);
+ SetInternalReference(
+ js_obj, entry, i, o, js_obj->GetInternalFieldOffset(i));
+ }
+}
+
+
+String* V8HeapExplorer::GetConstructorName(JSObject* object) {
+ Heap* heap = object->GetHeap();
+ if (object->IsJSFunction()) return heap->closure_string();
+ String* constructor_name = object->constructor_name();
+ if (constructor_name == heap->Object_string()) {
+ // Look up an immediate "constructor" property; if it is a function,
+ // return its name. This is for instances of binding objects, whose
+ // prototype constructor type is "Object".
+ Object* constructor_prop = NULL;
+ LookupResult result(heap->isolate());
+ object->LocalLookupRealNamedProperty(heap->constructor_string(), &result);
+ if (!result.IsFound()) return object->constructor_name();
+
+ constructor_prop = result.GetLazyValue();
+ if (constructor_prop->IsJSFunction()) {
+ Object* maybe_name =
+ JSFunction::cast(constructor_prop)->shared()->name();
+ if (maybe_name->IsString()) {
+ String* name = String::cast(maybe_name);
+ if (name->length() > 0) return name;
+ }
+ }
+ }
+ return object->constructor_name();
+}
+
+
+HeapEntry* V8HeapExplorer::GetEntry(Object* obj) {
+ if (!obj->IsHeapObject()) return NULL;
+ return filler_->FindOrAddEntry(obj, this);
+}
+
+
+class RootsReferencesExtractor : public ObjectVisitor {
+ private:
+ struct IndexTag {
+ IndexTag(int index, VisitorSynchronization::SyncTag tag)
+ : index(index), tag(tag) { }
+ int index;
+ VisitorSynchronization::SyncTag tag;
+ };
+
+ public:
+ RootsReferencesExtractor()
+ : collecting_all_references_(false),
+ previous_reference_count_(0) {
+ }
+
+ void VisitPointers(Object** start, Object** end) {
+ if (collecting_all_references_) {
+ for (Object** p = start; p < end; p++) all_references_.Add(*p);
+ } else {
+ for (Object** p = start; p < end; p++) strong_references_.Add(*p);
+ }
+ }
+
+ void SetCollectingAllReferences() { collecting_all_references_ = true; }
+
+ void FillReferences(V8HeapExplorer* explorer) {
+ ASSERT(strong_references_.length() <= all_references_.length());
+ for (int i = 0; i < reference_tags_.length(); ++i) {
+ explorer->SetGcRootsReference(reference_tags_[i].tag);
+ }
+ int strong_index = 0, all_index = 0, tags_index = 0;
+ while (all_index < all_references_.length()) {
+ if (strong_index < strong_references_.length() &&
+ strong_references_[strong_index] == all_references_[all_index]) {
+ explorer->SetGcSubrootReference(reference_tags_[tags_index].tag,
+ false,
+ all_references_[all_index++]);
+ ++strong_index;
+ } else {
+ explorer->SetGcSubrootReference(reference_tags_[tags_index].tag,
+ true,
+ all_references_[all_index++]);
+ }
+ if (reference_tags_[tags_index].index == all_index) ++tags_index;
+ }
+ }
+
+ void Synchronize(VisitorSynchronization::SyncTag tag) {
+ if (collecting_all_references_ &&
+ previous_reference_count_ != all_references_.length()) {
+ previous_reference_count_ = all_references_.length();
+ reference_tags_.Add(IndexTag(previous_reference_count_, tag));
+ }
+ }
+
+ private:
+ bool collecting_all_references_;
+ List<Object*> strong_references_;
+ List<Object*> all_references_;
+ int previous_reference_count_;
+ List<IndexTag> reference_tags_;
+};
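+
+// The extractor is run twice over the roots (see IterateAndExtractReferences
+// below): once visiting only strong roots, then visiting all of them.
+// FillReferences walks both lists in lock-step; a reference present in the
+// "all" list but missing from the strong list at the current position must
+// have been reached through a weak handle and is emitted as a weak edge.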
+
+
+bool V8HeapExplorer::IterateAndExtractReferences(
+ SnapshotFillerInterface* filler) {
+ HeapIterator iterator(heap_, HeapIterator::kFilterUnreachable);
+
+ filler_ = filler;
+ bool interrupted = false;
+
+ // Heap iteration with filtering must be finished in any case, so even
+ // after an interruption we keep iterating and only skip the extraction.
+ for (HeapObject* obj = iterator.next();
+ obj != NULL;
+ obj = iterator.next(), progress_->ProgressStep()) {
+ if (!interrupted) {
+ ExtractReferences(obj);
+ if (!progress_->ProgressReport(false)) interrupted = true;
+ }
+ }
+ if (interrupted) {
+ filler_ = NULL;
+ return false;
+ }
+
+ SetRootGcRootsReference();
+ RootsReferencesExtractor extractor;
+ heap_->IterateRoots(&extractor, VISIT_ONLY_STRONG);
+ extractor.SetCollectingAllReferences();
+ heap_->IterateRoots(&extractor, VISIT_ALL);
+ extractor.FillReferences(this);
+ filler_ = NULL;
+ return progress_->ProgressReport(true);
+}
+
+
+bool V8HeapExplorer::IsEssentialObject(Object* object) {
+ return object->IsHeapObject()
+ && !object->IsOddball()
+ && object != heap_->empty_byte_array()
+ && object != heap_->empty_fixed_array()
+ && object != heap_->empty_descriptor_array()
+ && object != heap_->fixed_array_map()
+ && object != heap_->global_property_cell_map()
+ && object != heap_->shared_function_info_map()
+ && object != heap_->free_space_map()
+ && object != heap_->one_pointer_filler_map()
+ && object != heap_->two_pointer_filler_map();
+}
+
+
+void V8HeapExplorer::SetClosureReference(HeapObject* parent_obj,
+ int parent_entry,
+ String* reference_name,
+ Object* child_obj) {
+ HeapEntry* child_entry = GetEntry(child_obj);
+ if (child_entry != NULL) {
+ filler_->SetNamedReference(HeapGraphEdge::kContextVariable,
+ parent_entry,
+ collection_->names()->GetName(reference_name),
+ child_entry);
+ }
+}
+
+
+void V8HeapExplorer::SetNativeBindReference(HeapObject* parent_obj,
+ int parent_entry,
+ const char* reference_name,
+ Object* child_obj) {
+ HeapEntry* child_entry = GetEntry(child_obj);
+ if (child_entry != NULL) {
+ filler_->SetNamedReference(HeapGraphEdge::kShortcut,
+ parent_entry,
+ reference_name,
+ child_entry);
+ }
+}
+
+
+void V8HeapExplorer::SetElementReference(HeapObject* parent_obj,
+ int parent_entry,
+ int index,
+ Object* child_obj) {
+ HeapEntry* child_entry = GetEntry(child_obj);
+ if (child_entry != NULL) {
+ filler_->SetIndexedReference(HeapGraphEdge::kElement,
+ parent_entry,
+ index,
+ child_entry);
+ }
+}
+
+
+void V8HeapExplorer::SetInternalReference(HeapObject* parent_obj,
+ int parent_entry,
+ const char* reference_name,
+ Object* child_obj,
+ int field_offset) {
+ HeapEntry* child_entry = GetEntry(child_obj);
+ if (child_entry == NULL) return;
+ if (IsEssentialObject(child_obj)) {
+ filler_->SetNamedReference(HeapGraphEdge::kInternal,
+ parent_entry,
+ reference_name,
+ child_entry);
+ }
+ IndexedReferencesExtractor::MarkVisitedField(parent_obj, field_offset);
+}
+
+
+void V8HeapExplorer::SetInternalReference(HeapObject* parent_obj,
+ int parent_entry,
+ int index,
+ Object* child_obj,
+ int field_offset) {
+ HeapEntry* child_entry = GetEntry(child_obj);
+ if (child_entry == NULL) return;
+ if (IsEssentialObject(child_obj)) {
+ filler_->SetNamedReference(HeapGraphEdge::kInternal,
+ parent_entry,
+ collection_->names()->GetName(index),
+ child_entry);
+ }
+ IndexedReferencesExtractor::MarkVisitedField(parent_obj, field_offset);
+}
+
+
+void V8HeapExplorer::SetHiddenReference(HeapObject* parent_obj,
+ int parent_entry,
+ int index,
+ Object* child_obj) {
+ HeapEntry* child_entry = GetEntry(child_obj);
+ if (child_entry != NULL && IsEssentialObject(child_obj)) {
+ filler_->SetIndexedReference(HeapGraphEdge::kHidden,
+ parent_entry,
+ index,
+ child_entry);
+ }
+}
+
+
+void V8HeapExplorer::SetWeakReference(HeapObject* parent_obj,
+ int parent_entry,
+ int index,
+ Object* child_obj,
+ int field_offset) {
+ HeapEntry* child_entry = GetEntry(child_obj);
+ if (child_entry != NULL) {
+ filler_->SetIndexedReference(HeapGraphEdge::kWeak,
+ parent_entry,
+ index,
+ child_entry);
+ IndexedReferencesExtractor::MarkVisitedField(parent_obj, field_offset);
+ }
+}
+
+
+void V8HeapExplorer::SetPropertyReference(HeapObject* parent_obj,
+ int parent_entry,
+ String* reference_name,
+ Object* child_obj,
+ const char* name_format_string,
+ int field_offset) {
+ HeapEntry* child_entry = GetEntry(child_obj);
+ if (child_entry != NULL) {
+ HeapGraphEdge::Type type = reference_name->length() > 0 ?
+ HeapGraphEdge::kProperty : HeapGraphEdge::kInternal;
+ const char* name = name_format_string != NULL ?
+ collection_->names()->GetFormatted(
+ name_format_string,
+ *reference_name->ToCString(DISALLOW_NULLS,
+ ROBUST_STRING_TRAVERSAL)) :
+ collection_->names()->GetName(reference_name);
+
+ filler_->SetNamedReference(type,
+ parent_entry,
+ name,
+ child_entry);
+ IndexedReferencesExtractor::MarkVisitedField(parent_obj, field_offset);
+ }
+}
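+
+// Example: for accessor pairs the callers pass "get-%s" / "set-%s" as
+// name_format_string, so a getter for property "x" is shown as the edge
+// "get-x"; with a NULL format the property name is used as-is.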
+
+
+void V8HeapExplorer::SetRootGcRootsReference() {
+ filler_->SetIndexedAutoIndexReference(
+ HeapGraphEdge::kElement,
+ snapshot_->root()->index(),
+ snapshot_->gc_roots());
+}
+
+
+void V8HeapExplorer::SetUserGlobalReference(Object* child_obj) {
+ HeapEntry* child_entry = GetEntry(child_obj);
+ ASSERT(child_entry != NULL);
+ filler_->SetNamedAutoIndexReference(
+ HeapGraphEdge::kShortcut,
+ snapshot_->root()->index(),
+ child_entry);
+}
+
+
+void V8HeapExplorer::SetGcRootsReference(VisitorSynchronization::SyncTag tag) {
+ filler_->SetIndexedAutoIndexReference(
+ HeapGraphEdge::kElement,
+ snapshot_->gc_roots()->index(),
+ snapshot_->gc_subroot(tag));
+}
+
+
+void V8HeapExplorer::SetGcSubrootReference(
+ VisitorSynchronization::SyncTag tag, bool is_weak, Object* child_obj) {
+ HeapEntry* child_entry = GetEntry(child_obj);
+ if (child_entry != NULL) {
+ const char* name = GetStrongGcSubrootName(child_obj);
+ if (name != NULL) {
+ filler_->SetNamedReference(
+ HeapGraphEdge::kInternal,
+ snapshot_->gc_subroot(tag)->index(),
+ name,
+ child_entry);
+ } else {
+ filler_->SetIndexedAutoIndexReference(
+ is_weak ? HeapGraphEdge::kWeak : HeapGraphEdge::kElement,
+ snapshot_->gc_subroot(tag)->index(),
+ child_entry);
+ }
+ }
+}
+
+
+const char* V8HeapExplorer::GetStrongGcSubrootName(Object* object) {
+ if (strong_gc_subroot_names_.is_empty()) {
+#define NAME_ENTRY(name) strong_gc_subroot_names_.SetTag(heap_->name(), #name);
+#define ROOT_NAME(type, name, camel_name) NAME_ENTRY(name)
+ STRONG_ROOT_LIST(ROOT_NAME)
+#undef ROOT_NAME
+#define STRUCT_MAP_NAME(NAME, Name, name) NAME_ENTRY(name##_map)
+ STRUCT_LIST(STRUCT_MAP_NAME)
+#undef STRUCT_MAP_NAME
+#define STRING_NAME(name, str) NAME_ENTRY(name)
+ INTERNALIZED_STRING_LIST(STRING_NAME)
+#undef STRING_NAME
+#undef NAME_ENTRY
+ CHECK(!strong_gc_subroot_names_.is_empty());
+ }
+ return strong_gc_subroot_names_.GetTag(object);
+}
+
+
+void V8HeapExplorer::TagObject(Object* obj, const char* tag) {
+ if (IsEssentialObject(obj)) {
+ HeapEntry* entry = GetEntry(obj);
+ if (entry->name()[0] == '\0') {
+ entry->set_name(tag);
+ }
+ }
+}
+
+
+class GlobalObjectsEnumerator : public ObjectVisitor {
+ public:
+ virtual void VisitPointers(Object** start, Object** end) {
+ for (Object** p = start; p < end; p++) {
+ if ((*p)->IsNativeContext()) {
+ Context* context = Context::cast(*p);
+ JSObject* proxy = context->global_proxy();
+ if (proxy->IsJSGlobalProxy()) {
+ Object* global = proxy->map()->prototype();
+ if (global->IsJSGlobalObject()) {
+ objects_.Add(Handle<JSGlobalObject>(JSGlobalObject::cast(global)));
+ }
+ }
+ }
+ }
+ }
+ int count() { return objects_.length(); }
+ Handle<JSGlobalObject>& at(int i) { return objects_[i]; }
+
+ private:
+ List<Handle<JSGlobalObject> > objects_;
+};
+
+
+// Modifies heap. Must not be run during heap traversal.
+void V8HeapExplorer::TagGlobalObjects() {
+ Isolate* isolate = Isolate::Current();
+ HandleScope scope(isolate);
+ GlobalObjectsEnumerator enumerator;
+ isolate->global_handles()->IterateAllRoots(&enumerator);
+ const char** urls = NewArray<const char*>(enumerator.count());
+ for (int i = 0, l = enumerator.count(); i < l; ++i) {
+ if (global_object_name_resolver_) {
+ HandleScope scope(isolate);
+ Handle<JSGlobalObject> global_obj = enumerator.at(i);
+ urls[i] = global_object_name_resolver_->GetName(
+ Utils::ToLocal(Handle<JSObject>::cast(global_obj)));
+ } else {
+ urls[i] = NULL;
+ }
+ }
+
+ AssertNoAllocation no_allocation;
+ for (int i = 0, l = enumerator.count(); i < l; ++i) {
+ objects_tags_.SetTag(*enumerator.at(i), urls[i]);
+ }
+
+ DeleteArray(urls);
+}
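+
+// The resolver callbacks above may allocate (they call back into the
+// embedder), so all names are gathered first and the raw-pointer tags are
+// applied afterwards under AssertNoAllocation, when objects can no longer
+// move.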
+
+
+class GlobalHandlesExtractor : public ObjectVisitor {
+ public:
+ explicit GlobalHandlesExtractor(NativeObjectsExplorer* explorer)
+ : explorer_(explorer) {}
+ virtual ~GlobalHandlesExtractor() {}
+ virtual void VisitPointers(Object** start, Object** end) {
+ UNREACHABLE();
+ }
+ virtual void VisitEmbedderReference(Object** p, uint16_t class_id) {
+ explorer_->VisitSubtreeWrapper(p, class_id);
+ }
+ private:
+ NativeObjectsExplorer* explorer_;
+};
+
+
+class BasicHeapEntriesAllocator : public HeapEntriesAllocator {
+ public:
+ BasicHeapEntriesAllocator(
+ HeapSnapshot* snapshot,
+ HeapEntry::Type entries_type)
+ : snapshot_(snapshot),
+ collection_(snapshot_->collection()),
+ entries_type_(entries_type) {
+ }
+ virtual HeapEntry* AllocateEntry(HeapThing ptr);
+ private:
+ HeapSnapshot* snapshot_;
+ HeapSnapshotsCollection* collection_;
+ HeapEntry::Type entries_type_;
+};
+
+
+HeapEntry* BasicHeapEntriesAllocator::AllocateEntry(HeapThing ptr) {
+ v8::RetainedObjectInfo* info = reinterpret_cast<v8::RetainedObjectInfo*>(ptr);
+ intptr_t elements = info->GetElementCount();
+ intptr_t size = info->GetSizeInBytes();
+ const char* name = elements != -1
+ ? collection_->names()->GetFormatted(
+ "%s / %" V8_PTR_PREFIX "d entries", info->GetLabel(), elements)
+ : collection_->names()->GetCopy(info->GetLabel());
+ return snapshot_->AddEntry(
+ entries_type_,
+ name,
+ HeapObjectsMap::GenerateId(info),
+ size != -1 ? static_cast<int>(size) : 0);
+}
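+
+// Example: a v8::RetainedObjectInfo with label "Wrapper" and 3 elements
+// produces a native entry named "Wrapper / 3 entries"; with an unknown
+// element count (-1) the label is used verbatim.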
+
+
+NativeObjectsExplorer::NativeObjectsExplorer(
+ HeapSnapshot* snapshot, SnapshottingProgressReportingInterface* progress)
+ : snapshot_(snapshot),
+ collection_(snapshot_->collection()),
+ progress_(progress),
+ embedder_queried_(false),
+ objects_by_info_(RetainedInfosMatch),
+ native_groups_(StringsMatch),
+ filler_(NULL) {
+ synthetic_entries_allocator_ =
+ new BasicHeapEntriesAllocator(snapshot, HeapEntry::kSynthetic);
+ native_entries_allocator_ =
+ new BasicHeapEntriesAllocator(snapshot, HeapEntry::kNative);
+}
+
+
+NativeObjectsExplorer::~NativeObjectsExplorer() {
+ for (HashMap::Entry* p = objects_by_info_.Start();
+ p != NULL;
+ p = objects_by_info_.Next(p)) {
+ v8::RetainedObjectInfo* info =
+ reinterpret_cast<v8::RetainedObjectInfo*>(p->key);
+ info->Dispose();
+ List<HeapObject*>* objects =
+ reinterpret_cast<List<HeapObject*>* >(p->value);
+ delete objects;
+ }
+ for (HashMap::Entry* p = native_groups_.Start();
+ p != NULL;
+ p = native_groups_.Next(p)) {
+ v8::RetainedObjectInfo* info =
+ reinterpret_cast<v8::RetainedObjectInfo*>(p->value);
+ info->Dispose();
+ }
+ delete synthetic_entries_allocator_;
+ delete native_entries_allocator_;
+}
+
+
+int NativeObjectsExplorer::EstimateObjectsCount() {
+ FillRetainedObjects();
+ return objects_by_info_.occupancy();
+}
+
+
+void NativeObjectsExplorer::FillRetainedObjects() {
+ if (embedder_queried_) return;
+ Isolate* isolate = Isolate::Current();
+ const GCType major_gc_type = kGCTypeMarkSweepCompact;
+ // Record objects that are joined into ObjectGroups.
+ isolate->heap()->CallGCPrologueCallbacks(major_gc_type);
+ List<ObjectGroup*>* groups = isolate->global_handles()->object_groups();
+ for (int i = 0; i < groups->length(); ++i) {
+ ObjectGroup* group = groups->at(i);
+ if (group->info_ == NULL) continue;
+ List<HeapObject*>* list = GetListMaybeDisposeInfo(group->info_);
+ for (size_t j = 0; j < group->length_; ++j) {
+ HeapObject* obj = HeapObject::cast(*group->objects_[j]);
+ list->Add(obj);
+ in_groups_.Insert(obj);
+ }
+ group->info_ = NULL; // Acquire info object ownership.
+ }
+ isolate->global_handles()->RemoveObjectGroups();
+ isolate->heap()->CallGCEpilogueCallbacks(major_gc_type);
+ // Record objects that are not in ObjectGroups, but have class ID.
+ GlobalHandlesExtractor extractor(this);
+ isolate->global_handles()->IterateAllRootsWithClassIds(&extractor);
+ embedder_queried_ = true;
+}
+
+void NativeObjectsExplorer::FillImplicitReferences() {
+ Isolate* isolate = Isolate::Current();
+ List<ImplicitRefGroup*>* groups =
+ isolate->global_handles()->implicit_ref_groups();
+ for (int i = 0; i < groups->length(); ++i) {
+ ImplicitRefGroup* group = groups->at(i);
+ HeapObject* parent = *group->parent_;
+ int parent_entry =
+ filler_->FindOrAddEntry(parent, native_entries_allocator_)->index();
+ ASSERT(parent_entry != HeapEntry::kNoEntry);
+ Object*** children = group->children_;
+ for (size_t j = 0; j < group->length_; ++j) {
+ Object* child = *children[j];
+ HeapEntry* child_entry =
+ filler_->FindOrAddEntry(child, native_entries_allocator_);
+ filler_->SetNamedReference(
+ HeapGraphEdge::kInternal,
+ parent_entry,
+ "native",
+ child_entry);
+ }
+ }
+ isolate->global_handles()->RemoveImplicitRefGroups();
+}
+
+List<HeapObject*>* NativeObjectsExplorer::GetListMaybeDisposeInfo(
+ v8::RetainedObjectInfo* info) {
+ HashMap::Entry* entry =
+ objects_by_info_.Lookup(info, InfoHash(info), true);
+ if (entry->value != NULL) {
+ info->Dispose();
+ } else {
+ entry->value = new List<HeapObject*>(4);
+ }
+ return reinterpret_cast<List<HeapObject*>* >(entry->value);
+}
+
+
+bool NativeObjectsExplorer::IterateAndExtractReferences(
+ SnapshotFillerInterface* filler) {
+ filler_ = filler;
+ FillRetainedObjects();
+ FillImplicitReferences();
+ if (EstimateObjectsCount() > 0) {
+ for (HashMap::Entry* p = objects_by_info_.Start();
+ p != NULL;
+ p = objects_by_info_.Next(p)) {
+ v8::RetainedObjectInfo* info =
+ reinterpret_cast<v8::RetainedObjectInfo*>(p->key);
+ SetNativeRootReference(info);
+ List<HeapObject*>* objects =
+ reinterpret_cast<List<HeapObject*>* >(p->value);
+ for (int i = 0; i < objects->length(); ++i) {
+ SetWrapperNativeReferences(objects->at(i), info);
+ }
+ }
+ SetRootNativeRootsReference();
+ }
+ filler_ = NULL;
+ return true;
+}
+
+
+class NativeGroupRetainedObjectInfo : public v8::RetainedObjectInfo {
+ public:
+ explicit NativeGroupRetainedObjectInfo(const char* label)
+ : disposed_(false),
+ hash_(reinterpret_cast<intptr_t>(label)),
+ label_(label) {
+ }
+
+ virtual ~NativeGroupRetainedObjectInfo() {}
+ virtual void Dispose() {
+ CHECK(!disposed_);
+ disposed_ = true;
+ delete this;
+ }
+ virtual bool IsEquivalent(RetainedObjectInfo* other) {
+ return hash_ == other->GetHash() && !strcmp(label_, other->GetLabel());
+ }
+ virtual intptr_t GetHash() { return hash_; }
+ virtual const char* GetLabel() { return label_; }
+
+ private:
+ bool disposed_;
+ intptr_t hash_;
+ const char* label_;
+};
+
+
+NativeGroupRetainedObjectInfo* NativeObjectsExplorer::FindOrAddGroupInfo(
+ const char* label) {
+ const char* label_copy = collection_->names()->GetCopy(label);
+ uint32_t hash = StringHasher::HashSequentialString(
+ label_copy,
+ static_cast<int>(strlen(label_copy)),
+ HEAP->HashSeed());
+ HashMap::Entry* entry = native_groups_.Lookup(const_cast<char*>(label_copy),
+ hash, true);
+ if (entry->value == NULL) {
+ entry->value = new NativeGroupRetainedObjectInfo(label);
+ }
+ return static_cast<NativeGroupRetainedObjectInfo*>(entry->value);
+}
+
+
+void NativeObjectsExplorer::SetNativeRootReference(
+ v8::RetainedObjectInfo* info) {
+ HeapEntry* child_entry =
+ filler_->FindOrAddEntry(info, native_entries_allocator_);
+ ASSERT(child_entry != NULL);
+ NativeGroupRetainedObjectInfo* group_info =
+ FindOrAddGroupInfo(info->GetGroupLabel());
+ HeapEntry* group_entry =
+ filler_->FindOrAddEntry(group_info, synthetic_entries_allocator_);
+ filler_->SetNamedAutoIndexReference(
+ HeapGraphEdge::kInternal,
+ group_entry->index(),
+ child_entry);
+}
+
+
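+// Links a wrapper object and its native counterpart in both directions:
+// wrapper -> info via a named internal "native" edge, and info -> wrapper
+// via an auto-indexed element edge.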
+void NativeObjectsExplorer::SetWrapperNativeReferences(
+ HeapObject* wrapper, v8::RetainedObjectInfo* info) {
+ HeapEntry* wrapper_entry = filler_->FindEntry(wrapper);
+ ASSERT(wrapper_entry != NULL);
+ HeapEntry* info_entry =
+ filler_->FindOrAddEntry(info, native_entries_allocator_);
+ ASSERT(info_entry != NULL);
+ filler_->SetNamedReference(HeapGraphEdge::kInternal,
+ wrapper_entry->index(),
+ "native",
+ info_entry);
+ filler_->SetIndexedAutoIndexReference(HeapGraphEdge::kElement,
+ info_entry->index(),
+ wrapper_entry);
+}
+
+
+void NativeObjectsExplorer::SetRootNativeRootsReference() {
+ for (HashMap::Entry* entry = native_groups_.Start();
+ entry;
+ entry = native_groups_.Next(entry)) {
+ NativeGroupRetainedObjectInfo* group_info =
+ static_cast<NativeGroupRetainedObjectInfo*>(entry->value);
+ HeapEntry* group_entry =
+ filler_->FindOrAddEntry(group_info, native_entries_allocator_);
+ ASSERT(group_entry != NULL);
+ filler_->SetIndexedAutoIndexReference(
+ HeapGraphEdge::kElement,
+ snapshot_->root()->index(),
+ group_entry);
+ }
+}
+
+
+void NativeObjectsExplorer::VisitSubtreeWrapper(Object** p, uint16_t class_id) {
+ if (in_groups_.Contains(*p)) return;
+ Isolate* isolate = Isolate::Current();
+ v8::RetainedObjectInfo* info =
+ isolate->heap_profiler()->ExecuteWrapperClassCallback(class_id, p);
+ if (info == NULL) return;
+ GetListMaybeDisposeInfo(info)->Add(HeapObject::cast(*p));
+}
+
+
+class SnapshotFiller : public SnapshotFillerInterface {
+ public:
+ explicit SnapshotFiller(HeapSnapshot* snapshot, HeapEntriesMap* entries)
+ : snapshot_(snapshot),
+ collection_(snapshot->collection()),
+ entries_(entries) { }
+ HeapEntry* AddEntry(HeapThing ptr, HeapEntriesAllocator* allocator) {
+ HeapEntry* entry = allocator->AllocateEntry(ptr);
+ entries_->Pair(ptr, entry->index());
+ return entry;
+ }
+ HeapEntry* FindEntry(HeapThing ptr) {
+ int index = entries_->Map(ptr);
+ return index != HeapEntry::kNoEntry ? &snapshot_->entries()[index] : NULL;
+ }
+ HeapEntry* FindOrAddEntry(HeapThing ptr, HeapEntriesAllocator* allocator) {
+ HeapEntry* entry = FindEntry(ptr);
+ return entry != NULL ? entry : AddEntry(ptr, allocator);
+ }
+ void SetIndexedReference(HeapGraphEdge::Type type,
+ int parent,
+ int index,
+ HeapEntry* child_entry) {
+ HeapEntry* parent_entry = &snapshot_->entries()[parent];
+ parent_entry->SetIndexedReference(type, index, child_entry);
+ }
+ void SetIndexedAutoIndexReference(HeapGraphEdge::Type type,
+ int parent,
+ HeapEntry* child_entry) {
+ HeapEntry* parent_entry = &snapshot_->entries()[parent];
+ int index = parent_entry->children_count() + 1;
+ parent_entry->SetIndexedReference(type, index, child_entry);
+ }
+ void SetNamedReference(HeapGraphEdge::Type type,
+ int parent,
+ const char* reference_name,
+ HeapEntry* child_entry) {
+ HeapEntry* parent_entry = &snapshot_->entries()[parent];
+ parent_entry->SetNamedReference(type, reference_name, child_entry);
+ }
+ void SetNamedAutoIndexReference(HeapGraphEdge::Type type,
+ int parent,
+ HeapEntry* child_entry) {
+ HeapEntry* parent_entry = &snapshot_->entries()[parent];
+ int index = parent_entry->children_count() + 1;
+ parent_entry->SetNamedReference(
+ type,
+ collection_->names()->GetName(index),
+ child_entry);
+ }
+
+ private:
+ HeapSnapshot* snapshot_;
+ HeapSnapshotsCollection* collection_;
+ HeapEntriesMap* entries_;
+};
+
+
+HeapSnapshotGenerator::HeapSnapshotGenerator(
+ HeapSnapshot* snapshot,
+ v8::ActivityControl* control,
+ v8::HeapProfiler::ObjectNameResolver* resolver,
+ Heap* heap)
+ : snapshot_(snapshot),
+ control_(control),
+ v8_heap_explorer_(snapshot_, this, resolver),
+ dom_explorer_(snapshot_, this),
+ heap_(heap) {
+}
+
+
+bool HeapSnapshotGenerator::GenerateSnapshot() {
+ v8_heap_explorer_.TagGlobalObjects();
+
+ // TODO(1562) Profiler assumes that any object that is in the heap after
+ // full GC is reachable from the root when computing dominators.
+ // This is not true for weakly reachable objects.
+ // As a temporary solution we call GC twice.
+ Isolate::Current()->heap()->CollectAllGarbage(
+ Heap::kMakeHeapIterableMask,
+ "HeapSnapshotGenerator::GenerateSnapshot");
+ Isolate::Current()->heap()->CollectAllGarbage(
+ Heap::kMakeHeapIterableMask,
+ "HeapSnapshotGenerator::GenerateSnapshot");
+
+#ifdef VERIFY_HEAP
+ Heap* debug_heap = Isolate::Current()->heap();
+ CHECK(!debug_heap->old_data_space()->was_swept_conservatively());
+ CHECK(!debug_heap->old_pointer_space()->was_swept_conservatively());
+ CHECK(!debug_heap->code_space()->was_swept_conservatively());
+ CHECK(!debug_heap->cell_space()->was_swept_conservatively());
+ CHECK(!debug_heap->map_space()->was_swept_conservatively());
+#endif
+
+ // The following code uses heap iterators, so we want the heap to be
+ // stable. It should follow TagGlobalObjects as that can allocate.
+ AssertNoAllocation no_alloc;
+
+#ifdef VERIFY_HEAP
+ debug_heap->Verify();
+#endif
+
+ SetProgressTotal(1); // 1 pass.
+
+#ifdef VERIFY_HEAP
+ debug_heap->Verify();
+#endif
+
+ if (!FillReferences()) return false;
+
+ snapshot_->FillChildren();
+ snapshot_->RememberLastJSObjectId();
+
+ progress_counter_ = progress_total_;
+ if (!ProgressReport(true)) return false;
+ return true;
+}
+
+
+void HeapSnapshotGenerator::ProgressStep() {
+ ++progress_counter_;
+}
+
+
+bool HeapSnapshotGenerator::ProgressReport(bool force) {
+ const int kProgressReportGranularity = 10000;
+ if (control_ != NULL
+ && (force || progress_counter_ % kProgressReportGranularity == 0)) {
+ return
+ control_->ReportProgressValue(progress_counter_, progress_total_) ==
+ v8::ActivityControl::kContinue;
+ }
+ return true;
+}
+
+
+void HeapSnapshotGenerator::SetProgressTotal(int iterations_count) {
+ if (control_ == NULL) return;
+ HeapIterator iterator(heap_, HeapIterator::kFilterUnreachable);
+ progress_total_ = iterations_count * (
+ v8_heap_explorer_.EstimateObjectsCount(&iterator) +
+ dom_explorer_.EstimateObjectsCount());
+ progress_counter_ = 0;
+}
+
+
+bool HeapSnapshotGenerator::FillReferences() {
+ SnapshotFiller filler(snapshot_, &entries_);
+ v8_heap_explorer_.AddRootEntries(&filler);
+ return v8_heap_explorer_.IterateAndExtractReferences(&filler)
+ && dom_explorer_.IterateAndExtractReferences(&filler);
+}
+
+
+template<int bytes> struct MaxDecimalDigitsIn;
+template<> struct MaxDecimalDigitsIn<4> {
+ static const int kSigned = 11;
+ static const int kUnsigned = 10;
+};
+template<> struct MaxDecimalDigitsIn<8> {
+ static const int kSigned = 20;
+ static const int kUnsigned = 20;
+};
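+// For example, a 4-byte unsigned fits in 10 decimal digits (4294967295)
+// and a signed one needs 11 with the sign (-2147483648); both 8-byte
+// variants need 20.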
+
+
+class OutputStreamWriter {
+ public:
+ explicit OutputStreamWriter(v8::OutputStream* stream)
+ : stream_(stream),
+ chunk_size_(stream->GetChunkSize()),
+ chunk_(chunk_size_),
+ chunk_pos_(0),
+ aborted_(false) {
+ ASSERT(chunk_size_ > 0);
+ }
+ bool aborted() { return aborted_; }
+ void AddCharacter(char c) {
+ ASSERT(c != '\0');
+ ASSERT(chunk_pos_ < chunk_size_);
+ chunk_[chunk_pos_++] = c;
+ MaybeWriteChunk();
+ }
+ void AddString(const char* s) {
+ AddSubstring(s, StrLength(s));
+ }
+ void AddSubstring(const char* s, int n) {
+ if (n <= 0) return;
+ ASSERT(static_cast<size_t>(n) <= strlen(s));
+ const char* s_end = s + n;
+ while (s < s_end) {
+ int s_chunk_size = Min(
+ chunk_size_ - chunk_pos_, static_cast<int>(s_end - s));
+ ASSERT(s_chunk_size > 0);
+ memcpy(chunk_.start() + chunk_pos_, s, s_chunk_size);
+ s += s_chunk_size;
+ chunk_pos_ += s_chunk_size;
+ MaybeWriteChunk();
+ }
+ }
+ void AddNumber(unsigned n) { AddNumberImpl<unsigned>(n, "%u"); }
+ void Finalize() {
+ if (aborted_) return;
+ ASSERT(chunk_pos_ < chunk_size_);
+ if (chunk_pos_ != 0) {
+ WriteChunk();
+ }
+ stream_->EndOfStream();
+ }
+
+ private:
+ template<typename T>
+ void AddNumberImpl(T n, const char* format) {
+ // Buffer for the longest value plus trailing \0
+ static const int kMaxNumberSize =
+ MaxDecimalDigitsIn<sizeof(T)>::kUnsigned + 1;
+ if (chunk_size_ - chunk_pos_ >= kMaxNumberSize) {
+ int result = OS::SNPrintF(
+ chunk_.SubVector(chunk_pos_, chunk_size_), format, n);
+ ASSERT(result != -1);
+ chunk_pos_ += result;
+ MaybeWriteChunk();
+ } else {
+ EmbeddedVector<char, kMaxNumberSize> buffer;
+ int result = OS::SNPrintF(buffer, format, n);
+ USE(result);
+ ASSERT(result != -1);
+ AddString(buffer.start());
+ }
+ }
+ void MaybeWriteChunk() {
+ ASSERT(chunk_pos_ <= chunk_size_);
+ if (chunk_pos_ == chunk_size_) {
+ WriteChunk();
+ }
+ }
+ void WriteChunk() {
+ if (aborted_) return;
+ if (stream_->WriteAsciiChunk(chunk_.start(), chunk_pos_) ==
+ v8::OutputStream::kAbort) aborted_ = true;
+ chunk_pos_ = 0;
+ }
+
+ v8::OutputStream* stream_;
+ int chunk_size_;
+ ScopedVector<char> chunk_;
+ int chunk_pos_;
+ bool aborted_;
+};
+
+
+// type, name|index, to_node.
+const int HeapSnapshotJSONSerializer::kEdgeFieldsCount = 3;
+// type, name, id, self_size, edge_count.
+const int HeapSnapshotJSONSerializer::kNodeFieldsCount = 5;
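+
+// For illustration, with these field counts a node serializes as five
+// consecutive numbers and an edge as three, e.g. (made-up values):
+//   nodes: ...,3,1,42,16,2,...  type=object, name=strings[1], id=42,
+//                               self_size=16, edge_count=2
+//   edges: ...,2,7,0,...        type=property, name=strings[7], to_node=0
+// An edge's to_node holds entry_index(to) == index * kNodeFieldsCount,
+// an offset into the flat "nodes" array rather than an entry number.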
+
+void HeapSnapshotJSONSerializer::Serialize(v8::OutputStream* stream) {
+ ASSERT(writer_ == NULL);
+ writer_ = new OutputStreamWriter(stream);
+
+ HeapSnapshot* original_snapshot = NULL;
+ if (snapshot_->RawSnapshotSize() >=
+ SnapshotSizeConstants<kPointerSize>::kMaxSerializableSnapshotRawSize) {
+ // The snapshot is too big. Serialize a fake snapshot.
+ original_snapshot = snapshot_;
+ snapshot_ = CreateFakeSnapshot();
+ }
+
+ SerializeImpl();
+
+ delete writer_;
+ writer_ = NULL;
+
+ if (original_snapshot != NULL) {
+ delete snapshot_;
+ snapshot_ = original_snapshot;
+ }
+}
+
+
+HeapSnapshot* HeapSnapshotJSONSerializer::CreateFakeSnapshot() {
+ HeapSnapshot* result = new HeapSnapshot(snapshot_->collection(),
+ HeapSnapshot::kFull,
+ snapshot_->title(),
+ snapshot_->uid());
+ result->AddRootEntry();
+ const char* text = snapshot_->collection()->names()->GetFormatted(
+ "The snapshot is too big. "
+ "Maximum snapshot size is %" V8_PTR_PREFIX "u MB. "
+ "Actual snapshot size is %" V8_PTR_PREFIX "u MB.",
+ SnapshotSizeConstants<kPointerSize>::kMaxSerializableSnapshotRawSize / MB,
+ (snapshot_->RawSnapshotSize() + MB - 1) / MB);
+ HeapEntry* message = result->AddEntry(HeapEntry::kString, text, 0, 4);
+ result->root()->SetIndexedReference(HeapGraphEdge::kElement, 1, message);
+ result->FillChildren();
+ return result;
+}
+
+
+void HeapSnapshotJSONSerializer::SerializeImpl() {
+ ASSERT(0 == snapshot_->root()->index());
+ writer_->AddCharacter('{');
+ writer_->AddString("\"snapshot\":{");
+ SerializeSnapshot();
+ if (writer_->aborted()) return;
+ writer_->AddString("},\n");
+ writer_->AddString("\"nodes\":[");
+ SerializeNodes();
+ if (writer_->aborted()) return;
+ writer_->AddString("],\n");
+ writer_->AddString("\"edges\":[");
+ SerializeEdges();
+ if (writer_->aborted()) return;
+ writer_->AddString("],\n");
+ writer_->AddString("\"strings\":[");
+ SerializeStrings();
+ if (writer_->aborted()) return;
+ writer_->AddCharacter(']');
+ writer_->AddCharacter('}');
+ writer_->Finalize();
+}
+
+
+int HeapSnapshotJSONSerializer::GetStringId(const char* s) {
+ HashMap::Entry* cache_entry = strings_.Lookup(
+ const_cast<char*>(s), ObjectHash(s), true);
+ if (cache_entry->value == NULL) {
+ cache_entry->value = reinterpret_cast<void*>(next_string_id_++);
+ }
+ return static_cast<int>(reinterpret_cast<intptr_t>(cache_entry->value));
+}
+
+
+static int utoa(unsigned value, const Vector<char>& buffer, int buffer_pos) {
+ int number_of_digits = 0;
+ unsigned t = value;
+ do {
+ ++number_of_digits;
+ } while (t /= 10);
+
+ buffer_pos += number_of_digits;
+ int result = buffer_pos;
+ do {
+ int last_digit = value % 10;
+ buffer[--buffer_pos] = '0' + last_digit;
+ value /= 10;
+ } while (value);
+ return result;
+}
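+// For example, utoa(907, buffer, 2) leaves buffer[0..1] untouched, writes
+// "907" into buffer[2..4] and returns 5. No terminating '\0' is written;
+// callers append their own ',' or '\n'.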
+
+
+void HeapSnapshotJSONSerializer::SerializeEdge(HeapGraphEdge* edge,
+ bool first_edge) {
+ // The buffer needs space for 3 unsigned ints, 3 commas, \n and \0
+ static const int kBufferSize =
+ MaxDecimalDigitsIn<sizeof(unsigned)>::kUnsigned * 3 + 3 + 2; // NOLINT
+ EmbeddedVector<char, kBufferSize> buffer;
+ int edge_name_or_index = edge->type() == HeapGraphEdge::kElement
+ || edge->type() == HeapGraphEdge::kHidden
+ || edge->type() == HeapGraphEdge::kWeak
+ ? edge->index() : GetStringId(edge->name());
+ int buffer_pos = 0;
+ if (!first_edge) {
+ buffer[buffer_pos++] = ',';
+ }
+ buffer_pos = utoa(edge->type(), buffer, buffer_pos);
+ buffer[buffer_pos++] = ',';
+ buffer_pos = utoa(edge_name_or_index, buffer, buffer_pos);
+ buffer[buffer_pos++] = ',';
+ buffer_pos = utoa(entry_index(edge->to()), buffer, buffer_pos);
+ buffer[buffer_pos++] = '\n';
+ buffer[buffer_pos++] = '\0';
+ writer_->AddString(buffer.start());
+}
+
+
+void HeapSnapshotJSONSerializer::SerializeEdges() {
+ List<HeapGraphEdge*>& edges = snapshot_->children();
+ for (int i = 0; i < edges.length(); ++i) {
+ ASSERT(i == 0 ||
+ edges[i - 1]->from()->index() <= edges[i]->from()->index());
+ SerializeEdge(edges[i], i == 0);
+ if (writer_->aborted()) return;
+ }
+}
+
+
+void HeapSnapshotJSONSerializer::SerializeNode(HeapEntry* entry) {
+ // The buffer needs space for 5 unsigned ints, 5 commas, \n and \0
+ static const int kBufferSize =
+ 5 * MaxDecimalDigitsIn<sizeof(unsigned)>::kUnsigned // NOLINT
+ + 5 + 1 + 1;
+ EmbeddedVector<char, kBufferSize> buffer;
+ int buffer_pos = 0;
+ if (entry_index(entry) != 0) {
+ buffer[buffer_pos++] = ',';
+ }
+ buffer_pos = utoa(entry->type(), buffer, buffer_pos);
+ buffer[buffer_pos++] = ',';
+ buffer_pos = utoa(GetStringId(entry->name()), buffer, buffer_pos);
+ buffer[buffer_pos++] = ',';
+ buffer_pos = utoa(entry->id(), buffer, buffer_pos);
+ buffer[buffer_pos++] = ',';
+ buffer_pos = utoa(entry->self_size(), buffer, buffer_pos);
+ buffer[buffer_pos++] = ',';
+ buffer_pos = utoa(entry->children_count(), buffer, buffer_pos);
+ buffer[buffer_pos++] = '\n';
+ buffer[buffer_pos++] = '\0';
+ writer_->AddString(buffer.start());
+}
+
+
+void HeapSnapshotJSONSerializer::SerializeNodes() {
+ List<HeapEntry>& entries = snapshot_->entries();
+ for (int i = 0; i < entries.length(); ++i) {
+ SerializeNode(&entries[i]);
+ if (writer_->aborted()) return;
+ }
+}
+
+
+void HeapSnapshotJSONSerializer::SerializeSnapshot() {
+ writer_->AddString("\"title\":\"");
+ writer_->AddString(snapshot_->title());
+ writer_->AddString("\"");
+ writer_->AddString(",\"uid\":");
+ writer_->AddNumber(snapshot_->uid());
+ writer_->AddString(",\"meta\":");
+ // The object describing node serialization layout.
+ // We use a set of macros to improve readability.
+#define JSON_A(s) "[" s "]"
+#define JSON_O(s) "{" s "}"
+#define JSON_S(s) "\"" s "\""
+ writer_->AddString(JSON_O(
+ JSON_S("node_fields") ":" JSON_A(
+ JSON_S("type") ","
+ JSON_S("name") ","
+ JSON_S("id") ","
+ JSON_S("self_size") ","
+ JSON_S("edge_count")) ","
+ JSON_S("node_types") ":" JSON_A(
+ JSON_A(
+ JSON_S("hidden") ","
+ JSON_S("array") ","
+ JSON_S("string") ","
+ JSON_S("object") ","
+ JSON_S("code") ","
+ JSON_S("closure") ","
+ JSON_S("regexp") ","
+ JSON_S("number") ","
+ JSON_S("native") ","
+ JSON_S("synthetic")) ","
+ JSON_S("string") ","
+ JSON_S("number") ","
+ JSON_S("number") ","
+ JSON_S("number") ","
+ JSON_S("number") ","
+ JSON_S("number")) ","
+ JSON_S("edge_fields") ":" JSON_A(
+ JSON_S("type") ","
+ JSON_S("name_or_index") ","
+ JSON_S("to_node")) ","
+ JSON_S("edge_types") ":" JSON_A(
+ JSON_A(
+ JSON_S("context") ","
+ JSON_S("element") ","
+ JSON_S("property") ","
+ JSON_S("internal") ","
+ JSON_S("hidden") ","
+ JSON_S("shortcut") ","
+ JSON_S("weak")) ","
+ JSON_S("string_or_number") ","
+ JSON_S("node"))));
+#undef JSON_S
+#undef JSON_O
+#undef JSON_A
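+ // With the macros expanded, the meta object written above reads
+ // (abridged):
+ //   {"node_fields":["type","name","id","self_size","edge_count"],
+ //    "node_types":[["hidden",...,"synthetic"],"string","number",...],
+ //    "edge_fields":["type","name_or_index","to_node"],
+ //    "edge_types":[["context",...,"weak"],"string_or_number","node"]}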
+ writer_->AddString(",\"node_count\":");
+ writer_->AddNumber(snapshot_->entries().length());
+ writer_->AddString(",\"edge_count\":");
+ writer_->AddNumber(snapshot_->edges().length());
+}
+
+
+static void WriteUChar(OutputStreamWriter* w, unibrow::uchar u) {
+ static const char hex_chars[] = "0123456789ABCDEF";
+ w->AddString("\\u");
+ w->AddCharacter(hex_chars[(u >> 12) & 0xf]);
+ w->AddCharacter(hex_chars[(u >> 8) & 0xf]);
+ w->AddCharacter(hex_chars[(u >> 4) & 0xf]);
+ w->AddCharacter(hex_chars[u & 0xf]);
+}
+
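+// Writes |s| as a JSON string literal. For example, the bytes
+// {'a', '"', 0xC3, 0xA9, '\0'} come out (after a cosmetic leading newline)
+// as "a\"\u00E9": the quote is backslash-escaped and the two-byte UTF-8
+// sequence is folded into a single \u UTF-16 literal.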
+void HeapSnapshotJSONSerializer::SerializeString(const unsigned char* s) {
+ writer_->AddCharacter('\n');
+ writer_->AddCharacter('\"');
+ for ( ; *s != '\0'; ++s) {
+ switch (*s) {
+ case '\b':
+ writer_->AddString("\\b");
+ continue;
+ case '\f':
+ writer_->AddString("\\f");
+ continue;
+ case '\n':
+ writer_->AddString("\\n");
+ continue;
+ case '\r':
+ writer_->AddString("\\r");
+ continue;
+ case '\t':
+ writer_->AddString("\\t");
+ continue;
+ case '\"':
+ case '\\':
+ writer_->AddCharacter('\\');
+ writer_->AddCharacter(*s);
+ continue;
+ default:
+ if (*s > 31 && *s < 128) {
+ writer_->AddCharacter(*s);
+ } else if (*s <= 31) {
+ // Special character with no dedicated literal.
+ WriteUChar(writer_, *s);
+ } else {
+ // Convert UTF-8 into \u UTF-16 literal.
+ unsigned length = 1, cursor = 0;
+ for ( ; length <= 4 && *(s + length) != '\0'; ++length) { }
+ unibrow::uchar c = unibrow::Utf8::CalculateValue(s, length, &cursor);
+ if (c != unibrow::Utf8::kBadChar) {
+ WriteUChar(writer_, c);
+ ASSERT(cursor != 0);
+ s += cursor - 1;
+ } else {
+ writer_->AddCharacter('?');
+ }
+ }
+ }
+ }
+ writer_->AddCharacter('\"');
+}
+
+
+void HeapSnapshotJSONSerializer::SerializeStrings() {
+ List<HashMap::Entry*> sorted_strings;
+ SortHashMap(&strings_, &sorted_strings);
+ writer_->AddString("\"<dummy>\"");
+ for (int i = 0; i < sorted_strings.length(); ++i) {
+ writer_->AddCharacter(',');
+ SerializeString(
+ reinterpret_cast<const unsigned char*>(sorted_strings[i]->key));
+ if (writer_->aborted()) return;
+ }
+}
+
+
+template<typename T>
+inline static int SortUsingEntryValue(const T* x, const T* y) {
+ uintptr_t x_uint = reinterpret_cast<uintptr_t>((*x)->value);
+ uintptr_t y_uint = reinterpret_cast<uintptr_t>((*y)->value);
+ if (x_uint > y_uint) {
+ return 1;
+ } else if (x_uint == y_uint) {
+ return 0;
+ } else {
+ return -1;
+ }
+}
+
+
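+// Sorts map entries by their stored value. GetStringId hands out ids
+// starting at 1 in insertion order, so after the sort sorted_entries[i]
+// holds the string with id i + 1, matching its position after the leading
+// "<dummy>" element written by SerializeStrings.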
+void HeapSnapshotJSONSerializer::SortHashMap(
+ HashMap* map, List<HashMap::Entry*>* sorted_entries) {
+ for (HashMap::Entry* p = map->Start(); p != NULL; p = map->Next(p))
+ sorted_entries->Add(p);
+ sorted_entries->Sort(SortUsingEntryValue);
+}
+
+} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/heap-snapshot-generator.h b/src/3rdparty/v8/src/heap-snapshot-generator.h
new file mode 100644
index 0000000..77c659a
--- /dev/null
+++ b/src/3rdparty/v8/src/heap-snapshot-generator.h
@@ -0,0 +1,697 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_HEAP_SNAPSHOT_GENERATOR_H_
+#define V8_HEAP_SNAPSHOT_GENERATOR_H_
+
+namespace v8 {
+namespace internal {
+
+class HeapEntry;
+class HeapSnapshot;
+
+class HeapGraphEdge BASE_EMBEDDED {
+ public:
+ enum Type {
+ kContextVariable = v8::HeapGraphEdge::kContextVariable,
+ kElement = v8::HeapGraphEdge::kElement,
+ kProperty = v8::HeapGraphEdge::kProperty,
+ kInternal = v8::HeapGraphEdge::kInternal,
+ kHidden = v8::HeapGraphEdge::kHidden,
+ kShortcut = v8::HeapGraphEdge::kShortcut,
+ kWeak = v8::HeapGraphEdge::kWeak
+ };
+
+ HeapGraphEdge() { }
+ HeapGraphEdge(Type type, const char* name, int from, int to);
+ HeapGraphEdge(Type type, int index, int from, int to);
+ void ReplaceToIndexWithEntry(HeapSnapshot* snapshot);
+
+ Type type() const { return static_cast<Type>(type_); }
+ int index() const {
+ ASSERT(type_ == kElement || type_ == kHidden || type_ == kWeak);
+ return index_;
+ }
+ const char* name() const {
+ ASSERT(type_ == kContextVariable
+ || type_ == kProperty
+ || type_ == kInternal
+ || type_ == kShortcut);
+ return name_;
+ }
+ INLINE(HeapEntry* from() const);
+ HeapEntry* to() const { return to_entry_; }
+
+ private:
+ INLINE(HeapSnapshot* snapshot() const);
+
+ unsigned type_ : 3;
+ int from_index_ : 29;
+ union {
+ // During entries population |to_index_| is used for storing the index;
+ // afterwards it is replaced with a pointer to the entry.
+ int to_index_;
+ HeapEntry* to_entry_;
+ };
+ union {
+ int index_;
+ const char* name_;
+ };
+};
+
+
+// A HeapEntry instance represents an entity from the heap (or a special
+// virtual node, e.g. the root).
+class HeapEntry BASE_EMBEDDED {
+ public:
+ enum Type {
+ kHidden = v8::HeapGraphNode::kHidden,
+ kArray = v8::HeapGraphNode::kArray,
+ kString = v8::HeapGraphNode::kString,
+ kObject = v8::HeapGraphNode::kObject,
+ kCode = v8::HeapGraphNode::kCode,
+ kClosure = v8::HeapGraphNode::kClosure,
+ kRegExp = v8::HeapGraphNode::kRegExp,
+ kHeapNumber = v8::HeapGraphNode::kHeapNumber,
+ kNative = v8::HeapGraphNode::kNative,
+ kSynthetic = v8::HeapGraphNode::kSynthetic
+ };
+ static const int kNoEntry;
+
+ HeapEntry() { }
+ HeapEntry(HeapSnapshot* snapshot,
+ Type type,
+ const char* name,
+ SnapshotObjectId id,
+ int self_size);
+
+ HeapSnapshot* snapshot() { return snapshot_; }
+ Type type() { return static_cast<Type>(type_); }
+ const char* name() { return name_; }
+ void set_name(const char* name) { name_ = name; }
+ inline SnapshotObjectId id() { return id_; }
+ int self_size() { return self_size_; }
+ INLINE(int index() const);
+ int children_count() const { return children_count_; }
+ INLINE(int set_children_index(int index));
+ void add_child(HeapGraphEdge* edge) {
+ children_arr()[children_count_++] = edge;
+ }
+ Vector<HeapGraphEdge*> children() {
+ return Vector<HeapGraphEdge*>(children_arr(), children_count_); }
+
+ void SetIndexedReference(
+ HeapGraphEdge::Type type, int index, HeapEntry* entry);
+ void SetNamedReference(
+ HeapGraphEdge::Type type, const char* name, HeapEntry* entry);
+
+ void Print(
+ const char* prefix, const char* edge_name, int max_depth, int indent);
+
+ Handle<HeapObject> GetHeapObject();
+
+ private:
+ INLINE(HeapGraphEdge** children_arr());
+ const char* TypeAsString();
+
+ unsigned type_: 4;
+ int children_count_: 28;
+ int children_index_;
+ int self_size_;
+ SnapshotObjectId id_;
+ HeapSnapshot* snapshot_;
+ const char* name_;
+};
+
+
+class HeapSnapshotsCollection;
+
+// HeapSnapshot represents a single heap snapshot. It is stored in
+// HeapSnapshotsCollection, which is also a factory for
+// HeapSnapshots. All HeapSnapshots share strings copied from the JS heap
+// so that they can still be returned even after the original strings
+// have been collected. HeapSnapshotGenerator fills in a HeapSnapshot.
+class HeapSnapshot {
+ public:
+ enum Type {
+ kFull = v8::HeapSnapshot::kFull
+ };
+
+ HeapSnapshot(HeapSnapshotsCollection* collection,
+ Type type,
+ const char* title,
+ unsigned uid);
+ void Delete();
+
+ HeapSnapshotsCollection* collection() { return collection_; }
+ Type type() { return type_; }
+ const char* title() { return title_; }
+ unsigned uid() { return uid_; }
+ size_t RawSnapshotSize() const;
+ HeapEntry* root() { return &entries_[root_index_]; }
+ HeapEntry* gc_roots() { return &entries_[gc_roots_index_]; }
+ HeapEntry* natives_root() { return &entries_[natives_root_index_]; }
+ HeapEntry* gc_subroot(int index) {
+ return &entries_[gc_subroot_indexes_[index]];
+ }
+ List<HeapEntry>& entries() { return entries_; }
+ List<HeapGraphEdge>& edges() { return edges_; }
+ List<HeapGraphEdge*>& children() { return children_; }
+ void RememberLastJSObjectId();
+ SnapshotObjectId max_snapshot_js_object_id() const {
+ return max_snapshot_js_object_id_;
+ }
+
+ HeapEntry* AddEntry(HeapEntry::Type type,
+ const char* name,
+ SnapshotObjectId id,
+ int size);
+ HeapEntry* AddRootEntry();
+ HeapEntry* AddGcRootsEntry();
+ HeapEntry* AddGcSubrootEntry(int tag);
+ HeapEntry* AddNativesRootEntry();
+ HeapEntry* GetEntryById(SnapshotObjectId id);
+ List<HeapEntry*>* GetSortedEntriesList();
+ void FillChildren();
+
+ void Print(int max_depth);
+ void PrintEntriesSize();
+
+ private:
+ HeapSnapshotsCollection* collection_;
+ Type type_;
+ const char* title_;
+ unsigned uid_;
+ int root_index_;
+ int gc_roots_index_;
+ int natives_root_index_;
+ int gc_subroot_indexes_[VisitorSynchronization::kNumberOfSyncTags];
+ List<HeapEntry> entries_;
+ List<HeapGraphEdge> edges_;
+ List<HeapGraphEdge*> children_;
+ List<HeapEntry*> sorted_entries_;
+ SnapshotObjectId max_snapshot_js_object_id_;
+
+ friend class HeapSnapshotTester;
+
+ DISALLOW_COPY_AND_ASSIGN(HeapSnapshot);
+};
+
+
+class HeapObjectsMap {
+ public:
+ explicit HeapObjectsMap(Heap* heap);
+
+ Heap* heap() const { return heap_; }
+
+ void SnapshotGenerationFinished();
+ SnapshotObjectId FindEntry(Address addr);
+ SnapshotObjectId FindOrAddEntry(Address addr, unsigned int size);
+ void MoveObject(Address from, Address to);
+ SnapshotObjectId last_assigned_id() const {
+ return next_id_ - kObjectIdStep;
+ }
+
+ void StopHeapObjectsTracking();
+ SnapshotObjectId PushHeapObjectsStats(OutputStream* stream);
+ size_t GetUsedMemorySize() const;
+
+ static SnapshotObjectId GenerateId(v8::RetainedObjectInfo* info);
+ static inline SnapshotObjectId GetNthGcSubrootId(int delta);
+
+ static const int kObjectIdStep = 2;
+ static const SnapshotObjectId kInternalRootObjectId;
+ static const SnapshotObjectId kGcRootsObjectId;
+ static const SnapshotObjectId kNativesRootObjectId;
+ static const SnapshotObjectId kGcRootsFirstSubrootId;
+ static const SnapshotObjectId kFirstAvailableObjectId;
+
+ private:
+ struct EntryInfo {
+ EntryInfo(SnapshotObjectId id, Address addr, unsigned int size)
+ : id(id), addr(addr), size(size), accessed(true) { }
+ EntryInfo(SnapshotObjectId id, Address addr, unsigned int size, bool accessed)
+ : id(id), addr(addr), size(size), accessed(accessed) { }
+ SnapshotObjectId id;
+ Address addr;
+ unsigned int size;
+ bool accessed;
+ };
+ struct TimeInterval {
+ explicit TimeInterval(SnapshotObjectId id) : id(id), size(0), count(0) { }
+ SnapshotObjectId id;
+ uint32_t size;
+ uint32_t count;
+ };
+
+ void UpdateHeapObjectsMap();
+ void RemoveDeadEntries();
+
+ static bool AddressesMatch(void* key1, void* key2) {
+ return key1 == key2;
+ }
+
+ static uint32_t AddressHash(Address addr) {
+ return ComputeIntegerHash(
+ static_cast<uint32_t>(reinterpret_cast<uintptr_t>(addr)),
+ v8::internal::kZeroHashSeed);
+ }
+
+ SnapshotObjectId next_id_;
+ HashMap entries_map_;
+ List<EntryInfo> entries_;
+ List<TimeInterval> time_intervals_;
+ Heap* heap_;
+
+ DISALLOW_COPY_AND_ASSIGN(HeapObjectsMap);
+};
+
+
+class HeapSnapshotsCollection {
+ public:
+ explicit HeapSnapshotsCollection(Heap* heap);
+ ~HeapSnapshotsCollection();
+
+ Heap* heap() const { return ids_.heap(); }
+
+ bool is_tracking_objects() { return is_tracking_objects_; }
+ SnapshotObjectId PushHeapObjectsStats(OutputStream* stream) {
+ return ids_.PushHeapObjectsStats(stream);
+ }
+ void StartHeapObjectsTracking() { is_tracking_objects_ = true; }
+ void StopHeapObjectsTracking() { ids_.StopHeapObjectsTracking(); }
+
+ HeapSnapshot* NewSnapshot(
+ HeapSnapshot::Type type, const char* name, unsigned uid);
+ void SnapshotGenerationFinished(HeapSnapshot* snapshot);
+ List<HeapSnapshot*>* snapshots() { return &snapshots_; }
+ HeapSnapshot* GetSnapshot(unsigned uid);
+ void RemoveSnapshot(HeapSnapshot* snapshot);
+
+ StringsStorage* names() { return &names_; }
+ TokenEnumerator* token_enumerator() { return token_enumerator_; }
+
+ SnapshotObjectId FindObjectId(Address object_addr) {
+ return ids_.FindEntry(object_addr);
+ }
+ SnapshotObjectId GetObjectId(Address object_addr, int object_size) {
+ return ids_.FindOrAddEntry(object_addr, object_size);
+ }
+ Handle<HeapObject> FindHeapObjectById(SnapshotObjectId id);
+ void ObjectMoveEvent(Address from, Address to) { ids_.MoveObject(from, to); }
+ SnapshotObjectId last_assigned_id() const {
+ return ids_.last_assigned_id();
+ }
+ size_t GetUsedMemorySize() const;
+
+ private:
+ INLINE(static bool HeapSnapshotsMatch(void* key1, void* key2)) {
+ return key1 == key2;
+ }
+
+ bool is_tracking_objects_; // Whether tracking object moves is needed.
+ List<HeapSnapshot*> snapshots_;
+ // Mapping from snapshots' uids to HeapSnapshot* pointers.
+ HashMap snapshots_uids_;
+ StringsStorage names_;
+ TokenEnumerator* token_enumerator_;
+ // Mapping from HeapObject addresses to objects' uids.
+ HeapObjectsMap ids_;
+
+ DISALLOW_COPY_AND_ASSIGN(HeapSnapshotsCollection);
+};
+
+
+// A typedef for referencing anything that can be snapshotted and that
+// lives in any kind of heap memory.
+typedef void* HeapThing;
+
+
+// An interface for creating HeapEntries from HeapThings.
+class HeapEntriesAllocator {
+ public:
+ virtual ~HeapEntriesAllocator() { }
+ virtual HeapEntry* AllocateEntry(HeapThing ptr) = 0;
+};
+
+
+// The HeapEntriesMap instance is used to track a mapping between
+// real heap objects and their representations in heap snapshots.
+class HeapEntriesMap {
+ public:
+ HeapEntriesMap();
+
+ int Map(HeapThing thing);
+ void Pair(HeapThing thing, int entry);
+
+ private:
+ static uint32_t Hash(HeapThing thing) {
+ return ComputeIntegerHash(
+ static_cast<uint32_t>(reinterpret_cast<uintptr_t>(thing)),
+ v8::internal::kZeroHashSeed);
+ }
+ static bool HeapThingsMatch(HeapThing key1, HeapThing key2) {
+ return key1 == key2;
+ }
+
+ HashMap entries_;
+
+ friend class HeapObjectsSet;
+
+ DISALLOW_COPY_AND_ASSIGN(HeapEntriesMap);
+};
+
+
+class HeapObjectsSet {
+ public:
+ HeapObjectsSet();
+ void Clear();
+ bool Contains(Object* object);
+ void Insert(Object* obj);
+ const char* GetTag(Object* obj);
+ void SetTag(Object* obj, const char* tag);
+ bool is_empty() const { return entries_.occupancy() == 0; }
+
+ private:
+ HashMap entries_;
+
+ DISALLOW_COPY_AND_ASSIGN(HeapObjectsSet);
+};
+
+
+// An interface used to populate a snapshot with nodes and edges.
+class SnapshotFillerInterface {
+ public:
+ virtual ~SnapshotFillerInterface() { }
+ virtual HeapEntry* AddEntry(HeapThing ptr,
+ HeapEntriesAllocator* allocator) = 0;
+ virtual HeapEntry* FindEntry(HeapThing ptr) = 0;
+ virtual HeapEntry* FindOrAddEntry(HeapThing ptr,
+ HeapEntriesAllocator* allocator) = 0;
+ virtual void SetIndexedReference(HeapGraphEdge::Type type,
+ int parent_entry,
+ int index,
+ HeapEntry* child_entry) = 0;
+ virtual void SetIndexedAutoIndexReference(HeapGraphEdge::Type type,
+ int parent_entry,
+ HeapEntry* child_entry) = 0;
+ virtual void SetNamedReference(HeapGraphEdge::Type type,
+ int parent_entry,
+ const char* reference_name,
+ HeapEntry* child_entry) = 0;
+ virtual void SetNamedAutoIndexReference(HeapGraphEdge::Type type,
+ int parent_entry,
+ HeapEntry* child_entry) = 0;
+};
+
+
+class SnapshottingProgressReportingInterface {
+ public:
+ virtual ~SnapshottingProgressReportingInterface() { }
+ virtual void ProgressStep() = 0;
+ virtual bool ProgressReport(bool force) = 0;
+};
+
+
+// An implementation of the V8 heap graph extractor.
+class V8HeapExplorer : public HeapEntriesAllocator {
+ public:
+ V8HeapExplorer(HeapSnapshot* snapshot,
+ SnapshottingProgressReportingInterface* progress,
+ v8::HeapProfiler::ObjectNameResolver* resolver);
+ virtual ~V8HeapExplorer();
+ virtual HeapEntry* AllocateEntry(HeapThing ptr);
+ void AddRootEntries(SnapshotFillerInterface* filler);
+ int EstimateObjectsCount(HeapIterator* iterator);
+ bool IterateAndExtractReferences(SnapshotFillerInterface* filler);
+ void TagGlobalObjects();
+
+ static String* GetConstructorName(JSObject* object);
+
+ static HeapObject* const kInternalRootObject;
+
+ private:
+ HeapEntry* AddEntry(HeapObject* object);
+ HeapEntry* AddEntry(HeapObject* object,
+ HeapEntry::Type type,
+ const char* name);
+ const char* GetSystemEntryName(HeapObject* object);
+
+ void ExtractReferences(HeapObject* obj);
+ void ExtractJSGlobalProxyReferences(JSGlobalProxy* proxy);
+ void ExtractJSObjectReferences(int entry, JSObject* js_obj);
+ void ExtractStringReferences(int entry, String* obj);
+ void ExtractContextReferences(int entry, Context* context);
+ void ExtractMapReferences(int entry, Map* map);
+ void ExtractSharedFunctionInfoReferences(int entry,
+ SharedFunctionInfo* shared);
+ void ExtractScriptReferences(int entry, Script* script);
+ void ExtractCodeCacheReferences(int entry, CodeCache* code_cache);
+ void ExtractCodeReferences(int entry, Code* code);
+ void ExtractJSGlobalPropertyCellReferences(int entry,
+ JSGlobalPropertyCell* cell);
+ void ExtractClosureReferences(JSObject* js_obj, int entry);
+ void ExtractPropertyReferences(JSObject* js_obj, int entry);
+ void ExtractElementReferences(JSObject* js_obj, int entry);
+ void ExtractInternalReferences(JSObject* js_obj, int entry);
+ bool IsEssentialObject(Object* object);
+ void SetClosureReference(HeapObject* parent_obj,
+ int parent,
+ String* reference_name,
+ Object* child);
+ void SetNativeBindReference(HeapObject* parent_obj,
+ int parent,
+ const char* reference_name,
+ Object* child);
+ void SetElementReference(HeapObject* parent_obj,
+ int parent,
+ int index,
+ Object* child);
+ void SetInternalReference(HeapObject* parent_obj,
+ int parent,
+ const char* reference_name,
+ Object* child,
+ int field_offset = -1);
+ void SetInternalReference(HeapObject* parent_obj,
+ int parent,
+ int index,
+ Object* child,
+ int field_offset = -1);
+ void SetHiddenReference(HeapObject* parent_obj,
+ int parent,
+ int index,
+ Object* child);
+ void SetWeakReference(HeapObject* parent_obj,
+ int parent,
+ int index,
+ Object* child_obj,
+ int field_offset);
+ void SetPropertyReference(HeapObject* parent_obj,
+ int parent,
+ String* reference_name,
+ Object* child,
+ const char* name_format_string = NULL,
+ int field_offset = -1);
+ void SetUserGlobalReference(Object* user_global);
+ void SetRootGcRootsReference();
+ void SetGcRootsReference(VisitorSynchronization::SyncTag tag);
+ void SetGcSubrootReference(
+ VisitorSynchronization::SyncTag tag, bool is_weak, Object* child);
+ const char* GetStrongGcSubrootName(Object* object);
+ void TagObject(Object* obj, const char* tag);
+
+ HeapEntry* GetEntry(Object* obj);
+
+ static inline HeapObject* GetNthGcSubrootObject(int delta);
+ static inline int GetGcSubrootOrder(HeapObject* subroot);
+
+ Heap* heap_;
+ HeapSnapshot* snapshot_;
+ HeapSnapshotsCollection* collection_;
+ SnapshottingProgressReportingInterface* progress_;
+ SnapshotFillerInterface* filler_;
+ HeapObjectsSet objects_tags_;
+ HeapObjectsSet strong_gc_subroot_names_;
+ v8::HeapProfiler::ObjectNameResolver* global_object_name_resolver_;
+
+ static HeapObject* const kGcRootsObject;
+ static HeapObject* const kFirstGcSubrootObject;
+ static HeapObject* const kLastGcSubrootObject;
+
+ friend class IndexedReferencesExtractor;
+ friend class GcSubrootsEnumerator;
+ friend class RootsReferencesExtractor;
+
+ DISALLOW_COPY_AND_ASSIGN(V8HeapExplorer);
+};
+
+
+class NativeGroupRetainedObjectInfo;
+
+
+// An implementation of the retained native objects extractor.
+class NativeObjectsExplorer {
+ public:
+ NativeObjectsExplorer(HeapSnapshot* snapshot,
+ SnapshottingProgressReportingInterface* progress);
+ virtual ~NativeObjectsExplorer();
+ void AddRootEntries(SnapshotFillerInterface* filler);
+ int EstimateObjectsCount();
+ bool IterateAndExtractReferences(SnapshotFillerInterface* filler);
+
+ private:
+ void FillRetainedObjects();
+ void FillImplicitReferences();
+ List<HeapObject*>* GetListMaybeDisposeInfo(v8::RetainedObjectInfo* info);
+ void SetNativeRootReference(v8::RetainedObjectInfo* info);
+ void SetRootNativeRootsReference();
+ void SetWrapperNativeReferences(HeapObject* wrapper,
+ v8::RetainedObjectInfo* info);
+ void VisitSubtreeWrapper(Object** p, uint16_t class_id);
+
+ static uint32_t InfoHash(v8::RetainedObjectInfo* info) {
+ return ComputeIntegerHash(static_cast<uint32_t>(info->GetHash()),
+ v8::internal::kZeroHashSeed);
+ }
+ static bool RetainedInfosMatch(void* key1, void* key2) {
+ return key1 == key2 ||
+ (reinterpret_cast<v8::RetainedObjectInfo*>(key1))->IsEquivalent(
+ reinterpret_cast<v8::RetainedObjectInfo*>(key2));
+ }
+ INLINE(static bool StringsMatch(void* key1, void* key2)) {
+ return strcmp(reinterpret_cast<char*>(key1),
+ reinterpret_cast<char*>(key2)) == 0;
+ }
+
+ NativeGroupRetainedObjectInfo* FindOrAddGroupInfo(const char* label);
+
+ HeapSnapshot* snapshot_;
+ HeapSnapshotsCollection* collection_;
+ SnapshottingProgressReportingInterface* progress_;
+ bool embedder_queried_;
+ HeapObjectsSet in_groups_;
+ // RetainedObjectInfo* -> List<HeapObject*>*
+ HashMap objects_by_info_;
+ HashMap native_groups_;
+ HeapEntriesAllocator* synthetic_entries_allocator_;
+ HeapEntriesAllocator* native_entries_allocator_;
+ // Used during reference extraction.
+ SnapshotFillerInterface* filler_;
+
+ static HeapThing const kNativesRootObject;
+
+ friend class GlobalHandlesExtractor;
+
+ DISALLOW_COPY_AND_ASSIGN(NativeObjectsExplorer);
+};
+
+
+class HeapSnapshotGenerator : public SnapshottingProgressReportingInterface {
+ public:
+ HeapSnapshotGenerator(HeapSnapshot* snapshot,
+ v8::ActivityControl* control,
+ v8::HeapProfiler::ObjectNameResolver* resolver,
+ Heap* heap);
+ bool GenerateSnapshot();
+
+ private:
+ bool FillReferences();
+ void ProgressStep();
+ bool ProgressReport(bool force = false);
+ void SetProgressTotal(int iterations_count);
+
+ HeapSnapshot* snapshot_;
+ v8::ActivityControl* control_;
+ V8HeapExplorer v8_heap_explorer_;
+ NativeObjectsExplorer dom_explorer_;
+ // Mapping from HeapThing pointers to HeapEntry indices.
+ HeapEntriesMap entries_;
+ // Used during snapshot generation.
+ int progress_counter_;
+ int progress_total_;
+ Heap* heap_;
+
+ DISALLOW_COPY_AND_ASSIGN(HeapSnapshotGenerator);
+};
+
+class OutputStreamWriter;
+
+class HeapSnapshotJSONSerializer {
+ public:
+ explicit HeapSnapshotJSONSerializer(HeapSnapshot* snapshot)
+ : snapshot_(snapshot),
+ strings_(ObjectsMatch),
+ next_node_id_(1),
+ next_string_id_(1),
+ writer_(NULL) {
+ }
+ void Serialize(v8::OutputStream* stream);
+
+ private:
+ INLINE(static bool ObjectsMatch(void* key1, void* key2)) {
+ return key1 == key2;
+ }
+
+ INLINE(static uint32_t ObjectHash(const void* key)) {
+ return ComputeIntegerHash(
+ static_cast<uint32_t>(reinterpret_cast<uintptr_t>(key)),
+ v8::internal::kZeroHashSeed);
+ }
+
+ HeapSnapshot* CreateFakeSnapshot();
+ int GetStringId(const char* s);
+ int entry_index(HeapEntry* e) { return e->index() * kNodeFieldsCount; }
+ void SerializeEdge(HeapGraphEdge* edge, bool first_edge);
+ void SerializeEdges();
+ void SerializeImpl();
+ void SerializeNode(HeapEntry* entry);
+ void SerializeNodes();
+ void SerializeSnapshot();
+ void SerializeString(const unsigned char* s);
+ void SerializeStrings();
+ void SortHashMap(HashMap* map, List<HashMap::Entry*>* sorted_entries);
+
+ static const int kEdgeFieldsCount;
+ static const int kNodeFieldsCount;
+
+ HeapSnapshot* snapshot_;
+ HashMap strings_;
+ int next_node_id_;
+ int next_string_id_;
+ OutputStreamWriter* writer_;
+
+ friend class HeapSnapshotJSONSerializerEnumerator;
+ friend class HeapSnapshotJSONSerializerIterator;
+
+ DISALLOW_COPY_AND_ASSIGN(HeapSnapshotJSONSerializer);
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_HEAP_SNAPSHOT_GENERATOR_H_
+
diff --git a/src/3rdparty/v8/src/heap.cc b/src/3rdparty/v8/src/heap.cc
index ebf3ccd..5b61436 100644
--- a/src/3rdparty/v8/src/heap.cc
+++ b/src/3rdparty/v8/src/heap.cc
@@ -37,7 +37,6 @@
#include "global-handles.h"
#include "heap-profiler.h"
#include "incremental-marking.h"
-#include "liveobjectlist-inl.h"
#include "mark-compact.h"
#include "natives.h"
#include "objects-visiting.h"
@@ -117,7 +116,6 @@ Heap::Heap()
allocation_allowed_(true),
allocation_timeout_(0),
disallow_allocation_failure_(false),
- debug_utils_(NULL),
#endif // DEBUG
new_space_high_promotion_mode_active_(false),
old_gen_promotion_limit_(kMinimumPromotionLimit),
@@ -129,7 +127,7 @@ Heap::Heap()
amount_of_external_allocated_memory_at_last_global_gc_(0),
old_gen_exhausted_(false),
store_buffer_rebuilder_(store_buffer()),
- hidden_symbol_(NULL),
+ hidden_string_(NULL),
global_gc_prologue_callback_(NULL),
global_gc_epilogue_callback_(NULL),
gc_safe_size_of_old_object_(NULL),
@@ -137,15 +135,18 @@ Heap::Heap()
tracer_(NULL),
young_survivors_after_last_gc_(0),
high_survival_rate_period_length_(0),
+ low_survival_rate_period_length_(0),
survival_rate_(0),
previous_survival_rate_trend_(Heap::STABLE),
survival_rate_trend_(Heap::STABLE),
- max_gc_pause_(0),
- total_gc_time_ms_(0),
+ max_gc_pause_(0.0),
+ total_gc_time_ms_(0.0),
max_alive_after_gc_(0),
min_in_mutator_(kMaxInt),
alive_after_last_gc_(0),
last_gc_end_timestamp_(0.0),
+ marking_time_(0.0),
+ sweeping_time_(0.0),
store_buffer_(this),
marking_(this),
incremental_marking_(this),
@@ -156,6 +157,9 @@ Heap::Heap()
ms_count_at_last_idle_notification_(0),
gc_count_at_last_idle_gc_(0),
scavenges_since_last_idle_round_(kIdleScavengeThreshold),
+#ifdef VERIFY_HEAP
+ no_weak_embedded_maps_verification_scope_depth_(0),
+#endif
promotion_queue_(this),
configured_(false),
chunks_queued_for_free_(NULL),
@@ -392,7 +396,7 @@ void Heap::PrintShortHeapStatistics() {
this->SizeOfObjects() / KB,
this->Available() / KB,
this->CommittedMemory() / KB);
- PrintPID("Total time spent in GC : %d ms\n", total_gc_time_ms_);
+ PrintPID("Total time spent in GC : %.1f ms\n", total_gc_time_ms_);
}
@@ -439,14 +443,13 @@ void Heap::GarbageCollectionPrologue() {
ReportStatisticsBeforeGC();
#endif // DEBUG
- LiveObjectList::GCPrologue();
store_buffer()->GCPrologue();
}
intptr_t Heap::SizeOfObjects() {
intptr_t total = 0;
- AllSpaces spaces;
+ AllSpaces spaces(this);
for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
total += space->SizeOfObjects();
}
@@ -455,7 +458,7 @@ intptr_t Heap::SizeOfObjects() {
void Heap::RepairFreeListsAfterBoot() {
- PagedSpaces spaces;
+ PagedSpaces spaces(this);
for (PagedSpace* space = spaces.next();
space != NULL;
space = spaces.next()) {
@@ -466,7 +469,6 @@ void Heap::RepairFreeListsAfterBoot() {
void Heap::GarbageCollectionEpilogue() {
store_buffer()->GCEpilogue();
- LiveObjectList::GCEpilogue();
// In release mode, we only zap the from space under heap verification.
if (Heap::ShouldZapGarbage()) {
@@ -490,10 +492,10 @@ void Heap::GarbageCollectionEpilogue() {
isolate_->counters()->alive_after_last_gc()->Set(
static_cast<int>(SizeOfObjects()));
- isolate_->counters()->symbol_table_capacity()->Set(
- symbol_table()->Capacity());
+ isolate_->counters()->string_table_capacity()->Set(
+ string_table()->Capacity());
isolate_->counters()->number_of_symbols()->Set(
- symbol_table()->NumberOfElements());
+ string_table()->NumberOfElements());
if (CommittedMemory() > 0) {
isolate_->counters()->external_fragmentation_total()->AddSample(
@@ -550,6 +552,8 @@ void Heap::GarbageCollectionEpilogue() {
#ifdef ENABLE_DEBUGGER_SUPPORT
isolate_->debug()->AfterGarbageCollection();
#endif // ENABLE_DEBUGGER_SUPPORT
+
+ error_object_list_.DeferredFormatStackTrace(isolate());
}
@@ -587,7 +591,6 @@ void Heap::CollectAllAvailableGarbage(const char* gc_reason) {
mark_compact_collector()->SetFlags(kNoGCFlags);
new_space_.Shrink();
UncommitFromSpace();
- Shrink();
incremental_marking()->UncommitMarkingDeque();
}
@@ -615,7 +618,7 @@ bool Heap::CollectGarbage(AllocationSpace space,
}
if (collector == MARK_COMPACTOR &&
- !mark_compact_collector()->abort_incremental_marking_ &&
+ !mark_compact_collector()->abort_incremental_marking() &&
!incremental_marking()->IsStopped() &&
!incremental_marking()->should_hurry() &&
FLAG_incremental_marking_steps) {
@@ -643,24 +646,24 @@ bool Heap::CollectGarbage(AllocationSpace space,
// Tell the tracer which collector we've selected.
tracer.set_collector(collector);
- HistogramTimer* rate = (collector == SCAVENGER)
- ? isolate_->counters()->gc_scavenger()
- : isolate_->counters()->gc_compactor();
- rate->Start();
- next_gc_likely_to_collect_more =
- PerformGarbageCollection(collector, &tracer);
- rate->Stop();
-
- ASSERT(collector == SCAVENGER || incremental_marking()->IsStopped());
+ {
+ HistogramTimerScope histogram_timer_scope(
+ (collector == SCAVENGER) ? isolate_->counters()->gc_scavenger()
+ : isolate_->counters()->gc_compactor());
+ next_gc_likely_to_collect_more =
+ PerformGarbageCollection(collector, &tracer);
+ }
- // This can do debug callbacks and restart incremental marking.
GarbageCollectionEpilogue();
}
- if (incremental_marking()->IsStopped()) {
- if (incremental_marking()->WorthActivating() && NextGCIsLikelyToBeFull()) {
- incremental_marking()->Start();
- }
+ // Start incremental marking for the next cycle. The heap snapshot
+ // generator needs incremental marking to stay off after it aborted.
+ if (!mark_compact_collector()->abort_incremental_marking() &&
+ incremental_marking()->IsStopped() &&
+ incremental_marking()->WorthActivating() &&
+ NextGCIsLikelyToBeFull()) {
+ incremental_marking()->Start();
}
return next_gc_likely_to_collect_more;
@@ -677,25 +680,49 @@ void Heap::PerformScavenge() {
}
+void Heap::MoveElements(FixedArray* array,
+ int dst_index,
+ int src_index,
+ int len) {
+ if (len == 0) return;
+
+ ASSERT(array->map() != HEAP->fixed_cow_array_map());
+ Object** dst_objects = array->data_start() + dst_index;
+ memmove(dst_objects,
+ array->data_start() + src_index,
+ len * kPointerSize);
+ if (!InNewSpace(array)) {
+ for (int i = 0; i < len; i++) {
+ // TODO(hpayer): check store buffer for entries
+ if (InNewSpace(dst_objects[i])) {
+ RecordWrite(array->address(), array->OffsetOfElementAt(dst_index + i));
+ }
+ }
+ }
+ incremental_marking()->RecordWrites(array);
+}
+
+
#ifdef VERIFY_HEAP
-// Helper class for verifying the symbol table.
-class SymbolTableVerifier : public ObjectVisitor {
+// Helper class for verifying the string table.
+class StringTableVerifier : public ObjectVisitor {
public:
void VisitPointers(Object** start, Object** end) {
// Visit all HeapObject pointers in [start, end).
for (Object** p = start; p < end; p++) {
if ((*p)->IsHeapObject()) {
- // Check that the symbol is actually a symbol.
- CHECK((*p)->IsTheHole() || (*p)->IsUndefined() || (*p)->IsSymbol());
+ // Check that the string is actually internalized.
+ CHECK((*p)->IsTheHole() || (*p)->IsUndefined() ||
+ (*p)->IsInternalizedString());
}
}
}
};
-static void VerifySymbolTable() {
- SymbolTableVerifier verifier;
- HEAP->symbol_table()->IterateElements(&verifier);
+static void VerifyStringTable() {
+ StringTableVerifier verifier;
+ HEAP->string_table()->IterateElements(&verifier);
}
#endif // VERIFY_HEAP
@@ -762,11 +789,6 @@ void Heap::EnsureFromSpaceIsCommitted() {
if (new_space_.CommitFromSpaceIfNeeded()) return;
// Committing memory to from space failed.
- // Try shrinking and try again.
- Shrink();
- if (new_space_.CommitFromSpaceIfNeeded()) return;
-
- // Committing memory to from space failed again.
// Memory is exhausted and we will die.
V8::FatalProcessOutOfMemory("Committing semi space failed.");
}
@@ -795,7 +817,6 @@ void Heap::ClearJSFunctionResultCaches() {
}
-
void Heap::ClearNormalizedMapCaches() {
if (isolate_->bootstrapper()->IsActive() &&
!incremental_marking()->IsMarking()) {
@@ -856,23 +877,17 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector,
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
- VerifySymbolTable();
+ VerifyStringTable();
}
#endif
- if (collector == MARK_COMPACTOR && global_gc_prologue_callback_) {
- ASSERT(!allocation_allowed_);
- GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
- global_gc_prologue_callback_();
- }
-
GCType gc_type =
collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact : kGCTypeScavenge;
- for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
- if (gc_type & gc_prologue_callbacks_[i].gc_type) {
- gc_prologue_callbacks_[i].callback(gc_type, kNoGCCallbackFlags);
- }
+ {
+ GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
+ VMState state(isolate_, EXTERNAL);
+ CallGCPrologueCallbacks(gc_type);
}
EnsureFromSpaceIsCommitted();
@@ -957,11 +972,16 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector,
isolate_->counters()->objs_since_last_young()->Set(0);
+ // Callbacks that fire after this point might trigger nested GCs and
+ // restart incremental marking, so the assertion can't be moved down.
+ ASSERT(collector == SCAVENGER || incremental_marking()->IsStopped());
+
gc_post_processing_depth_++;
{ DisableAssertNoAllocation allow_allocation;
GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
next_gc_likely_to_collect_more =
- isolate_->global_handles()->PostGarbageCollectionProcessing(collector);
+ isolate_->global_handles()->PostGarbageCollectionProcessing(
+ collector, tracer);
}
gc_post_processing_depth_--;
@@ -974,22 +994,15 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector,
amount_of_external_allocated_memory_;
}
- GCCallbackFlags callback_flags = kNoGCCallbackFlags;
- for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
- if (gc_type & gc_epilogue_callbacks_[i].gc_type) {
- gc_epilogue_callbacks_[i].callback(gc_type, callback_flags);
- }
- }
-
- if (collector == MARK_COMPACTOR && global_gc_epilogue_callback_) {
- ASSERT(!allocation_allowed_);
+ {
GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
- global_gc_epilogue_callback_();
+ VMState state(isolate_, EXTERNAL);
+ CallGCEpilogueCallbacks(gc_type);
}
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
- VerifySymbolTable();
+ VerifyStringTable();
}
#endif
@@ -997,6 +1010,30 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector,
}
+void Heap::CallGCPrologueCallbacks(GCType gc_type) {
+ if (gc_type == kGCTypeMarkSweepCompact && global_gc_prologue_callback_) {
+ global_gc_prologue_callback_();
+ }
+ for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
+ if (gc_type & gc_prologue_callbacks_[i].gc_type) {
+ gc_prologue_callbacks_[i].callback(gc_type, kNoGCCallbackFlags);
+ }
+ }
+}
+
+
+void Heap::CallGCEpilogueCallbacks(GCType gc_type) {
+ for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
+ if (gc_type & gc_epilogue_callbacks_[i].gc_type) {
+ gc_epilogue_callbacks_[i].callback(gc_type, kNoGCCallbackFlags);
+ }
+ }
+ if (gc_type == kGCTypeMarkSweepCompact && global_gc_epilogue_callback_) {
+ global_gc_epilogue_callback_();
+ }
+}
+
+
void Heap::MarkCompact(GCTracer* tracer) {
gc_state_ = MARK_COMPACT;
LOG(isolate_, ResourceEvent("markcompact", "begin"));
@@ -1268,7 +1305,8 @@ void Heap::Scavenge() {
incremental_marking()->PrepareForScavenge();
- AdvanceSweepers(static_cast<int>(new_space_.Size()));
+ paged_space(OLD_DATA_SPACE)->EnsureSweeperProgress(new_space_.Size());
+ paged_space(OLD_POINTER_SPACE)->EnsureSweeperProgress(new_space_.Size());
// Flip the semispaces. After flipping, to space is empty, from space has
// live objects.
@@ -1334,10 +1372,12 @@ void Heap::Scavenge() {
new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
- while (IterateObjectGroups(&scavenge_visitor)) {
+ while (isolate()->global_handles()->IterateObjectGroups(
+ &scavenge_visitor, &IsUnscavengedHeapObject)) {
new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
}
isolate()->global_handles()->RemoveObjectGroups();
+ isolate()->global_handles()->RemoveImplicitRefGroups();
isolate_->global_handles()->IdentifyNewSpaceWeakIndependentHandles(
&IsUnscavengedHeapObject);
@@ -1348,9 +1388,10 @@ void Heap::Scavenge() {
UpdateNewSpaceReferencesInExternalStringTable(
&UpdateNewSpaceReferenceInExternalStringTableEntry);
+ error_object_list_.UpdateReferencesInNewSpace(this);
+
promotion_queue_.Destroy();
- LiveObjectList::UpdateReferencesForScavengeGC();
if (!FLAG_watch_ic_patching) {
isolate()->runtime_profiler()->UpdateSamplesAfterScavenge();
}
@@ -1379,53 +1420,9 @@ void Heap::Scavenge() {
}
-// TODO(mstarzinger): Unify this method with
-// MarkCompactCollector::MarkObjectGroups().
-bool Heap::IterateObjectGroups(ObjectVisitor* scavenge_visitor) {
- List<ObjectGroup*>* object_groups =
- isolate()->global_handles()->object_groups();
-
- int last = 0;
- bool changed = false;
- for (int i = 0; i < object_groups->length(); i++) {
- ObjectGroup* entry = object_groups->at(i);
- ASSERT(entry != NULL);
-
- Object*** objects = entry->objects_;
- bool group_marked = false;
- for (size_t j = 0; j < entry->length_; j++) {
- Object* object = *objects[j];
- if (object->IsHeapObject()) {
- if (!IsUnscavengedHeapObject(this, &object)) {
- group_marked = true;
- break;
- }
- }
- }
-
- if (!group_marked) {
- (*object_groups)[last++] = entry;
- continue;
- }
-
- for (size_t j = 0; j < entry->length_; ++j) {
- Object* object = *objects[j];
- if (object->IsHeapObject()) {
- scavenge_visitor->VisitPointer(&object);
- changed = true;
- }
- }
-
- entry->Dispose();
- object_groups->at(i) = NULL;
- }
- object_groups->Rewind(last);
- return changed;
-}
-
-
-HeapObject* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
- Object** p) {
+HeapObject* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(
+ Heap* heap,
+ Object** p) {
MapWord first_word = HeapObject::cast(*p)->map_word();
if (!first_word.IsForwardingAddress()) {
@@ -1459,7 +1456,8 @@ void Heap::UpdateNewSpaceReferencesInExternalStringTable(
if (target == NULL) continue;
- ASSERT(target->IsExternalString() || target->map()->has_external_resource());
+ ASSERT(target->IsExternalString() ||
+ target->map()->has_external_resource());
if (InNewSpace(target)) {
// String is still in new space. Update the table entry.
@@ -1623,10 +1621,11 @@ void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
AssertNoAllocation no_allocation;
- // Both the external string table and the symbol table may contain
+ // Both the external string table and the string table may contain
// external strings, but neither lists them exhaustively, nor is the
// intersection set empty. Therefore we iterate over the external string
- // table first, ignoring symbols, and then over the symbol table.
+ // table first, ignoring internalized strings, and then over the
+ // internalized string table.
class ExternalStringTableVisitorAdapter : public ObjectVisitor {
public:
@@ -1634,9 +1633,9 @@ void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
v8::ExternalResourceVisitor* visitor) : visitor_(visitor) {}
virtual void VisitPointers(Object** start, Object** end) {
for (Object** p = start; p < end; p++) {
- // Visit non-symbol external strings,
- // since symbols are listed in the symbol table.
- if (!(*p)->IsSymbol()) {
+ // Visit non-internalized external strings,
+ // since internalized strings are listed in the string table.
+ if (!(*p)->IsInternalizedString()) {
ASSERT((*p)->IsExternalString());
visitor_->VisitExternalString(Utils::ToLocal(
Handle<String>(String::cast(*p))));
@@ -1649,14 +1648,14 @@ void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
external_string_table_.Iterate(&external_string_table_visitor);
- class SymbolTableVisitorAdapter : public ObjectVisitor {
+ class StringTableVisitorAdapter : public ObjectVisitor {
public:
- explicit SymbolTableVisitorAdapter(
+ explicit StringTableVisitorAdapter(
v8::ExternalResourceVisitor* visitor) : visitor_(visitor) {}
virtual void VisitPointers(Object** start, Object** end) {
for (Object** p = start; p < end; p++) {
if ((*p)->IsExternalString()) {
- ASSERT((*p)->IsSymbol());
+ ASSERT((*p)->IsInternalizedString());
visitor_->VisitExternalString(Utils::ToLocal(
Handle<String>(String::cast(*p))));
}
@@ -1664,9 +1663,9 @@ void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
}
private:
v8::ExternalResourceVisitor* visitor_;
- } symbol_table_visitor(visitor);
+ } string_table_visitor(visitor);
- symbol_table()->IterateElements(&symbol_table_visitor);
+ string_table()->IterateElements(&string_table_visitor);
}
@@ -1763,7 +1762,7 @@ template<MarksHandling marks_handling,
class ScavengingVisitor : public StaticVisitorBase {
public:
static void Initialize() {
- table_.Register(kVisitSeqAsciiString, &EvacuateSeqAsciiString);
+ table_.Register(kVisitSeqOneByteString, &EvacuateSeqOneByteString);
table_.Register(kVisitSeqTwoByteString, &EvacuateSeqTwoByteString);
table_.Register(kVisitShortcutCandidate, &EvacuateShortcutCandidate);
table_.Register(kVisitByteArray, &EvacuateByteArray);
@@ -2007,11 +2006,11 @@ class ScavengingVisitor : public StaticVisitorBase {
}
- static inline void EvacuateSeqAsciiString(Map* map,
+ static inline void EvacuateSeqOneByteString(Map* map,
HeapObject** slot,
HeapObject* object) {
- int object_size = SeqAsciiString::cast(object)->
- SeqAsciiStringSize(map->instance_type());
+ int object_size = SeqOneByteString::cast(object)->
+ SeqOneByteStringSize(map->instance_type());
EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(
map, slot, object, object_size);
}
@@ -2203,6 +2202,8 @@ MaybeObject* Heap::AllocateMap(InstanceType instance_type,
map->set_inobject_properties(0);
map->set_pre_allocated_property_fields(0);
map->set_code_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
+ map->set_dependent_code(DependentCode::cast(empty_fixed_array()),
+ SKIP_WRITE_BARRIER);
map->init_back_pointer(undefined_value());
map->set_unused_property_fields(0);
map->set_instance_descriptors(empty_descriptor_array());
@@ -2274,11 +2275,11 @@ const Heap::StringTypeTable Heap::string_type_table[] = {
};
-const Heap::ConstantSymbolTable Heap::constant_symbol_table[] = {
-#define CONSTANT_SYMBOL_ELEMENT(name, contents) \
+const Heap::ConstantStringTable Heap::constant_string_table[] = {
+#define CONSTANT_STRING_ELEMENT(name, contents) \
{contents, k##name##RootIndex},
- SYMBOL_LIST(CONSTANT_SYMBOL_ELEMENT)
-#undef CONSTANT_SYMBOL_ELEMENT
+ INTERNALIZED_STRING_LIST(CONSTANT_STRING_ELEMENT)
+#undef CONSTANT_STRING_ELEMENT
};
@@ -2338,14 +2339,18 @@ bool Heap::CreateInitialMaps() {
// Fix the instance_descriptors for the existing maps.
meta_map()->set_code_cache(empty_fixed_array());
+ meta_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
meta_map()->init_back_pointer(undefined_value());
meta_map()->set_instance_descriptors(empty_descriptor_array());
fixed_array_map()->set_code_cache(empty_fixed_array());
+ fixed_array_map()->set_dependent_code(
+ DependentCode::cast(empty_fixed_array()));
fixed_array_map()->init_back_pointer(undefined_value());
fixed_array_map()->set_instance_descriptors(empty_descriptor_array());
oddball_map()->set_code_cache(empty_fixed_array());
+ oddball_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
oddball_map()->init_back_pointer(undefined_value());
oddball_map()->set_instance_descriptors(empty_descriptor_array());
@@ -2377,6 +2382,11 @@ bool Heap::CreateInitialMaps() {
}
set_heap_number_map(Map::cast(obj));
+ { MaybeObject* maybe_obj = AllocateMap(SYMBOL_TYPE, Symbol::kSize);
+ if (!maybe_obj->ToObject(&obj)) return false;
+ }
+ set_symbol_map(Map::cast(obj));
+
{ MaybeObject* maybe_obj = AllocateMap(FOREIGN_TYPE, Foreign::kSize);
if (!maybe_obj->ToObject(&obj)) return false;
}
@@ -2578,6 +2588,14 @@ bool Heap::CreateInitialMaps() {
}
set_message_object_map(Map::cast(obj));
+ Map* external_map;
+ { MaybeObject* maybe_obj =
+ AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize + kPointerSize);
+ if (!maybe_obj->To(&external_map)) return false;
+ }
+ external_map->set_is_extensible(false);
+ set_external_map(external_map);
+
ASSERT(!InNewSpace(empty_fixed_array()));
return true;
}
@@ -2673,13 +2691,13 @@ bool Heap::CreateApiObjects() {
void Heap::CreateJSEntryStub() {
JSEntryStub stub;
- set_js_entry_code(*stub.GetCode());
+ set_js_entry_code(*stub.GetCode(isolate()));
}
void Heap::CreateJSConstructEntryStub() {
JSConstructEntryStub stub;
- set_js_construct_entry_code(*stub.GetCode());
+ set_js_construct_entry_code(*stub.GetCode(isolate()));
}
@@ -2688,7 +2706,7 @@ void Heap::CreateFixedStubs() {
// for cooking and uncooking (check out frames.cc).
// This eliminates the need for doing dictionary lookup in the
// stub cache for these stubs.
- HandleScope scope;
+ HandleScope scope(isolate());
// gcc-4.4 has a problem generating correct code for the following snippet:
// { JSEntryStub stub;
// js_entry_code_ = *stub.GetCode();
@@ -2704,7 +2722,7 @@ void Heap::CreateFixedStubs() {
// create them if we need them during the creation of another stub.
// Stub creation mixes raw pointers and handles in an unsafe manner so
// we cannot create stubs while we are creating stubs.
- CodeStub::GenerateStubsAheadOfTime();
+ CodeStub::GenerateStubsAheadOfTime(isolate());
}
@@ -2729,17 +2747,17 @@ bool Heap::CreateInitialObjects() {
set_infinity_value(HeapNumber::cast(obj));
// The hole has not been created yet, but we want to put something
- // predictable in the gaps in the symbol table, so lets make that Smi zero.
+ // predictable in the gaps in the string table, so let's make that Smi zero.
set_the_hole_value(reinterpret_cast<Oddball*>(Smi::FromInt(0)));
- // Allocate initial symbol table.
- { MaybeObject* maybe_obj = SymbolTable::Allocate(kInitialSymbolTableSize);
+ // Allocate initial string table.
+ { MaybeObject* maybe_obj = StringTable::Allocate(kInitialStringTableSize);
if (!maybe_obj->ToObject(&obj)) return false;
}
- // Don't use set_symbol_table() due to asserts.
- roots_[kSymbolTableRootIndex] = obj;
+ // Don't use set_string_table() due to asserts.
+ roots_[kStringTableRootIndex] = obj;
- // Finish initializing oddballs after creating symboltable.
+ // Finish initializing oddballs after creating the string table.
{ MaybeObject* maybe_obj =
undefined_value()->Initialize("undefined",
nan_value(),
@@ -2795,31 +2813,25 @@ bool Heap::CreateInitialObjects() {
}
set_termination_exception(obj);
- // Allocate the empty string.
- { MaybeObject* maybe_obj = AllocateRawAsciiString(0, TENURED);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_empty_string(String::cast(obj));
-
- for (unsigned i = 0; i < ARRAY_SIZE(constant_symbol_table); i++) {
+ for (unsigned i = 0; i < ARRAY_SIZE(constant_string_table); i++) {
{ MaybeObject* maybe_obj =
- LookupAsciiSymbol(constant_symbol_table[i].contents);
+ InternalizeUtf8String(constant_string_table[i].contents);
if (!maybe_obj->ToObject(&obj)) return false;
}
- roots_[constant_symbol_table[i].index] = String::cast(obj);
+ roots_[constant_string_table[i].index] = String::cast(obj);
}
- // Allocate the hidden symbol which is used to identify the hidden properties
+ // Allocate the hidden string which is used to identify the hidden properties
// in JSObjects. The hash code has a special value so that it will not match
// the empty string when searching for the property. It cannot be part of the
// loop above because it needs to be allocated manually with the special
- // hash code in place. The hash code for the hidden_symbol is zero to ensure
+ // hash code in place. The hash code for the hidden_string is zero to ensure
// that it will always be at the first entry in property descriptors.
- { MaybeObject* maybe_obj =
- AllocateSymbol(CStrVector(""), 0, String::kEmptyStringHash);
+ { MaybeObject* maybe_obj = AllocateOneByteInternalizedString(
+ OneByteVector("", 0), String::kEmptyStringHash);
if (!maybe_obj->ToObject(&obj)) return false;
}
- hidden_symbol_ = String::cast(obj);
+ hidden_string_ = String::cast(obj);
// Allocate the foreign for __proto__.
{ MaybeObject* maybe_obj =
@@ -2869,9 +2881,9 @@ bool Heap::CreateInitialObjects() {
}
set_number_string_cache(FixedArray::cast(obj));
- // Allocate cache for single character ASCII strings.
+ // Allocate cache for single character one byte strings.
{ MaybeObject* maybe_obj =
- AllocateFixedArray(String::kMaxAsciiCharCode + 1, TENURED);
+ AllocateFixedArray(String::kMaxOneByteCharCode + 1, TENURED);
if (!maybe_obj->ToObject(&obj)) return false;
}
set_single_character_string_cache(FixedArray::cast(obj));
@@ -2927,6 +2939,7 @@ bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
RootListIndex writable_roots[] = {
kStoreBufferTopRootIndex,
kStackLimitRootIndex,
+ kNumberStringCacheRootIndex,
kInstanceofCacheFunctionRootIndex,
kInstanceofCacheMapRootIndex,
kInstanceofCacheAnswerRootIndex,
@@ -2940,7 +2953,7 @@ bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
kConstructStubDeoptPCOffsetRootIndex,
kGetterStubDeoptPCOffsetRootIndex,
kSetterStubDeoptPCOffsetRootIndex,
- kSymbolTableRootIndex,
+ kStringTableRootIndex,
};
for (unsigned int i = 0; i < ARRAY_SIZE(writable_roots); i++) {
@@ -2956,10 +2969,10 @@ Object* RegExpResultsCache::Lookup(Heap* heap,
Object* key_pattern,
ResultsCacheType type) {
FixedArray* cache;
- if (!key_string->IsSymbol()) return Smi::FromInt(0);
+ if (!key_string->IsInternalizedString()) return Smi::FromInt(0);
if (type == STRING_SPLIT_SUBSTRINGS) {
ASSERT(key_pattern->IsString());
- if (!key_pattern->IsSymbol()) return Smi::FromInt(0);
+ if (!key_pattern->IsInternalizedString()) return Smi::FromInt(0);
cache = heap->string_split_cache();
} else {
ASSERT(type == REGEXP_MULTIPLE_INDICES);
@@ -2990,10 +3003,10 @@ void RegExpResultsCache::Enter(Heap* heap,
FixedArray* value_array,
ResultsCacheType type) {
FixedArray* cache;
- if (!key_string->IsSymbol()) return;
+ if (!key_string->IsInternalizedString()) return;
if (type == STRING_SPLIT_SUBSTRINGS) {
ASSERT(key_pattern->IsString());
- if (!key_pattern->IsSymbol()) return;
+ if (!key_pattern->IsInternalizedString()) return;
cache = heap->string_split_cache();
} else {
ASSERT(type == REGEXP_MULTIPLE_INDICES);
@@ -3025,14 +3038,14 @@ void RegExpResultsCache::Enter(Heap* heap,
}
}
// If the array is a reasonably short list of substrings, convert it into a
- // list of symbols.
+ // list of internalized strings.
if (type == STRING_SPLIT_SUBSTRINGS && value_array->length() < 100) {
for (int i = 0; i < value_array->length(); i++) {
String* str = String::cast(value_array->get(i));
- Object* symbol;
- MaybeObject* maybe_symbol = heap->LookupSymbol(str);
- if (maybe_symbol->ToObject(&symbol)) {
- value_array->set(i, symbol);
+ Object* internalized_str;
+ MaybeObject* maybe_string = heap->InternalizeString(str);
+ if (maybe_string->ToObject(&internalized_str)) {
+ value_array->set(i, internalized_str);
}
}
}
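
RegExpResultsCache only accepts internalized keys, so cache hits can compare keys by identity instead of character-wise; the loop just above eagerly canonicalizes short result lists for the same reason. A hypothetical sketch of an identity-keyed, direct-mapped cache, with stand-in names throughout:

#include <cstdint>

struct Entry { const void* key; const void* value; };
Entry results_cache[64];  // stand-in for the FixedArray-backed cache

const void* CacheLookup(const void* interned_key) {
  uintptr_t h = reinterpret_cast<uintptr_t>(interned_key) >> 4;  // cheap hash
  Entry& e = results_cache[h & 63];
  return (e.key == interned_key) ? e.value : nullptr;  // identity compare
}

void CacheEnter(const void* interned_key, const void* value) {
  uintptr_t h = reinterpret_cast<uintptr_t>(interned_key) >> 4;
  results_cache[h & 63] = Entry{interned_key, value};  // overwrite the slot
}
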
@@ -3168,7 +3181,7 @@ MaybeObject* Heap::NumberToString(Object* number,
}
Object* js_string;
- MaybeObject* maybe_js_string = AllocateStringFromAscii(CStrVector(str));
+ MaybeObject* maybe_js_string = AllocateStringFromOneByte(CStrVector(str));
if (maybe_js_string->ToObject(&js_string)) {
SetNumberStringCache(number, String::cast(js_string));
}
@@ -3261,11 +3274,11 @@ MaybeObject* Heap::AllocateSharedFunctionInfo(Object* name) {
Code* illegal = isolate_->builtins()->builtin(Builtins::kIllegal);
share->set_code(illegal);
share->ClearOptimizedCodeMap();
- share->set_scope_info(ScopeInfo::Empty());
+ share->set_scope_info(ScopeInfo::Empty(isolate_));
Code* construct_stub =
isolate_->builtins()->builtin(Builtins::kJSConstructStubGeneric);
share->set_construct_stub(construct_stub);
- share->set_instance_class_name(Object_symbol());
+ share->set_instance_class_name(Object_string());
share->set_function_data(undefined_value(), SKIP_WRITE_BARRIER);
share->set_script(undefined_value(), SKIP_WRITE_BARRIER);
share->set_debug_info(undefined_value(), SKIP_WRITE_BARRIER);
@@ -3329,25 +3342,26 @@ static inline bool Between(uint32_t character, uint32_t from, uint32_t to) {
MUST_USE_RESULT static inline MaybeObject* MakeOrFindTwoCharacterString(
Heap* heap,
- uint32_t c1,
- uint32_t c2) {
- String* symbol;
+ uint16_t c1,
+ uint16_t c2) {
+ String* result;
// Numeric strings have a different hash algorithm not known by
- // LookupTwoCharsSymbolIfExists, so we skip this step for such strings.
+ // LookupTwoCharsStringIfExists, so we skip this step for such strings.
if ((!Between(c1, '0', '9') || !Between(c2, '0', '9')) &&
- heap->symbol_table()->LookupTwoCharsSymbolIfExists(c1, c2, &symbol)) {
- return symbol;
+ heap->string_table()->LookupTwoCharsStringIfExists(c1, c2, &result)) {
+ return result;
// Now we know the length is 2, we might as well make use of that fact
// when building the new string.
- } else if ((c1 | c2) <= String::kMaxAsciiCharCodeU) { // We can do this
- ASSERT(IsPowerOf2(String::kMaxAsciiCharCodeU + 1)); // because of this.
+ } else if (static_cast<unsigned>(c1 | c2) <= String::kMaxOneByteCharCodeU) {
+ // We can do this.
+ ASSERT(IsPowerOf2(String::kMaxOneByteCharCodeU + 1)); // because of this.
Object* result;
- { MaybeObject* maybe_result = heap->AllocateRawAsciiString(2);
+ { MaybeObject* maybe_result = heap->AllocateRawOneByteString(2);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
- char* dest = SeqAsciiString::cast(result)->GetChars();
- dest[0] = c1;
- dest[1] = c2;
+ uint8_t* dest = SeqOneByteString::cast(result)->GetChars();
+ dest[0] = static_cast<uint8_t>(c1);
+ dest[1] = static_cast<uint8_t>(c2);
return result;
} else {
Object* result;
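
The (c1 | c2) test in the one-byte branch above is only equivalent to checking both characters separately because kMaxOneByteCharCodeU + 1 is a power of two, which the adjacent ASSERT pins down. A small self-contained check of that equivalence, using a stand-in limit rather than V8's constant:

#include <cassert>

int main() {
  const unsigned kMax = 0xFF;  // kMax + 1 == 0x100 is a power of two
  for (unsigned c1 = 0; c1 <= 0x3FF; ++c1) {
    for (unsigned c2 = 0; c2 <= 0x3FF; ++c2) {
      bool both_fit = (c1 <= kMax) && (c2 <= kMax);
      // OR-ing cannot set a bit above the mask unless one input had it.
      assert(((c1 | c2) <= kMax) == both_fit);
    }
  }
  return 0;
}
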
@@ -3376,27 +3390,26 @@ MaybeObject* Heap::AllocateConsString(String* first, String* second) {
int length = first_length + second_length;
// Optimization for 2-byte strings often used as keys in a decompression
- // dictionary. Check whether we already have the string in the symbol
+ // dictionary. Check whether we already have the string in the string
// table to prevent creation of many unnecessary strings.
if (length == 2) {
- unsigned c1 = first->Get(0);
- unsigned c2 = second->Get(0);
+ uint16_t c1 = first->Get(0);
+ uint16_t c2 = second->Get(0);
return MakeOrFindTwoCharacterString(this, c1, c2);
}
- bool first_is_ascii = first->IsAsciiRepresentation();
- bool second_is_ascii = second->IsAsciiRepresentation();
- bool is_ascii = first_is_ascii && second_is_ascii;
-
+ bool first_is_one_byte = first->IsOneByteRepresentation();
+ bool second_is_one_byte = second->IsOneByteRepresentation();
+ bool is_one_byte = first_is_one_byte && second_is_one_byte;
// Make sure that an out of memory exception is thrown if the length
// of the new cons string is too large.
if (length > String::kMaxLength || length < 0) {
isolate()->context()->mark_out_of_memory();
- return Failure::OutOfMemoryException();
+ return Failure::OutOfMemoryException(0x4);
}
bool is_ascii_data_in_two_byte_string = false;
- if (!is_ascii) {
+ if (!is_one_byte) {
// At least one of the strings uses two-byte representation so we
// can't use the fast case code for short ASCII strings below, but
// we can try to save memory if all chars actually fit in ASCII.
@@ -3413,37 +3426,37 @@ MaybeObject* Heap::AllocateConsString(String* first, String* second) {
STATIC_ASSERT(ConsString::kMinLength <= SlicedString::kMinLength);
ASSERT(first->IsFlat());
ASSERT(second->IsFlat());
- if (is_ascii) {
+ if (is_one_byte) {
Object* result;
- { MaybeObject* maybe_result = AllocateRawAsciiString(length);
+ { MaybeObject* maybe_result = AllocateRawOneByteString(length);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
// Copy the characters into the new object.
- char* dest = SeqAsciiString::cast(result)->GetChars();
+ uint8_t* dest = SeqOneByteString::cast(result)->GetChars();
// Copy first part.
- const char* src;
+ const uint8_t* src;
if (first->IsExternalString()) {
src = ExternalAsciiString::cast(first)->GetChars();
} else {
- src = SeqAsciiString::cast(first)->GetChars();
+ src = SeqOneByteString::cast(first)->GetChars();
}
for (int i = 0; i < first_length; i++) *dest++ = src[i];
// Copy second part.
if (second->IsExternalString()) {
src = ExternalAsciiString::cast(second)->GetChars();
} else {
- src = SeqAsciiString::cast(second)->GetChars();
+ src = SeqOneByteString::cast(second)->GetChars();
}
for (int i = 0; i < second_length; i++) *dest++ = src[i];
return result;
} else {
if (is_ascii_data_in_two_byte_string) {
Object* result;
- { MaybeObject* maybe_result = AllocateRawAsciiString(length);
+ { MaybeObject* maybe_result = AllocateRawOneByteString(length);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
// Copy the characters into the new object.
- char* dest = SeqAsciiString::cast(result)->GetChars();
+ uint8_t* dest = SeqOneByteString::cast(result)->GetChars();
String::WriteToFlat(first, dest, 0, first_length);
String::WriteToFlat(second, dest + first_length, 0, second_length);
isolate_->counters()->string_add_runtime_ext_to_ascii()->Increment();
@@ -3462,7 +3475,7 @@ MaybeObject* Heap::AllocateConsString(String* first, String* second) {
}
}
- Map* map = (is_ascii || is_ascii_data_in_two_byte_string) ?
+ Map* map = (is_one_byte || is_ascii_data_in_two_byte_string) ?
cons_ascii_string_map() : cons_string_map();
Object* result;
@@ -3492,10 +3505,10 @@ MaybeObject* Heap::AllocateSubString(String* buffer,
return LookupSingleCharacterStringFromCode(buffer->Get(start));
} else if (length == 2) {
// Optimization for 2-byte strings often used as keys in a decompression
- // dictionary. Check whether we already have the string in the symbol
- // table to prevent creation of many unneccesary strings.
- unsigned c1 = buffer->Get(start);
- unsigned c2 = buffer->Get(start + 1);
+ // dictionary. Check whether we already have the string in the string
+ // table to prevent creation of many unnecessary strings.
+ uint16_t c1 = buffer->Get(start);
+ uint16_t c2 = buffer->Get(start + 1);
return MakeOrFindTwoCharacterString(this, c1, c2);
}
@@ -3510,17 +3523,17 @@ MaybeObject* Heap::AllocateSubString(String* buffer,
// WriteToFlat takes care of the case when an indirect string has a
// different encoding from its underlying string. These encodings may
// differ because of externalization.
- bool is_ascii = buffer->IsAsciiRepresentation();
- { MaybeObject* maybe_result = is_ascii
- ? AllocateRawAsciiString(length, pretenure)
+ bool is_one_byte = buffer->IsOneByteRepresentation();
+ { MaybeObject* maybe_result = is_one_byte
+ ? AllocateRawOneByteString(length, pretenure)
: AllocateRawTwoByteString(length, pretenure);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
String* string_result = String::cast(result);
// Copy the characters into the new object.
- if (is_ascii) {
- ASSERT(string_result->IsAsciiRepresentation());
- char* dest = SeqAsciiString::cast(string_result)->GetChars();
+ if (is_one_byte) {
+ ASSERT(string_result->IsOneByteRepresentation());
+ uint8_t* dest = SeqOneByteString::cast(string_result)->GetChars();
String::WriteToFlat(buffer, dest, start, end);
} else {
ASSERT(string_result->IsTwoByteRepresentation());
@@ -3544,7 +3557,7 @@ MaybeObject* Heap::AllocateSubString(String* buffer,
// indirect ASCII string is pointing to a two-byte string, the two-byte char
// codes of the underlying string must still fit into ASCII (because
// externalization must not change char codes).
- { Map* map = buffer->IsAsciiRepresentation()
+ { Map* map = buffer->IsOneByteRepresentation()
? sliced_ascii_string_map()
: sliced_string_map();
MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
@@ -3580,10 +3593,12 @@ MaybeObject* Heap::AllocateExternalStringFromAscii(
size_t length = resource->length();
if (length > static_cast<size_t>(String::kMaxLength)) {
isolate()->context()->mark_out_of_memory();
- return Failure::OutOfMemoryException();
+ return Failure::OutOfMemoryException(0x5);
}
+#ifndef ENABLE_LATIN_1
ASSERT(String::IsAscii(resource->data(), static_cast<int>(length)));
+#endif // ENABLE_LATIN_1
Map* map = external_ascii_string_map();
Object* result;
@@ -3605,15 +3620,15 @@ MaybeObject* Heap::AllocateExternalStringFromTwoByte(
size_t length = resource->length();
if (length > static_cast<size_t>(String::kMaxLength)) {
isolate()->context()->mark_out_of_memory();
- return Failure::OutOfMemoryException();
+ return Failure::OutOfMemoryException(0x6);
}
// For small strings we check whether the resource contains only
- // ASCII characters. If yes, we use a different string map.
+ // one byte characters. If yes, we use a different string map.
static const size_t kAsciiCheckLengthLimit = 32;
- bool is_ascii = length <= kAsciiCheckLengthLimit &&
- String::IsAscii(resource->data(), static_cast<int>(length));
- Map* map = is_ascii ?
+ bool is_one_byte = length <= kAsciiCheckLengthLimit &&
+ String::IsOneByte(resource->data(), static_cast<int>(length));
+ Map* map = is_one_byte ?
external_string_with_ascii_data_map() : external_string_map();
Object* result;
{ MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
@@ -3630,14 +3645,15 @@ MaybeObject* Heap::AllocateExternalStringFromTwoByte(
MaybeObject* Heap::LookupSingleCharacterStringFromCode(uint16_t code) {
- if (code <= String::kMaxAsciiCharCode) {
+ if (code <= String::kMaxOneByteCharCode) {
Object* value = single_character_string_cache()->get(code);
if (value != undefined_value()) return value;
- char buffer[1];
- buffer[0] = static_cast<char>(code);
+ uint8_t buffer[1];
+ buffer[0] = static_cast<uint8_t>(code);
Object* result;
- MaybeObject* maybe_result = LookupSymbol(Vector<const char>(buffer, 1));
+ MaybeObject* maybe_result =
+ InternalizeOneByteString(Vector<const uint8_t>(buffer, 1));
if (!maybe_result->ToObject(&result)) return maybe_result;
single_character_string_cache()->set(code, result);
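
LookupSingleCharacterStringFromCode above is a direct-mapped memo table: each one-byte code indexes a cache slot, with a sentinel value marking "not yet interned". A hypothetical standalone sketch of the same pattern:

#include <array>
#include <string>

class SingleCharCache {
 public:
  const std::string& Lookup(unsigned code) {
    std::string& slot = cache_.at(code);  // throws if code > 0xFF
    if (slot.empty()) {                   // empty string acts as the sentinel
      slot.assign(1, static_cast<char>(code));  // intern on first use
    }
    return slot;
  }
 private:
  std::array<std::string, 0x100> cache_;  // one slot per one-byte code
};
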
@@ -3656,7 +3672,7 @@ MaybeObject* Heap::LookupSingleCharacterStringFromCode(uint16_t code) {
MaybeObject* Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
if (length < 0 || length > ByteArray::kMaxLength) {
- return Failure::OutOfMemoryException();
+ return Failure::OutOfMemoryException(0x7);
}
if (pretenure == NOT_TENURED) {
return AllocateByteArray(length);
@@ -3678,7 +3694,7 @@ MaybeObject* Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
MaybeObject* Heap::AllocateByteArray(int length) {
if (length < 0 || length > ByteArray::kMaxLength) {
- return Failure::OutOfMemoryException();
+ return Failure::OutOfMemoryException(0x8);
}
int size = ByteArray::SizeFor(length);
AllocationSpace space =
@@ -3779,10 +3795,14 @@ MaybeObject* Heap::CreateCode(const CodeDesc& desc,
code->set_check_type(RECEIVER_MAP_CHECK);
}
code->set_deoptimization_data(empty_fixed_array(), SKIP_WRITE_BARRIER);
- code->set_type_feedback_info(undefined_value(), SKIP_WRITE_BARRIER);
+ code->InitializeTypeFeedbackInfoNoWriteBarrier(undefined_value());
code->set_handler_table(empty_fixed_array(), SKIP_WRITE_BARRIER);
code->set_gc_metadata(Smi::FromInt(0));
code->set_ic_age(global_ic_age_);
+ code->set_prologue_offset(kPrologueOffsetNotSet);
+ if (code->kind() == Code::OPTIMIZED_FUNCTION) {
+ code->set_marked_for_deoptimization(false);
+ }
// Allow self references to created code object by patching the handle to
// point to the newly allocated Code object.
if (!self_reference.is_null()) {
@@ -3886,6 +3906,28 @@ MaybeObject* Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
}
+MaybeObject* Heap::AllocateWithAllocationSite(Map* map, AllocationSpace space,
+ Handle<Object> allocation_site_info_payload) {
+ ASSERT(gc_state_ == NOT_IN_GC);
+ ASSERT(map->instance_type() != MAP_TYPE);
+ // If allocation failures are disallowed, we may allocate in a different
+ // space when new space is full and the object is not a large object.
+ AllocationSpace retry_space =
+ (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
+ int size = map->instance_size() + AllocationSiteInfo::kSize;
+ Object* result;
+ MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
+ if (!maybe_result->ToObject(&result)) return maybe_result;
+ // No need for write barrier since object is white and map is in old space.
+ HeapObject::cast(result)->set_map_no_write_barrier(map);
+ AllocationSiteInfo* alloc_info = reinterpret_cast<AllocationSiteInfo*>(
+ reinterpret_cast<Address>(result) + map->instance_size());
+ alloc_info->set_map_no_write_barrier(allocation_site_info_map());
+ alloc_info->set_payload(*allocation_site_info_payload, SKIP_WRITE_BARRIER);
+ return result;
+}
+
+
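
AllocateWithAllocationSite above reserves instance_size plus AllocationSiteInfo::kSize in a single raw allocation and places the site info immediately behind the object. A sketch of that trailing-header layout, assuming pointer-aligned instance sizes (as V8 object sizes are) and hypothetical names:

#include <cstddef>
#include <cstdlib>
#include <new>

struct SiteInfo { const void* payload; };

void* AllocateWithTrailer(std::size_t instance_size, const void* payload) {
  // One block: [ object bytes ... ][ SiteInfo ]; caller owns and frees it.
  void* base = std::malloc(instance_size + sizeof(SiteInfo));
  if (base == nullptr) return nullptr;
  void* trailer_addr = static_cast<char*>(base) + instance_size;
  new (trailer_addr) SiteInfo{payload};  // placement-new the trailing header
  return base;  // the trailer is recovered later from base + instance_size
}
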
MaybeObject* Heap::Allocate(Map* map, AllocationSpace space) {
ASSERT(gc_state_ == NOT_IN_GC);
ASSERT(map->instance_type() != MAP_TYPE);
@@ -3893,11 +3935,10 @@ MaybeObject* Heap::Allocate(Map* map, AllocationSpace space) {
// space when new space is full and the object is not a large object.
AllocationSpace retry_space =
(space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
+ int size = map->instance_size();
Object* result;
- { MaybeObject* maybe_result =
- AllocateRaw(map->instance_size(), space, retry_space);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
+ MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
+ if (!maybe_result->ToObject(&result)) return maybe_result;
// No need for write barrier since object is white and map is in old space.
HeapObject::cast(result)->set_map_no_write_barrier(map);
return result;
@@ -3942,7 +3983,7 @@ MaybeObject* Heap::AllocateFunctionPrototype(JSFunction* function) {
// constructor to the function.
MaybeObject* maybe_failure =
JSObject::cast(prototype)->SetLocalPropertyIgnoreAttributes(
- constructor_symbol(), function, DONT_ENUM);
+ constructor_string(), function, DONT_ENUM);
if (maybe_failure->IsFailure()) return maybe_failure;
return prototype;
@@ -4081,7 +4122,7 @@ MaybeObject* Heap::AllocateInitialMap(JSFunction* fun) {
DescriptorArray::WhitenessWitness witness(descriptors);
for (int i = 0; i < count; i++) {
String* name = fun->shared()->GetThisPropertyAssignmentName(i);
- ASSERT(name->IsSymbol());
+ ASSERT(name->IsInternalizedString());
FieldDescriptor field(name, i, NONE, i + 1);
descriptors->Set(i, &field, witness);
}
@@ -4165,15 +4206,53 @@ MaybeObject* Heap::AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure) {
(pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
if (map->instance_size() > Page::kMaxNonCodeHeapObjectSize) space = LO_SPACE;
Object* obj;
- { MaybeObject* maybe_obj = Allocate(map, space);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
+ MaybeObject* maybe_obj = Allocate(map, space);
+ if (!maybe_obj->To(&obj)) return maybe_obj;
+
+ // Initialize the JSObject.
+ InitializeJSObjectFromMap(JSObject::cast(obj),
+ FixedArray::cast(properties),
+ map);
+ ASSERT(JSObject::cast(obj)->HasFastElements());
+ return obj;
+}
+
+
+MaybeObject* Heap::AllocateJSObjectFromMapWithAllocationSite(Map* map,
+ Handle<Object> allocation_site_info_payload) {
+ // JSFunctions should be allocated using AllocateFunction to be
+ // properly initialized.
+ ASSERT(map->instance_type() != JS_FUNCTION_TYPE);
+
+ // Both types of global objects should be allocated using
+ // AllocateGlobalObject to be properly initialized.
+ ASSERT(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
+ ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
+
+ // Allocate the backing storage for the properties.
+ int prop_size =
+ map->pre_allocated_property_fields() +
+ map->unused_property_fields() -
+ map->inobject_properties();
+ ASSERT(prop_size >= 0);
+ Object* properties;
+ { MaybeObject* maybe_properties = AllocateFixedArray(prop_size);
+ if (!maybe_properties->ToObject(&properties)) return maybe_properties;
}
+ // Allocate the JSObject.
+ AllocationSpace space = NEW_SPACE;
+ if (map->instance_size() > Page::kMaxNonCodeHeapObjectSize) space = LO_SPACE;
+ Object* obj;
+ MaybeObject* maybe_obj = AllocateWithAllocationSite(map, space,
+ allocation_site_info_payload);
+ if (!maybe_obj->To(&obj)) return maybe_obj;
+
// Initialize the JSObject.
InitializeJSObjectFromMap(JSObject::cast(obj),
FixedArray::cast(properties),
map);
- ASSERT(JSObject::cast(obj)->HasFastSmiOrObjectElements());
+ ASSERT(JSObject::cast(obj)->HasFastElements());
return obj;
}
@@ -4201,6 +4280,51 @@ MaybeObject* Heap::AllocateJSObject(JSFunction* constructor,
}
+MaybeObject* Heap::AllocateJSObjectWithAllocationSite(JSFunction* constructor,
+ Handle<Object> allocation_site_info_payload) {
+ // Allocate the initial map if absent.
+ if (!constructor->has_initial_map()) {
+ Object* initial_map;
+ { MaybeObject* maybe_initial_map = AllocateInitialMap(constructor);
+ if (!maybe_initial_map->ToObject(&initial_map)) return maybe_initial_map;
+ }
+ constructor->set_initial_map(Map::cast(initial_map));
+ Map::cast(initial_map)->set_constructor(constructor);
+ }
+ // Allocate the object based on the constructor's initial map, or the
+ // payload advice.
+ Map* initial_map = constructor->initial_map();
+
+ JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(
+ *allocation_site_info_payload);
+ Smi* smi = Smi::cast(cell->value());
+ ElementsKind to_kind = static_cast<ElementsKind>(smi->value());
+ AllocationSiteMode mode = TRACK_ALLOCATION_SITE;
+ if (to_kind != initial_map->elements_kind()) {
+ MaybeObject* maybe_new_map = constructor->GetElementsTransitionMap(
+ isolate(), to_kind);
+ if (!maybe_new_map->To(&initial_map)) return maybe_new_map;
+ // Possibly alter the mode, since we found an updated elements kind
+ // in the type info cell.
+ mode = AllocationSiteInfo::GetMode(to_kind);
+ }
+
+ MaybeObject* result;
+ if (mode == TRACK_ALLOCATION_SITE) {
+ result = AllocateJSObjectFromMapWithAllocationSite(initial_map,
+ allocation_site_info_payload);
+ } else {
+ result = AllocateJSObjectFromMap(initial_map, NOT_TENURED);
+ }
+#ifdef DEBUG
+ // Make sure result is NOT a global object if valid.
+ Object* non_failure;
+ ASSERT(!result->ToObject(&non_failure) || !non_failure->IsGlobalObject());
+#endif
+ return result;
+}
+
+
MaybeObject* Heap::AllocateJSModule(Context* context, ScopeInfo* scope_info) {
// Allocate a fresh map. Modules do not have a prototype.
Map* map;
@@ -4222,13 +4346,66 @@ MaybeObject* Heap::AllocateJSArrayAndStorage(
int capacity,
ArrayStorageAllocationMode mode,
PretenureFlag pretenure) {
+ MaybeObject* maybe_array = AllocateJSArray(elements_kind, pretenure);
+ JSArray* array;
+ if (!maybe_array->To(&array)) return maybe_array;
+
+ // TODO(mvstanton): this body of code duplicates AllocateJSArrayStorage
+ // for performance reasons.
ASSERT(capacity >= length);
- if (length != 0 && mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE) {
- elements_kind = GetHoleyElementsKind(elements_kind);
+
+ if (capacity == 0) {
+ array->set_length(Smi::FromInt(0));
+ array->set_elements(empty_fixed_array());
+ return array;
}
- MaybeObject* maybe_array = AllocateJSArray(elements_kind, pretenure);
+
+ FixedArrayBase* elms;
+ MaybeObject* maybe_elms = NULL;
+ if (IsFastDoubleElementsKind(elements_kind)) {
+ if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
+ maybe_elms = AllocateUninitializedFixedDoubleArray(capacity);
+ } else {
+ ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
+ maybe_elms = AllocateFixedDoubleArrayWithHoles(capacity);
+ }
+ } else {
+ ASSERT(IsFastSmiOrObjectElementsKind(elements_kind));
+ if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
+ maybe_elms = AllocateUninitializedFixedArray(capacity);
+ } else {
+ ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
+ maybe_elms = AllocateFixedArrayWithHoles(capacity);
+ }
+ }
+ if (!maybe_elms->To(&elms)) return maybe_elms;
+
+ array->set_elements(elms);
+ array->set_length(Smi::FromInt(length));
+ return array;
+}
+
+
+MaybeObject* Heap::AllocateJSArrayAndStorageWithAllocationSite(
+ ElementsKind elements_kind,
+ int length,
+ int capacity,
+ Handle<Object> allocation_site_payload,
+ ArrayStorageAllocationMode mode) {
+ MaybeObject* maybe_array = AllocateJSArrayWithAllocationSite(elements_kind,
+ allocation_site_payload);
JSArray* array;
if (!maybe_array->To(&array)) return maybe_array;
+ return AllocateJSArrayStorage(array, length, capacity, mode);
+}
+
+
+MaybeObject* Heap::AllocateJSArrayStorage(
+ JSArray* array,
+ int length,
+ int capacity,
+ ArrayStorageAllocationMode mode) {
+ ASSERT(capacity >= length);
if (capacity == 0) {
array->set_length(Smi::FromInt(0));
@@ -4238,7 +4415,8 @@ MaybeObject* Heap::AllocateJSArrayAndStorage(
FixedArrayBase* elms;
MaybeObject* maybe_elms = NULL;
- if (elements_kind == FAST_DOUBLE_ELEMENTS) {
+ ElementsKind elements_kind = array->GetElementsKind();
+ if (IsFastDoubleElementsKind(elements_kind)) {
if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
maybe_elms = AllocateUninitializedFixedDoubleArray(capacity);
} else {
@@ -4265,13 +4443,14 @@ MaybeObject* Heap::AllocateJSArrayAndStorage(
MaybeObject* Heap::AllocateJSArrayWithElements(
FixedArrayBase* elements,
ElementsKind elements_kind,
+ int length,
PretenureFlag pretenure) {
MaybeObject* maybe_array = AllocateJSArray(elements_kind, pretenure);
JSArray* array;
if (!maybe_array->To(&array)) return maybe_array;
array->set_elements(elements);
- array->set_length(Smi::FromInt(elements->length()));
+ array->set_length(Smi::FromInt(length));
array->ValidateElements();
return array;
}
@@ -4420,6 +4599,7 @@ MaybeObject* Heap::CopyJSObject(JSObject* source) {
(object_size - JSObject::kHeaderSize) / kPointerSize);
} else {
wb_mode = SKIP_WRITE_BARRIER;
+
{ MaybeObject* maybe_clone = new_space_.AllocateRaw(object_size);
if (!maybe_clone->ToObject(&clone)) return maybe_clone;
}
@@ -4463,6 +4643,113 @@ MaybeObject* Heap::CopyJSObject(JSObject* source) {
}
+MaybeObject* Heap::CopyJSObjectWithAllocationSite(JSObject* source) {
+ // Never used to copy functions. If functions need to be copied we
+ // have to be careful to clear the literals array.
+ SLOW_ASSERT(!source->IsJSFunction());
+
+ // Make the clone.
+ Map* map = source->map();
+ int object_size = map->instance_size();
+ Object* clone;
+
+ ASSERT(map->CanTrackAllocationSite());
+ ASSERT(map->instance_type() == JS_ARRAY_TYPE);
+ WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER;
+
+ // If we're forced to always allocate, we use the general allocation
+ // functions which may leave us with an object in old space.
+ int adjusted_object_size = object_size;
+ if (always_allocate()) {
+ // We'll only track origin if we are certain to allocate in new space
+ const int kMinFreeNewSpaceAfterGC = InitialSemiSpaceSize() * 3/4;
+ if ((object_size + AllocationSiteInfo::kSize) < kMinFreeNewSpaceAfterGC) {
+ adjusted_object_size += AllocationSiteInfo::kSize;
+ }
+
+ { MaybeObject* maybe_clone =
+ AllocateRaw(adjusted_object_size, NEW_SPACE, OLD_POINTER_SPACE);
+ if (!maybe_clone->ToObject(&clone)) return maybe_clone;
+ }
+ Address clone_address = HeapObject::cast(clone)->address();
+ CopyBlock(clone_address,
+ source->address(),
+ object_size);
+ // Update write barrier for all fields that lie beyond the header.
+ int write_barrier_offset = adjusted_object_size > object_size
+ ? JSArray::kSize + AllocationSiteInfo::kSize
+ : JSObject::kHeaderSize;
+ if (((object_size - write_barrier_offset) / kPointerSize) > 0) {
+ RecordWrites(clone_address,
+ write_barrier_offset,
+ (object_size - write_barrier_offset) / kPointerSize);
+ }
+
+ // Track allocation site information, if we failed to allocate it inline.
+ if (InNewSpace(clone) &&
+ adjusted_object_size == object_size) {
+ MaybeObject* maybe_alloc_info =
+ AllocateStruct(ALLOCATION_SITE_INFO_TYPE);
+ AllocationSiteInfo* alloc_info;
+ if (maybe_alloc_info->To(&alloc_info)) {
+ alloc_info->set_map_no_write_barrier(allocation_site_info_map());
+ alloc_info->set_payload(source, SKIP_WRITE_BARRIER);
+ }
+ }
+ } else {
+ wb_mode = SKIP_WRITE_BARRIER;
+ adjusted_object_size += AllocationSiteInfo::kSize;
+
+ { MaybeObject* maybe_clone = new_space_.AllocateRaw(adjusted_object_size);
+ if (!maybe_clone->ToObject(&clone)) return maybe_clone;
+ }
+ SLOW_ASSERT(InNewSpace(clone));
+ // Since we know the clone is allocated in new space, we can copy
+ // the contents without worrying about updating the write barrier.
+ CopyBlock(HeapObject::cast(clone)->address(),
+ source->address(),
+ object_size);
+ }
+
+ if (adjusted_object_size > object_size) {
+ AllocationSiteInfo* alloc_info = reinterpret_cast<AllocationSiteInfo*>(
+ reinterpret_cast<Address>(clone) + object_size);
+ alloc_info->set_map_no_write_barrier(allocation_site_info_map());
+ alloc_info->set_payload(source, SKIP_WRITE_BARRIER);
+ }
+
+ SLOW_ASSERT(
+ JSObject::cast(clone)->GetElementsKind() == source->GetElementsKind());
+ FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
+ FixedArray* properties = FixedArray::cast(source->properties());
+ // Update elements if necessary.
+ if (elements->length() > 0) {
+ Object* elem;
+ { MaybeObject* maybe_elem;
+ if (elements->map() == fixed_cow_array_map()) {
+ maybe_elem = FixedArray::cast(elements);
+ } else if (source->HasFastDoubleElements()) {
+ maybe_elem = CopyFixedDoubleArray(FixedDoubleArray::cast(elements));
+ } else {
+ maybe_elem = CopyFixedArray(FixedArray::cast(elements));
+ }
+ if (!maybe_elem->ToObject(&elem)) return maybe_elem;
+ }
+ JSObject::cast(clone)->set_elements(FixedArrayBase::cast(elem), wb_mode);
+ }
+ // Update properties if necessary.
+ if (properties->length() > 0) {
+ Object* prop;
+ { MaybeObject* maybe_prop = CopyFixedArray(properties);
+ if (!maybe_prop->ToObject(&prop)) return maybe_prop;
+ }
+ JSObject::cast(clone)->set_properties(FixedArray::cast(prop), wb_mode);
+ }
+ // Return the new clone.
+ return clone;
+}
+
+
MaybeObject* Heap::ReinitializeJSReceiver(
JSReceiver* object, InstanceType type, int size) {
ASSERT(type >= FIRST_JS_OBJECT_TYPE);
@@ -4489,7 +4776,8 @@ MaybeObject* Heap::ReinitializeJSReceiver(
SharedFunctionInfo* shared = NULL;
if (type == JS_FUNCTION_TYPE) {
String* name;
- maybe = LookupAsciiSymbol("<freezing call trap>");
+ maybe =
+ InternalizeOneByteString(STATIC_ASCII_VECTOR("<freezing call trap>"));
if (!maybe->To<String>(&name)) return maybe;
maybe = AllocateSharedFunctionInfo(name);
if (!maybe->To<SharedFunctionInfo>(&shared)) return maybe;
@@ -4549,7 +4837,7 @@ MaybeObject* Heap::ReinitializeJSGlobalProxy(JSFunction* constructor,
}
-MaybeObject* Heap::AllocateStringFromAscii(Vector<const char> string,
+MaybeObject* Heap::AllocateStringFromOneByte(Vector<const uint8_t> string,
PretenureFlag pretenure) {
int length = string.length();
if (length == 1) {
@@ -4557,12 +4845,14 @@ MaybeObject* Heap::AllocateStringFromAscii(Vector<const char> string,
}
Object* result;
{ MaybeObject* maybe_result =
- AllocateRawAsciiString(string.length(), pretenure);
+ AllocateRawOneByteString(string.length(), pretenure);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
// Copy the characters into the new object.
- CopyChars(SeqAsciiString::cast(result)->GetChars(), string.start(), length);
+ CopyChars(SeqOneByteString::cast(result)->GetChars(),
+ string.start(),
+ length);
return result;
}
@@ -4572,37 +4862,31 @@ MaybeObject* Heap::AllocateStringFromUtf8Slow(Vector<const char> string,
PretenureFlag pretenure) {
// Continue counting the number of characters in the UTF-8 string, starting
// from the first non-ascii character or word.
- int chars = non_ascii_start;
Access<UnicodeCache::Utf8Decoder>
decoder(isolate_->unicode_cache()->utf8_decoder());
- decoder->Reset(string.start() + non_ascii_start, string.length() - chars);
- while (decoder->has_more()) {
- uint32_t r = decoder->GetNext();
- if (r <= unibrow::Utf16::kMaxNonSurrogateCharCode) {
- chars++;
- } else {
- chars += 2;
- }
- }
-
+ decoder->Reset(string.start() + non_ascii_start,
+ string.length() - non_ascii_start);
+ int utf16_length = decoder->Utf16Length();
+ ASSERT(utf16_length > 0);
+ // Allocate string.
Object* result;
- { MaybeObject* maybe_result = AllocateRawTwoByteString(chars, pretenure);
+ {
+ int chars = non_ascii_start + utf16_length;
+ MaybeObject* maybe_result = AllocateRawTwoByteString(chars, pretenure);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
-
// Convert and copy the characters into the new object.
SeqTwoByteString* twobyte = SeqTwoByteString::cast(result);
- decoder->Reset(string.start(), string.length());
- int i = 0;
- while (i < chars) {
- uint32_t r = decoder->GetNext();
- if (r > unibrow::Utf16::kMaxNonSurrogateCharCode) {
- twobyte->SeqTwoByteStringSet(i++, unibrow::Utf16::LeadSurrogate(r));
- twobyte->SeqTwoByteStringSet(i++, unibrow::Utf16::TrailSurrogate(r));
- } else {
- twobyte->SeqTwoByteStringSet(i++, r);
+ // Copy ascii portion.
+ uint16_t* data = twobyte->GetChars();
+ if (non_ascii_start != 0) {
+ const char* ascii_data = string.start();
+ for (int i = 0; i < non_ascii_start; i++) {
+ *data++ = *ascii_data++;
}
}
+ // Now write the remainder.
+ decoder->WriteUtf16(data, utf16_length);
return result;
}
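
The rewritten slow path sizes the destination up front (Utf16Length) instead of decoding the stream twice; the sizing rule is one UTF-16 unit per code point, two for supplementary-plane code points. A sketch of that rule over already-decoded code points (the real code works on raw UTF-8 bytes):

#include <cstdint>
#include <vector>

std::size_t Utf16LengthOf(const std::vector<uint32_t>& code_points) {
  std::size_t units = 0;
  for (uint32_t cp : code_points) {
    units += (cp > 0xFFFF) ? 2 : 1;  // above 0xFFFF needs a surrogate pair
  }
  return units;  // allocate a two-byte string of exactly this many units
}
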
@@ -4614,11 +4898,11 @@ MaybeObject* Heap::AllocateStringFromTwoByte(Vector<const uc16> string,
int length = string.length();
const uc16* start = string.start();
- if (String::IsAscii(start, length)) {
- MaybeObject* maybe_result = AllocateRawAsciiString(length, pretenure);
+ if (String::IsOneByte(start, length)) {
+ MaybeObject* maybe_result = AllocateRawOneByteString(length, pretenure);
if (!maybe_result->ToObject(&result)) return maybe_result;
- CopyChars(SeqAsciiString::cast(result)->GetChars(), start, length);
- } else { // It's not an ASCII string.
+ CopyChars(SeqOneByteString::cast(result)->GetChars(), start, length);
+ } else { // It's not a one byte string.
MaybeObject* maybe_result = AllocateRawTwoByteString(length, pretenure);
if (!maybe_result->ToObject(&result)) return maybe_result;
CopyChars(SeqTwoByteString::cast(result)->GetChars(), start, length);
@@ -4627,61 +4911,98 @@ MaybeObject* Heap::AllocateStringFromTwoByte(Vector<const uc16> string,
}
-Map* Heap::SymbolMapForString(String* string) {
- // If the string is in new space it cannot be used as a symbol.
+Map* Heap::InternalizedStringMapForString(String* string) {
+ // If the string is in new space it cannot be used as an internalized string.
if (InNewSpace(string)) return NULL;
- // Find the corresponding symbol map for strings.
+ // Find the corresponding internalized string map for strings.
switch (string->map()->instance_type()) {
- case STRING_TYPE: return symbol_map();
- case ASCII_STRING_TYPE: return ascii_symbol_map();
- case CONS_STRING_TYPE: return cons_symbol_map();
- case CONS_ASCII_STRING_TYPE: return cons_ascii_symbol_map();
- case EXTERNAL_STRING_TYPE: return external_symbol_map();
- case EXTERNAL_ASCII_STRING_TYPE: return external_ascii_symbol_map();
+ case STRING_TYPE: return internalized_string_map();
+ case ASCII_STRING_TYPE: return ascii_internalized_string_map();
+ case CONS_STRING_TYPE: return cons_internalized_string_map();
+ case CONS_ASCII_STRING_TYPE: return cons_ascii_internalized_string_map();
+ case EXTERNAL_STRING_TYPE: return external_internalized_string_map();
+ case EXTERNAL_ASCII_STRING_TYPE:
+ return external_ascii_internalized_string_map();
case EXTERNAL_STRING_WITH_ASCII_DATA_TYPE:
- return external_symbol_with_ascii_data_map();
- case SHORT_EXTERNAL_STRING_TYPE: return short_external_symbol_map();
+ return external_internalized_string_with_ascii_data_map();
+ case SHORT_EXTERNAL_STRING_TYPE:
+ return short_external_internalized_string_map();
case SHORT_EXTERNAL_ASCII_STRING_TYPE:
- return short_external_ascii_symbol_map();
+ return short_external_ascii_internalized_string_map();
case SHORT_EXTERNAL_STRING_WITH_ASCII_DATA_TYPE:
- return short_external_symbol_with_ascii_data_map();
+ return short_external_internalized_string_with_ascii_data_map();
default: return NULL; // No match found.
}
}
-MaybeObject* Heap::AllocateInternalSymbol(unibrow::CharacterStream* buffer,
- int chars,
- uint32_t hash_field) {
- ASSERT(chars >= 0);
- // Ensure the chars matches the number of characters in the buffer.
- ASSERT(static_cast<unsigned>(chars) == buffer->Utf16Length());
- // Determine whether the string is ASCII.
- bool is_ascii = true;
- while (buffer->has_more()) {
- if (buffer->GetNext() > unibrow::Utf8::kMaxOneByteChar) {
- is_ascii = false;
- break;
+static inline void WriteOneByteData(Vector<const char> vector,
+ uint8_t* chars,
+ int len) {
+ // Only works for ascii.
+ ASSERT(vector.length() == len);
+ memcpy(chars, vector.start(), len);
+}
+
+static inline void WriteTwoByteData(Vector<const char> vector,
+ uint16_t* chars,
+ int len) {
+ const uint8_t* stream = reinterpret_cast<const uint8_t*>(vector.start());
+ unsigned stream_length = vector.length();
+ while (stream_length != 0) {
+ unsigned consumed = 0;
+ uint32_t c = unibrow::Utf8::ValueOf(stream, stream_length, &consumed);
+ ASSERT(c != unibrow::Utf8::kBadChar);
+ ASSERT(consumed <= stream_length);
+ stream_length -= consumed;
+ stream += consumed;
+ if (c > unibrow::Utf16::kMaxNonSurrogateCharCode) {
+ len -= 2;
+ if (len < 0) break;
+ *chars++ = unibrow::Utf16::LeadSurrogate(c);
+ *chars++ = unibrow::Utf16::TrailSurrogate(c);
+ } else {
+ len -= 1;
+ if (len < 0) break;
+ *chars++ = c;
}
}
- buffer->Rewind();
+ ASSERT(stream_length == 0);
+ ASSERT(len == 0);
+}
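
WriteTwoByteData above splits supplementary code points into surrogate pairs via LeadSurrogate/TrailSurrogate. A self-contained sketch of the standard arithmetic behind such helpers, with a round-trip check:

#include <cassert>
#include <cstdint>

inline uint16_t Lead(uint32_t cp) {
  return static_cast<uint16_t>(0xD800 + ((cp - 0x10000) >> 10));
}
inline uint16_t Trail(uint32_t cp) {
  return static_cast<uint16_t>(0xDC00 + ((cp - 0x10000) & 0x3FF));
}

int main() {
  uint32_t cp = 0x1F600;  // a code point above the BMP
  assert(Lead(cp) == 0xD83D && Trail(cp) == 0xDE00);
  // Recombine the pair into the original code point.
  uint32_t rt = 0x10000 + ((Lead(cp) - 0xD800u) << 10) + (Trail(cp) - 0xDC00u);
  assert(rt == cp);
  return 0;
}
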
+
+static inline void WriteOneByteData(String* s, uint8_t* chars, int len) {
+ ASSERT(s->length() == len);
+ String::WriteToFlat(s, chars, 0, len);
+}
+
+static inline void WriteTwoByteData(String* s, uint16_t* chars, int len) {
+ ASSERT(s->length() == len);
+ String::WriteToFlat(s, chars, 0, len);
+}
+
+
+template<bool is_one_byte, typename T>
+MaybeObject* Heap::AllocateInternalizedStringImpl(
+ T t, int chars, uint32_t hash_field) {
+ ASSERT(chars >= 0);
// Compute map and object size.
int size;
Map* map;
- if (is_ascii) {
- if (chars > SeqAsciiString::kMaxLength) {
- return Failure::OutOfMemoryException();
+ if (is_one_byte) {
+ if (chars > SeqOneByteString::kMaxLength) {
+ return Failure::OutOfMemoryException(0x9);
}
- map = ascii_symbol_map();
- size = SeqAsciiString::SizeFor(chars);
+ map = ascii_internalized_string_map();
+ size = SeqOneByteString::SizeFor(chars);
} else {
if (chars > SeqTwoByteString::kMaxLength) {
- return Failure::OutOfMemoryException();
+ return Failure::OutOfMemoryException(0xa);
}
- map = symbol_map();
+ map = internalized_string_map();
size = SeqTwoByteString::SizeFor(chars);
}
@@ -4702,28 +5023,34 @@ MaybeObject* Heap::AllocateInternalSymbol(unibrow::CharacterStream* buffer,
ASSERT_EQ(size, answer->Size());
- // Fill in the characters.
- int i = 0;
- while (i < chars) {
- uint32_t character = buffer->GetNext();
- if (character > unibrow::Utf16::kMaxNonSurrogateCharCode) {
- answer->Set(i++, unibrow::Utf16::LeadSurrogate(character));
- answer->Set(i++, unibrow::Utf16::TrailSurrogate(character));
- } else {
- answer->Set(i++, character);
- }
+ if (is_one_byte) {
+ WriteOneByteData(t, SeqOneByteString::cast(answer)->GetChars(), chars);
+ } else {
+ WriteTwoByteData(t, SeqTwoByteString::cast(answer)->GetChars(), chars);
}
return answer;
}
-MaybeObject* Heap::AllocateRawAsciiString(int length, PretenureFlag pretenure) {
- if (length < 0 || length > SeqAsciiString::kMaxLength) {
- return Failure::OutOfMemoryException();
+// Need explicit instantiations.
+template
+MaybeObject* Heap::AllocateInternalizedStringImpl<true>(String*, int, uint32_t);
+template
+MaybeObject* Heap::AllocateInternalizedStringImpl<false>(
+ String*, int, uint32_t);
+template
+MaybeObject* Heap::AllocateInternalizedStringImpl<false>(
+ Vector<const char>, int, uint32_t);
+
+
+MaybeObject* Heap::AllocateRawOneByteString(int length,
+ PretenureFlag pretenure) {
+ if (length < 0 || length > SeqOneByteString::kMaxLength) {
+ return Failure::OutOfMemoryException(0xb);
}
- int size = SeqAsciiString::SizeFor(length);
- ASSERT(size <= SeqAsciiString::kMaxSize);
+ int size = SeqOneByteString::SizeFor(length);
+ ASSERT(size <= SeqOneByteString::kMaxSize);
AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
AllocationSpace retry_space = OLD_DATA_SPACE;
@@ -4751,14 +5078,16 @@ MaybeObject* Heap::AllocateRawAsciiString(int length, PretenureFlag pretenure) {
String::cast(result)->set_hash_field(String::kEmptyHashField);
ASSERT_EQ(size, HeapObject::cast(result)->Size());
+#ifndef ENABLE_LATIN_1
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
// Initialize string's content to ensure ASCII-ness (character range 0-127)
// as required when verifying the heap.
- char* dest = SeqAsciiString::cast(result)->GetChars();
+ uint8_t* dest = SeqOneByteString::cast(result)->GetChars();
memset(dest, 0x0F, length * kCharSize);
}
#endif
+#endif
return result;
}
@@ -4767,7 +5096,7 @@ MaybeObject* Heap::AllocateRawAsciiString(int length, PretenureFlag pretenure) {
MaybeObject* Heap::AllocateRawTwoByteString(int length,
PretenureFlag pretenure) {
if (length < 0 || length > SeqTwoByteString::kMaxLength) {
- return Failure::OutOfMemoryException();
+ return Failure::OutOfMemoryException(0xc);
}
int size = SeqTwoByteString::SizeFor(length);
ASSERT(size <= SeqTwoByteString::kMaxSize);
@@ -4819,6 +5148,25 @@ MaybeObject* Heap::AllocateJSArray(
}
+MaybeObject* Heap::AllocateJSArrayWithAllocationSite(
+ ElementsKind elements_kind,
+ Handle<Object> allocation_site_info_payload) {
+ Context* native_context = isolate()->context()->native_context();
+ JSFunction* array_function = native_context->array_function();
+ Map* map = array_function->initial_map();
+ Object* maybe_map_array = native_context->js_array_maps();
+ if (!maybe_map_array->IsUndefined()) {
+ Object* maybe_transitioned_map =
+ FixedArray::cast(maybe_map_array)->get(elements_kind);
+ if (!maybe_transitioned_map->IsUndefined()) {
+ map = Map::cast(maybe_transitioned_map);
+ }
+ }
+ return AllocateJSObjectFromMapWithAllocationSite(map,
+ allocation_site_info_payload);
+}
+
+
MaybeObject* Heap::AllocateEmptyFixedArray() {
int size = FixedArray::SizeFor(0);
Object* result;
@@ -4836,7 +5184,7 @@ MaybeObject* Heap::AllocateEmptyFixedArray() {
MaybeObject* Heap::AllocateRawFixedArray(int length) {
if (length < 0 || length > FixedArray::kMaxLength) {
- return Failure::OutOfMemoryException();
+ return Failure::OutOfMemoryException(0xd);
}
ASSERT(length > 0);
// Use the general function if we're forced to always allocate.
@@ -4912,7 +5260,7 @@ MaybeObject* Heap::AllocateFixedArray(int length) {
MaybeObject* Heap::AllocateRawFixedArray(int length, PretenureFlag pretenure) {
if (length < 0 || length > FixedArray::kMaxLength) {
- return Failure::OutOfMemoryException();
+ return Failure::OutOfMemoryException(0xe);
}
AllocationSpace space =
@@ -5045,7 +5393,7 @@ MaybeObject* Heap::AllocateFixedDoubleArrayWithHoles(
MaybeObject* Heap::AllocateRawFixedDoubleArray(int length,
PretenureFlag pretenure) {
if (length < 0 || length > FixedDoubleArray::kMaxLength) {
- return Failure::OutOfMemoryException();
+ return Failure::OutOfMemoryException(0xf);
}
AllocationSpace space =
@@ -5089,6 +5437,34 @@ MaybeObject* Heap::AllocateHashTable(int length, PretenureFlag pretenure) {
}
+MaybeObject* Heap::AllocateSymbol(PretenureFlag pretenure) {
+ // Statically ensure that it is safe to allocate symbols in paged spaces.
+ STATIC_ASSERT(Symbol::kSize <= Page::kNonCodeObjectAreaSize);
+ AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
+
+ Object* result;
+ MaybeObject* maybe = AllocateRaw(Symbol::kSize, space, OLD_DATA_SPACE);
+ if (!maybe->ToObject(&result)) return maybe;
+
+ HeapObject::cast(result)->set_map_no_write_barrier(symbol_map());
+
+ // Generate a random hash value.
+ int hash;
+ int attempts = 0;
+ do {
+ hash = V8::RandomPrivate(isolate()) & Name::kHashBitMask;
+ attempts++;
+ } while (hash == 0 && attempts < 30);
+ if (hash == 0) hash = 1; // never return 0
+
+ Symbol::cast(result)->set_hash_field(
+ Name::kIsNotArrayIndexMask | (hash << Name::kHashShift));
+
+ ASSERT(result->IsSymbol());
+ return result;
+}
+
+
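
AllocateSymbol above draws a random hash, retrying a bounded number of times to avoid zero (reserved to mean "hash not computed"), then packs it next to flag bits. A sketch of that pattern with stand-in constants, not V8's Name::k* values:

#include <cstdint>
#include <random>

uint32_t MakeSymbolHashField(std::mt19937& rng) {
  const uint32_t kHashBitMask = (1u << 30) - 1;   // hypothetical hash width
  const int kHashShift = 2;                       // hypothetical shift
  const uint32_t kIsNotArrayIndexMask = 1u << 1;  // hypothetical flag bit
  uint32_t hash = 0;
  for (int attempts = 0; hash == 0 && attempts < 30; ++attempts) {
    hash = rng() & kHashBitMask;  // bounded retries, as in the diff
  }
  if (hash == 0) hash = 1;  // never return 0
  return kIsNotArrayIndexMask | (hash << kHashShift);
}
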
MaybeObject* Heap::AllocateNativeContext() {
Object* result;
{ MaybeObject* maybe_result =
@@ -5131,7 +5507,7 @@ MaybeObject* Heap::AllocateModuleContext(ScopeInfo* scope_info) {
}
Context* context = reinterpret_cast<Context*>(result);
context->set_map_no_write_barrier(module_context_map());
- // Context links will be set later.
+ // Instance link will be set later.
context->set_extension(Smi::FromInt(0));
return context;
}
@@ -5222,6 +5598,20 @@ MaybeObject* Heap::AllocateScopeInfo(int length) {
}
+MaybeObject* Heap::AllocateExternal(void* value) {
+ Foreign* foreign;
+ { MaybeObject* maybe_result = AllocateForeign(static_cast<Address>(value));
+ if (!maybe_result->To(&foreign)) return maybe_result;
+ }
+ JSObject* external;
+ { MaybeObject* maybe_result = AllocateJSObjectFromMap(external_map());
+ if (!maybe_result->To(&external)) return maybe_result;
+ }
+ external->SetInternalField(0, foreign);
+ return external;
+}
+
+
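
AllocateExternal pairs with the one-internal-field, non-extensible external_map set up in CreateInitialMaps: the raw pointer is boxed in a Foreign and stored in internal field 0. A hypothetical plain-C++ analogue of that wrapper, not V8 API:

struct Foreign { void* address; };     // tagged wrapper for a raw pointer
struct External { Foreign* field0; };  // one internal field, not extensible

External MakeExternal(void* value, Foreign* storage) {
  storage->address = value;  // AllocateForeign(static_cast<Address>(value))
  return External{storage};  // SetInternalField(0, foreign)
}

void* ExternalValue(const External& e) {  // the matching accessor
  return e.field0->address;
}
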
MaybeObject* Heap::AllocateStruct(InstanceType type) {
Map* map;
switch (type) {
@@ -5311,10 +5701,6 @@ bool Heap::IdleNotification(int hint) {
AdvanceIdleIncrementalMarking(step_size);
contexts_disposed_ = 0;
}
- // Make sure that we have no pending context disposals.
- // Take into account that we might have decided to delay full collection
- // because incremental marking is in progress.
- ASSERT((contexts_disposed_ == 0) || !incremental_marking()->IsStopped());
// After context disposal there is likely a lot of garbage remaining, reset
// the idle notification counters in order to trigger more incremental GCs
// on subsequent idle notifications.
@@ -5335,9 +5721,9 @@ bool Heap::IdleNotification(int hint) {
// 3. many lazy sweep steps.
// Use mark-sweep-compact events to count incremental GCs in a round.
-
if (incremental_marking()->IsStopped()) {
- if (!IsSweepingComplete() &&
+ if (!mark_compact_collector()->AreSweeperThreadsActivated() &&
+ !IsSweepingComplete() &&
!AdvanceSweepers(static_cast<int>(step_size))) {
return false;
}
@@ -5448,9 +5834,10 @@ bool Heap::IdleGlobalGC() {
void Heap::Print() {
if (!HasBeenSetUp()) return;
isolate()->PrintStack();
- AllSpaces spaces;
- for (Space* space = spaces.next(); space != NULL; space = spaces.next())
+ AllSpaces spaces(this);
+ for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
space->Print();
+ }
}
@@ -5479,7 +5866,7 @@ void Heap::ReportHeapStatistics(const char* title) {
PrintF("old_gen_limit_factor_ %d\n", old_gen_limit_factor_);
PrintF("\n");
- PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles());
+ PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles(isolate_));
isolate_->global_handles()->PrintStats();
PrintF("\n");
@@ -5576,93 +5963,93 @@ void Heap::Verify() {
#endif
-MaybeObject* Heap::LookupSymbol(Vector<const char> string) {
- Object* symbol = NULL;
+MaybeObject* Heap::InternalizeUtf8String(Vector<const char> string) {
+ Object* result = NULL;
Object* new_table;
{ MaybeObject* maybe_new_table =
- symbol_table()->LookupSymbol(string, &symbol);
+ string_table()->LookupUtf8String(string, &result);
if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
}
- // Can't use set_symbol_table because SymbolTable::cast knows that
- // SymbolTable is a singleton and checks for identity.
- roots_[kSymbolTableRootIndex] = new_table;
- ASSERT(symbol != NULL);
- return symbol;
+ // Can't use set_string_table because StringTable::cast knows that
+ // StringTable is a singleton and checks for identity.
+ roots_[kStringTableRootIndex] = new_table;
+ ASSERT(result != NULL);
+ return result;
}
-MaybeObject* Heap::LookupAsciiSymbol(Vector<const char> string) {
- Object* symbol = NULL;
+MaybeObject* Heap::InternalizeOneByteString(Vector<const uint8_t> string) {
+ Object* result = NULL;
Object* new_table;
{ MaybeObject* maybe_new_table =
- symbol_table()->LookupAsciiSymbol(string, &symbol);
+ string_table()->LookupOneByteString(string, &result);
if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
}
- // Can't use set_symbol_table because SymbolTable::cast knows that
- // SymbolTable is a singleton and checks for identity.
- roots_[kSymbolTableRootIndex] = new_table;
- ASSERT(symbol != NULL);
- return symbol;
+ // Can't use set_string_table because StringTable::cast knows that
+ // StringTable is a singleton and checks for identity.
+ roots_[kStringTableRootIndex] = new_table;
+ ASSERT(result != NULL);
+ return result;
}
-MaybeObject* Heap::LookupAsciiSymbol(Handle<SeqAsciiString> string,
+MaybeObject* Heap::InternalizeOneByteString(Handle<SeqOneByteString> string,
int from,
int length) {
- Object* symbol = NULL;
+ Object* result = NULL;
Object* new_table;
{ MaybeObject* maybe_new_table =
- symbol_table()->LookupSubStringAsciiSymbol(string,
+ string_table()->LookupSubStringOneByteString(string,
from,
length,
- &symbol);
+ &result);
if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
}
- // Can't use set_symbol_table because SymbolTable::cast knows that
- // SymbolTable is a singleton and checks for identity.
- roots_[kSymbolTableRootIndex] = new_table;
- ASSERT(symbol != NULL);
- return symbol;
+ // Can't use set_string_table because StringTable::cast knows that
+ // StringTable is a singleton and checks for identity.
+ roots_[kStringTableRootIndex] = new_table;
+ ASSERT(result != NULL);
+ return result;
}
-MaybeObject* Heap::LookupTwoByteSymbol(Vector<const uc16> string) {
- Object* symbol = NULL;
+MaybeObject* Heap::InternalizeTwoByteString(Vector<const uc16> string) {
+ Object* result = NULL;
Object* new_table;
{ MaybeObject* maybe_new_table =
- symbol_table()->LookupTwoByteSymbol(string, &symbol);
+ string_table()->LookupTwoByteString(string, &result);
if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
}
- // Can't use set_symbol_table because SymbolTable::cast knows that
- // SymbolTable is a singleton and checks for identity.
- roots_[kSymbolTableRootIndex] = new_table;
- ASSERT(symbol != NULL);
- return symbol;
+ // Can't use set_string_table because StringTable::cast knows that
+ // StringTable is a singleton and checks for identity.
+ roots_[kStringTableRootIndex] = new_table;
+ ASSERT(result != NULL);
+ return result;
}
-MaybeObject* Heap::LookupSymbol(String* string) {
- if (string->IsSymbol()) return string;
- Object* symbol = NULL;
+MaybeObject* Heap::InternalizeString(String* string) {
+ if (string->IsInternalizedString()) return string;
+ Object* result = NULL;
Object* new_table;
{ MaybeObject* maybe_new_table =
- symbol_table()->LookupString(string, &symbol);
+ string_table()->LookupString(string, &result);
if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
}
- // Can't use set_symbol_table because SymbolTable::cast knows that
- // SymbolTable is a singleton and checks for identity.
- roots_[kSymbolTableRootIndex] = new_table;
- ASSERT(symbol != NULL);
- return symbol;
+ // Can't use set_string_table because StringTable::cast knows that
+ // StringTable is a singleton and checks for identity.
+ roots_[kStringTableRootIndex] = new_table;
+ ASSERT(result != NULL);
+ return result;
}
-bool Heap::LookupSymbolIfExists(String* string, String** symbol) {
- if (string->IsSymbol()) {
- *symbol = string;
+bool Heap::InternalizeStringIfExists(String* string, String** result) {
+ if (string->IsInternalizedString()) {
+ *result = string;
return true;
}
- return symbol_table()->LookupSymbolIfExists(string, symbol);
+ return string_table()->LookupStringIfExists(string, result);
}
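
Every Internalize* variant above repeats the same tail: commit the possibly-reallocated table into the root list by hand, then return the internalized string. A hypothetical private helper on Heap (not in this patch) that captures the shared steps:

    // Hypothetical consolidation of the repeated commit-and-return tail.
    MaybeObject* Heap::CommitStringTable(MaybeObject* maybe_new_table,
                                         Object* result) {
      Object* new_table;
      if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
      // Can't use set_string_table because StringTable::cast knows that
      // StringTable is a singleton and checks for identity.
      roots_[kStringTableRootIndex] = new_table;
      ASSERT(result != NULL);
      return result;
    }
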
@@ -5890,12 +6277,13 @@ void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
- v->VisitPointer(reinterpret_cast<Object**>(&roots_[kSymbolTableRootIndex]));
- v->Synchronize(VisitorSynchronization::kSymbolTable);
+ v->VisitPointer(reinterpret_cast<Object**>(&roots_[kStringTableRootIndex]));
+ v->Synchronize(VisitorSynchronization::kStringTable);
if (mode != VISIT_ALL_IN_SCAVENGE &&
mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
// Scavenge collections have special processing for this.
external_string_table_.Iterate(v);
+ error_object_list_.Iterate(v);
}
v->Synchronize(VisitorSynchronization::kExternalStringsTable);
}
@@ -5905,8 +6293,8 @@ void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
v->Synchronize(VisitorSynchronization::kStrongRootList);
- v->VisitPointer(BitCast<Object**>(&hidden_symbol_));
- v->Synchronize(VisitorSynchronization::kSymbol);
+ v->VisitPointer(BitCast<Object**>(&hidden_string_));
+ v->Synchronize(VisitorSynchronization::kInternalizedString);
isolate_->bootstrapper()->Iterate(v);
v->Synchronize(VisitorSynchronization::kBootstrapper);
@@ -6077,7 +6465,7 @@ void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
*stats->os_error = OS::GetLastError();
isolate()->memory_allocator()->Available();
if (take_snapshot) {
- HeapIterator iterator;
+ HeapIterator iterator(this);
for (HeapObject* obj = iterator.next();
obj != NULL;
obj = iterator.next()) {
@@ -6107,172 +6495,6 @@ intptr_t Heap::PromotedExternalMemorySize() {
- amount_of_external_allocated_memory_at_last_global_gc_;
}
-#ifdef DEBUG
-
-// Tags 0, 1, and 3 are used. Use 2 for marking visited HeapObject.
-static const int kMarkTag = 2;
-
-
-class HeapDebugUtils {
- public:
- explicit HeapDebugUtils(Heap* heap)
- : search_for_any_global_(false),
- search_target_(NULL),
- found_target_(false),
- object_stack_(20),
- heap_(heap) {
- }
-
- class MarkObjectVisitor : public ObjectVisitor {
- public:
- explicit MarkObjectVisitor(HeapDebugUtils* utils) : utils_(utils) { }
-
- void VisitPointers(Object** start, Object** end) {
- // Copy all HeapObject pointers in [start, end)
- for (Object** p = start; p < end; p++) {
- if ((*p)->IsHeapObject())
- utils_->MarkObjectRecursively(p);
- }
- }
-
- HeapDebugUtils* utils_;
- };
-
- void MarkObjectRecursively(Object** p) {
- if (!(*p)->IsHeapObject()) return;
-
- HeapObject* obj = HeapObject::cast(*p);
-
- Object* map = obj->map();
-
- if (!map->IsHeapObject()) return; // visited before
-
- if (found_target_) return; // stop if target found
- object_stack_.Add(obj);
- if ((search_for_any_global_ && obj->IsJSGlobalObject()) ||
- (!search_for_any_global_ && (obj == search_target_))) {
- found_target_ = true;
- return;
- }
-
- // not visited yet
- Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map));
-
- Address map_addr = map_p->address();
-
- obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_addr + kMarkTag));
-
- MarkObjectRecursively(&map);
-
- MarkObjectVisitor mark_visitor(this);
-
- obj->IterateBody(map_p->instance_type(), obj->SizeFromMap(map_p),
- &mark_visitor);
-
- if (!found_target_) // don't pop if found the target
- object_stack_.RemoveLast();
- }
-
-
- class UnmarkObjectVisitor : public ObjectVisitor {
- public:
- explicit UnmarkObjectVisitor(HeapDebugUtils* utils) : utils_(utils) { }
-
- void VisitPointers(Object** start, Object** end) {
- // Copy all HeapObject pointers in [start, end)
- for (Object** p = start; p < end; p++) {
- if ((*p)->IsHeapObject())
- utils_->UnmarkObjectRecursively(p);
- }
- }
-
- HeapDebugUtils* utils_;
- };
-
-
- void UnmarkObjectRecursively(Object** p) {
- if (!(*p)->IsHeapObject()) return;
-
- HeapObject* obj = HeapObject::cast(*p);
-
- Object* map = obj->map();
-
- if (map->IsHeapObject()) return; // unmarked already
-
- Address map_addr = reinterpret_cast<Address>(map);
-
- map_addr -= kMarkTag;
-
- ASSERT_TAG_ALIGNED(map_addr);
-
- HeapObject* map_p = HeapObject::FromAddress(map_addr);
-
- obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_p));
-
- UnmarkObjectRecursively(reinterpret_cast<Object**>(&map_p));
-
- UnmarkObjectVisitor unmark_visitor(this);
-
- obj->IterateBody(Map::cast(map_p)->instance_type(),
- obj->SizeFromMap(Map::cast(map_p)),
- &unmark_visitor);
- }
-
-
- void MarkRootObjectRecursively(Object** root) {
- if (search_for_any_global_) {
- ASSERT(search_target_ == NULL);
- } else {
- ASSERT(search_target_->IsHeapObject());
- }
- found_target_ = false;
- object_stack_.Clear();
-
- MarkObjectRecursively(root);
- UnmarkObjectRecursively(root);
-
- if (found_target_) {
- PrintF("=====================================\n");
- PrintF("==== Path to object ====\n");
- PrintF("=====================================\n\n");
-
- ASSERT(!object_stack_.is_empty());
- for (int i = 0; i < object_stack_.length(); i++) {
- if (i > 0) PrintF("\n |\n |\n V\n\n");
- Object* obj = object_stack_[i];
- obj->Print();
- }
- PrintF("=====================================\n");
- }
- }
-
- // Helper class for visiting HeapObjects recursively.
- class MarkRootVisitor: public ObjectVisitor {
- public:
- explicit MarkRootVisitor(HeapDebugUtils* utils) : utils_(utils) { }
-
- void VisitPointers(Object** start, Object** end) {
- // Visit all HeapObject pointers in [start, end)
- for (Object** p = start; p < end; p++) {
- if ((*p)->IsHeapObject())
- utils_->MarkRootObjectRecursively(p);
- }
- }
-
- HeapDebugUtils* utils_;
- };
-
- bool search_for_any_global_;
- Object* search_target_;
- bool found_target_;
- List<Object*> object_stack_;
- Heap* heap_;
-
- friend class Heap;
-};
-
-#endif
-
V8_DECLARE_ONCE(initialize_gc_once);
@@ -6282,10 +6504,9 @@ static void InitializeGCOnce() {
MarkCompactCollector::Initialize();
}
-bool Heap::SetUp(bool create_heap_objects) {
+bool Heap::SetUp() {
#ifdef DEBUG
allocation_timeout_ = FLAG_gc_interval;
- debug_utils_ = new HeapDebugUtils(this);
#endif
// Initialize heap spaces and initial maps and objects. Whenever something
@@ -6374,17 +6595,6 @@ bool Heap::SetUp(bool create_heap_objects) {
}
}
- if (create_heap_objects) {
- // Create initial maps.
- if (!CreateInitialMaps()) return false;
- if (!CreateApiObjects()) return false;
-
- // Create initial objects
- if (!CreateInitialObjects()) return false;
-
- native_contexts_list_ = undefined_value();
- }
-
LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
LOG(isolate_, IntPtrTEvent("heap-available", Available()));
@@ -6395,6 +6605,18 @@ bool Heap::SetUp(bool create_heap_objects) {
return true;
}
+bool Heap::CreateHeapObjects() {
+ // Create initial maps.
+ if (!CreateInitialMaps()) return false;
+ if (!CreateApiObjects()) return false;
+
+ // Create initial objects
+ if (!CreateInitialObjects()) return false;
+
+ native_contexts_list_ = undefined_value();
+ return true;
+}
+
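
Splitting SetUp() in two gives the embedder a phase where spaces exist but no objects do, presumably so a snapshot deserializer can populate the heap instead of CreateHeapObjects(). A sketch of the resulting boot sequence on the isolate side (the create_heap_objects flag and call site are paraphrased, not quoted from this patch):

    if (!heap->SetUp()) return false;                 // memory areas only
    if (create_heap_objects) {
      if (!heap->CreateHeapObjects()) return false;   // maps, API objects, roots
    }
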
void Heap::SetStackLimits() {
ASSERT(isolate_ != NULL);
@@ -6421,14 +6643,16 @@ void Heap::TearDown() {
#endif
if (FLAG_print_cumulative_gc_stat) {
- PrintF("\n\n");
+ PrintF("\n");
PrintF("gc_count=%d ", gc_count_);
PrintF("mark_sweep_count=%d ", ms_count_);
- PrintF("max_gc_pause=%d ", get_max_gc_pause());
- PrintF("total_gc_time=%d ", total_gc_time_ms_);
- PrintF("min_in_mutator=%d ", get_min_in_mutator());
+ PrintF("max_gc_pause=%.1f ", get_max_gc_pause());
+ PrintF("total_gc_time=%.1f ", total_gc_time_ms_);
+ PrintF("min_in_mutator=%.1f ", get_min_in_mutator());
PrintF("max_alive_after_gc=%" V8_PTR_PREFIX "d ",
get_max_alive_after_gc());
+ PrintF("total_marking_time=%.1f ", marking_time());
+ PrintF("total_sweeping_time=%.1f ", sweeping_time());
PrintF("\n\n");
}
@@ -6436,6 +6660,8 @@ void Heap::TearDown() {
external_string_table_.TearDown();
+ error_object_list_.TearDown();
+
new_space_.TearDown();
if (old_pointer_space_ != NULL) {
@@ -6480,22 +6706,6 @@ void Heap::TearDown() {
isolate_->memory_allocator()->TearDown();
delete relocation_mutex_;
-
-#ifdef DEBUG
- delete debug_utils_;
- debug_utils_ = NULL;
-#endif
-}
-
-
-void Heap::Shrink() {
- // Try to shrink all paged spaces.
- PagedSpaces spaces;
- for (PagedSpace* space = spaces.next();
- space != NULL;
- space = spaces.next()) {
- space->ReleaseAllUnusedPages();
- }
}
@@ -6563,19 +6773,19 @@ void Heap::PrintHandles() {
Space* AllSpaces::next() {
switch (counter_++) {
case NEW_SPACE:
- return HEAP->new_space();
+ return heap_->new_space();
case OLD_POINTER_SPACE:
- return HEAP->old_pointer_space();
+ return heap_->old_pointer_space();
case OLD_DATA_SPACE:
- return HEAP->old_data_space();
+ return heap_->old_data_space();
case CODE_SPACE:
- return HEAP->code_space();
+ return heap_->code_space();
case MAP_SPACE:
- return HEAP->map_space();
+ return heap_->map_space();
case CELL_SPACE:
- return HEAP->cell_space();
+ return heap_->cell_space();
case LO_SPACE:
- return HEAP->lo_space();
+ return heap_->lo_space();
default:
return NULL;
}
@@ -6585,15 +6795,15 @@ Space* AllSpaces::next() {
PagedSpace* PagedSpaces::next() {
switch (counter_++) {
case OLD_POINTER_SPACE:
- return HEAP->old_pointer_space();
+ return heap_->old_pointer_space();
case OLD_DATA_SPACE:
- return HEAP->old_data_space();
+ return heap_->old_data_space();
case CODE_SPACE:
- return HEAP->code_space();
+ return heap_->code_space();
case MAP_SPACE:
- return HEAP->map_space();
+ return heap_->map_space();
case CELL_SPACE:
- return HEAP->cell_space();
+ return heap_->cell_space();
default:
return NULL;
}
@@ -6604,26 +6814,28 @@ PagedSpace* PagedSpaces::next() {
OldSpace* OldSpaces::next() {
switch (counter_++) {
case OLD_POINTER_SPACE:
- return HEAP->old_pointer_space();
+ return heap_->old_pointer_space();
case OLD_DATA_SPACE:
- return HEAP->old_data_space();
+ return heap_->old_data_space();
case CODE_SPACE:
- return HEAP->code_space();
+ return heap_->code_space();
default:
return NULL;
}
}
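
With the space iterators now parameterized by Heap*, callers thread the heap explicitly instead of reaching for the HEAP macro. A minimal usage sketch in the style of Heap::Print() above (CommittedMemory() is an assumed accessor from this tree, not shown in the patch):

    intptr_t TotalPagedCommitted(Heap* heap) {
      intptr_t total = 0;
      PagedSpaces spaces(heap);
      for (PagedSpace* space = spaces.next(); space != NULL;
           space = spaces.next()) {
        total += space->CommittedMemory();
      }
      return total;
    }
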
-SpaceIterator::SpaceIterator()
- : current_space_(FIRST_SPACE),
+SpaceIterator::SpaceIterator(Heap* heap)
+ : heap_(heap),
+ current_space_(FIRST_SPACE),
iterator_(NULL),
size_func_(NULL) {
}
-SpaceIterator::SpaceIterator(HeapObjectCallback size_func)
- : current_space_(FIRST_SPACE),
+SpaceIterator::SpaceIterator(Heap* heap, HeapObjectCallback size_func)
+ : heap_(heap),
+ current_space_(FIRST_SPACE),
iterator_(NULL),
size_func_(size_func) {
}
@@ -6663,25 +6875,26 @@ ObjectIterator* SpaceIterator::CreateIterator() {
switch (current_space_) {
case NEW_SPACE:
- iterator_ = new SemiSpaceIterator(HEAP->new_space(), size_func_);
+ iterator_ = new SemiSpaceIterator(heap_->new_space(), size_func_);
break;
case OLD_POINTER_SPACE:
- iterator_ = new HeapObjectIterator(HEAP->old_pointer_space(), size_func_);
+ iterator_ =
+ new HeapObjectIterator(heap_->old_pointer_space(), size_func_);
break;
case OLD_DATA_SPACE:
- iterator_ = new HeapObjectIterator(HEAP->old_data_space(), size_func_);
+ iterator_ = new HeapObjectIterator(heap_->old_data_space(), size_func_);
break;
case CODE_SPACE:
- iterator_ = new HeapObjectIterator(HEAP->code_space(), size_func_);
+ iterator_ = new HeapObjectIterator(heap_->code_space(), size_func_);
break;
case MAP_SPACE:
- iterator_ = new HeapObjectIterator(HEAP->map_space(), size_func_);
+ iterator_ = new HeapObjectIterator(heap_->map_space(), size_func_);
break;
case CELL_SPACE:
- iterator_ = new HeapObjectIterator(HEAP->cell_space(), size_func_);
+ iterator_ = new HeapObjectIterator(heap_->cell_space(), size_func_);
break;
case LO_SPACE:
- iterator_ = new LargeObjectIterator(HEAP->lo_space(), size_func_);
+ iterator_ = new LargeObjectIterator(heap_->lo_space(), size_func_);
break;
}
@@ -6752,15 +6965,18 @@ class UnreachableObjectsFilter : public HeapObjectsFilter {
};
-HeapIterator::HeapIterator()
- : filtering_(HeapIterator::kNoFiltering),
+HeapIterator::HeapIterator(Heap* heap)
+ : heap_(heap),
+ filtering_(HeapIterator::kNoFiltering),
filter_(NULL) {
Init();
}
-HeapIterator::HeapIterator(HeapIterator::HeapObjectsFiltering filtering)
- : filtering_(filtering),
+HeapIterator::HeapIterator(Heap* heap,
+ HeapIterator::HeapObjectsFiltering filtering)
+ : heap_(heap),
+ filtering_(filtering),
filter_(NULL) {
Init();
}
@@ -6773,7 +6989,7 @@ HeapIterator::~HeapIterator() {
void HeapIterator::Init() {
// Start the iteration.
- space_iterator_ = new SpaceIterator;
+ space_iterator_ = new SpaceIterator(heap_);
switch (filtering_) {
case kFilterUnreachable:
filter_ = new UnreachableObjectsFilter;
@@ -6840,7 +7056,7 @@ void HeapIterator::reset() {
}
-#if defined(DEBUG) || defined(LIVE_OBJECT_LIST)
+#ifdef DEBUG
Object* const PathTracer::kAnyGlobalObject = reinterpret_cast<Object*>(NULL);
@@ -7007,10 +7223,8 @@ void PathTracer::ProcessResults() {
PrintF("=====================================\n");
}
}
-#endif // DEBUG || LIVE_OBJECT_LIST
-#ifdef DEBUG
// Triggers a depth-first traversal of reachable objects from one
// given root object and finds a path to a specific heap object and
// prints it.
@@ -7040,9 +7254,9 @@ void Heap::TracePathToGlobal() {
#endif
-static intptr_t CountTotalHolesSize() {
+static intptr_t CountTotalHolesSize(Heap* heap) {
intptr_t holes_size = 0;
- OldSpaces spaces;
+ OldSpaces spaces(heap);
for (OldSpace* space = spaces.next();
space != NULL;
space = spaces.next()) {
@@ -7063,6 +7277,9 @@ GCTracer::GCTracer(Heap* heap,
allocated_since_last_gc_(0),
spent_in_mutator_(0),
promoted_objects_size_(0),
+ nodes_died_in_new_space_(0),
+ nodes_copied_in_new_space_(0),
+ nodes_promoted_(0),
heap_(heap),
gc_reason_(gc_reason),
collector_reason_(collector_reason) {
@@ -7075,7 +7292,7 @@ GCTracer::GCTracer(Heap* heap,
scopes_[i] = 0;
}
- in_free_list_or_wasted_before_gc_ = CountTotalHolesSize();
+ in_free_list_or_wasted_before_gc_ = CountTotalHolesSize(heap);
allocated_since_last_gc_ =
heap_->SizeOfObjects() - heap_->alive_after_last_gc_;
@@ -7103,7 +7320,7 @@ GCTracer::~GCTracer() {
heap_->alive_after_last_gc_ = heap_->SizeOfObjects();
heap_->last_gc_end_timestamp_ = OS::TimeCurrentMillis();
- int time = static_cast<int>(heap_->last_gc_end_timestamp_ - start_time_);
+ double time = heap_->last_gc_end_timestamp_ - start_time_;
// Update cumulative GC statistics if required.
if (FLAG_print_cumulative_gc_stat) {
@@ -7113,7 +7330,7 @@ GCTracer::~GCTracer() {
heap_->alive_after_last_gc_);
if (!first_gc) {
heap_->min_in_mutator_ = Min(heap_->min_in_mutator_,
- static_cast<int>(spent_in_mutator_));
+ spent_in_mutator_);
}
} else if (FLAG_trace_gc_verbose) {
heap_->total_gc_time_ms_ += time;
@@ -7121,6 +7338,9 @@ GCTracer::~GCTracer() {
if (collector_ == SCAVENGER && FLAG_trace_gc_ignore_scavenger) return;
+ heap_->AddMarkingTime(scopes_[Scope::MC_MARK]);
+
+ if (FLAG_print_cumulative_gc_stat && !FLAG_trace_gc) return;
PrintPID("%8.0f ms: ", heap_->isolate()->time_millis_since_init());
if (!FLAG_trace_gc_nvp) {
@@ -7137,16 +7357,16 @@ GCTracer::~GCTracer() {
end_memory_size_mb);
if (external_time > 0) PrintF("%d / ", external_time);
- PrintF("%d ms", time);
+ PrintF("%.1f ms", time);
if (steps_count_ > 0) {
if (collector_ == SCAVENGER) {
- PrintF(" (+ %d ms in %d steps since last GC)",
- static_cast<int>(steps_took_since_last_gc_),
+ PrintF(" (+ %.1f ms in %d steps since last GC)",
+ steps_took_since_last_gc_,
steps_count_since_last_gc_);
} else {
- PrintF(" (+ %d ms in %d steps since start of marking, "
- "biggest step %f ms)",
- static_cast<int>(steps_took_),
+ PrintF(" (+ %.1f ms in %d steps since start of marking, "
+ "biggest step %.1f ms)",
+ steps_took_,
steps_count_,
longest_step_);
}
@@ -7162,8 +7382,8 @@ GCTracer::~GCTracer() {
PrintF(".\n");
} else {
- PrintF("pause=%d ", time);
- PrintF("mutator=%d ", static_cast<int>(spent_in_mutator_));
+ PrintF("pause=%.1f ", time);
+ PrintF("mutator=%.1f ", spent_in_mutator_);
PrintF("gc=");
switch (collector_) {
case SCAVENGER:
@@ -7177,39 +7397,39 @@ GCTracer::~GCTracer() {
}
PrintF(" ");
- PrintF("external=%d ", static_cast<int>(scopes_[Scope::EXTERNAL]));
- PrintF("mark=%d ", static_cast<int>(scopes_[Scope::MC_MARK]));
- PrintF("sweep=%d ", static_cast<int>(scopes_[Scope::MC_SWEEP]));
- PrintF("sweepns=%d ", static_cast<int>(scopes_[Scope::MC_SWEEP_NEWSPACE]));
- PrintF("evacuate=%d ", static_cast<int>(scopes_[Scope::MC_EVACUATE_PAGES]));
- PrintF("new_new=%d ",
- static_cast<int>(scopes_[Scope::MC_UPDATE_NEW_TO_NEW_POINTERS]));
- PrintF("root_new=%d ",
- static_cast<int>(scopes_[Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS]));
- PrintF("old_new=%d ",
- static_cast<int>(scopes_[Scope::MC_UPDATE_OLD_TO_NEW_POINTERS]));
- PrintF("compaction_ptrs=%d ",
- static_cast<int>(scopes_[Scope::MC_UPDATE_POINTERS_TO_EVACUATED]));
- PrintF("intracompaction_ptrs=%d ", static_cast<int>(scopes_[
- Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED]));
- PrintF("misc_compaction=%d ",
- static_cast<int>(scopes_[Scope::MC_UPDATE_MISC_POINTERS]));
+ PrintF("external=%.1f ", scopes_[Scope::EXTERNAL]);
+ PrintF("mark=%.1f ", scopes_[Scope::MC_MARK]);
+ PrintF("sweep=%.1f ", scopes_[Scope::MC_SWEEP]);
+ PrintF("sweepns=%.1f ", scopes_[Scope::MC_SWEEP_NEWSPACE]);
+ PrintF("evacuate=%.1f ", scopes_[Scope::MC_EVACUATE_PAGES]);
+ PrintF("new_new=%.1f ", scopes_[Scope::MC_UPDATE_NEW_TO_NEW_POINTERS]);
+ PrintF("root_new=%.1f ", scopes_[Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS]);
+ PrintF("old_new=%.1f ", scopes_[Scope::MC_UPDATE_OLD_TO_NEW_POINTERS]);
+ PrintF("compaction_ptrs=%.1f ",
+ scopes_[Scope::MC_UPDATE_POINTERS_TO_EVACUATED]);
+ PrintF("intracompaction_ptrs=%.1f ",
+ scopes_[Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED]);
+ PrintF("misc_compaction=%.1f ", scopes_[Scope::MC_UPDATE_MISC_POINTERS]);
PrintF("total_size_before=%" V8_PTR_PREFIX "d ", start_object_size_);
PrintF("total_size_after=%" V8_PTR_PREFIX "d ", heap_->SizeOfObjects());
PrintF("holes_size_before=%" V8_PTR_PREFIX "d ",
in_free_list_or_wasted_before_gc_);
- PrintF("holes_size_after=%" V8_PTR_PREFIX "d ", CountTotalHolesSize());
+ PrintF("holes_size_after=%" V8_PTR_PREFIX "d ", CountTotalHolesSize(heap_));
PrintF("allocated=%" V8_PTR_PREFIX "d ", allocated_since_last_gc_);
PrintF("promoted=%" V8_PTR_PREFIX "d ", promoted_objects_size_);
+ PrintF("nodes_died_in_new=%d ", nodes_died_in_new_space_);
+ PrintF("nodes_copied_in_new=%d ", nodes_copied_in_new_space_);
+ PrintF("nodes_promoted=%d ", nodes_promoted_);
if (collector_ == SCAVENGER) {
PrintF("stepscount=%d ", steps_count_since_last_gc_);
- PrintF("stepstook=%d ", static_cast<int>(steps_took_since_last_gc_));
+ PrintF("stepstook=%.1f ", steps_took_since_last_gc_);
} else {
PrintF("stepscount=%d ", steps_count_);
- PrintF("stepstook=%d ", static_cast<int>(steps_took_));
+ PrintF("stepstook=%.1f ", steps_took_);
+ PrintF("longeststep=%.1f ", longest_step_);
}
PrintF("\n");
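
The int-to-double switch throughout this destructor matters for short pauses: a sub-millisecond scavenge used to be truncated to 0 ms and vanish from the cumulative stats. A self-contained illustration, independent of V8:

    #include <cstdio>
    int main() {
      double pause_ms = 0.4;  // a fast scavenge
      std::printf("pause=%d\n", static_cast<int>(pause_ms));  // old: pause=0
      std::printf("pause=%.1f\n", pause_ms);                  // new: pause=0.4
      return 0;
    }
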
@@ -7251,9 +7471,9 @@ int KeyedLookupCache::Lookup(Map* map, String* name) {
void KeyedLookupCache::Update(Map* map, String* name, int field_offset) {
- String* symbol;
- if (HEAP->LookupSymbolIfExists(name, &symbol)) {
- int index = (Hash(map, symbol) & kHashMask);
+ String* internalized_name;
+ if (HEAP->InternalizeStringIfExists(name, &internalized_name)) {
+ int index = (Hash(map, internalized_name) & kHashMask);
// After a GC there will be free slots, so we use them in order (this may
// help to get the most frequently used one in position 0).
for (int i = 0; i < kEntriesPerBucket; i++) {
@@ -7261,7 +7481,7 @@ void KeyedLookupCache::Update(Map* map, String* name, int field_offset) {
Object* free_entry_indicator = NULL;
if (key.map == free_entry_indicator) {
key.map = map;
- key.name = symbol;
+ key.name = internalized_name;
field_offsets_[index + i] = field_offset;
return;
}
@@ -7278,7 +7498,7 @@ void KeyedLookupCache::Update(Map* map, String* name, int field_offset) {
// Write the new first entry.
Key& key = keys_[index];
key.map = map;
- key.name = symbol;
+ key.name = internalized_name;
field_offsets_[index] = field_offset;
}
}
@@ -7340,6 +7560,8 @@ void ExternalStringTable::CleanUp() {
}
}
new_space_strings_.Rewind(last);
+ new_space_strings_.Trim();
+
last = 0;
for (int i = 0; i < old_space_strings_.length(); ++i) {
if (old_space_strings_[i] == heap_->the_hole_value()) {
@@ -7349,6 +7571,7 @@ void ExternalStringTable::CleanUp() {
old_space_strings_[last++] = old_space_strings_[i];
}
old_space_strings_.Rewind(last);
+ old_space_strings_.Trim();
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
Verify();
@@ -7376,6 +7599,119 @@ void ExternalStringTable::TearDown() {
}
+// Update list entries that point to objects relocated by the last GC.
+void ErrorObjectList::UpdateReferences() {
+ for (int i = 0; i < list_.length(); i++) {
+ HeapObject* object = HeapObject::cast(list_[i]);
+ MapWord first_word = object->map_word();
+ if (first_word.IsForwardingAddress()) {
+ list_[i] = first_word.ToForwardingAddress();
+ }
+ }
+}
+
+
+// Unforwarded objects in new space are dead and removed from the list.
+void ErrorObjectList::UpdateReferencesInNewSpace(Heap* heap) {
+ if (list_.is_empty()) return;
+ if (!nested_) {
+ int write_index = 0;
+ for (int i = 0; i < list_.length(); i++) {
+ MapWord first_word = HeapObject::cast(list_[i])->map_word();
+ if (first_word.IsForwardingAddress()) {
+ list_[write_index++] = first_word.ToForwardingAddress();
+ }
+ }
+ list_.Rewind(write_index);
+ } else {
+ // If a GC is triggered during DeferredFormatStackTrace, we do not move
+    // objects in the list, just remove dead ones, so as not to confuse the
+ // loop in DeferredFormatStackTrace.
+ for (int i = 0; i < list_.length(); i++) {
+ MapWord first_word = HeapObject::cast(list_[i])->map_word();
+ list_[i] = first_word.IsForwardingAddress()
+ ? first_word.ToForwardingAddress()
+ : heap->the_hole_value();
+ }
+ }
+}
+
+
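
Both loops above apply the standard scavenge protocol: once an object in new space has been evacuated, its map slot holds a forwarding pointer, so the map word distinguishes survivors from garbage. A paraphrase of the per-entry rule (dead_marker plays the role of the_hole_value() in the nested case):

    // Sketch of the slot-update rule implemented by the two loops above.
    Object* ResolveSlot(Object* obj, Object* dead_marker) {
      MapWord word = HeapObject::cast(obj)->map_word();
      return word.IsForwardingAddress() ? word.ToForwardingAddress()  // moved
                                        : dead_marker;  // died in this scavenge
    }
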
+void ErrorObjectList::DeferredFormatStackTrace(Isolate* isolate) {
+ // If formatting the stack trace causes a GC, this method will be
+ // recursively called. In that case, skip the recursive call, since
+ // the loop modifies the list while iterating over it.
+ if (nested_ || list_.is_empty() || isolate->has_pending_exception()) return;
+ nested_ = true;
+ HandleScope scope(isolate);
+ Handle<String> stack_key = isolate->factory()->stack_string();
+ int write_index = 0;
+ int budget = kBudgetPerGC;
+ for (int i = 0; i < list_.length(); i++) {
+ Object* object = list_[i];
+ JSFunction* getter_fun;
+
+ { AssertNoAllocation assert;
+ // Skip possible holes in the list.
+ if (object->IsTheHole()) continue;
+ if (isolate->heap()->InNewSpace(object) || budget == 0) {
+ list_[write_index++] = object;
+ continue;
+ }
+
+ // Check whether the stack property is backed by the original getter.
+ LookupResult lookup(isolate);
+ JSObject::cast(object)->LocalLookupRealNamedProperty(*stack_key, &lookup);
+ if (!lookup.IsFound() || lookup.type() != CALLBACKS) continue;
+ Object* callback = lookup.GetCallbackObject();
+ if (!callback->IsAccessorPair()) continue;
+ Object* getter_obj = AccessorPair::cast(callback)->getter();
+ if (!getter_obj->IsJSFunction()) continue;
+ getter_fun = JSFunction::cast(getter_obj);
+ String* key = isolate->heap()->hidden_stack_trace_string();
+ if (key != getter_fun->GetHiddenProperty(key)) continue;
+ }
+
+ budget--;
+ HandleScope scope(isolate);
+ bool has_exception = false;
+#ifdef DEBUG
+ Handle<Map> map(HeapObject::cast(object)->map(), isolate);
+#endif
+ Handle<Object> object_handle(object, isolate);
+ Handle<Object> getter_handle(getter_fun, isolate);
+ Execution::Call(getter_handle, object_handle, 0, NULL, &has_exception);
+ ASSERT(*map == HeapObject::cast(*object_handle)->map());
+ if (has_exception) {
+ // Hit an exception (most likely a stack overflow).
+ // Wrap up this pass and retry after another GC.
+ isolate->clear_pending_exception();
+ // We use the handle since calling the getter might have caused a GC.
+ list_[write_index++] = *object_handle;
+ budget = 0;
+ }
+ }
+ list_.Rewind(write_index);
+ list_.Trim();
+ nested_ = false;
+}
+
+
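
DeferredFormatStackTrace() above caps its work per GC: at most kBudgetPerGC getters run, survivors are compacted to the front, and the remainder waits for the next notification. A generic sketch of that budgeted-drain shape, with hypothetical names and a std::vector in place of V8's List:

    #include <cstddef>
    #include <vector>

    template <typename T, typename Pred>
    int DrainWithBudget(std::vector<T>* items, int budget, Pred process) {
      size_t write = 0;
      for (size_t i = 0; i < items->size(); ++i) {
        if (budget > 0 && process((*items)[i])) {
          --budget;                        // handled now, drop from the list
          continue;
        }
        (*items)[write++] = (*items)[i];   // keep for a later round
      }
      items->resize(write);
      return budget;
    }
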
+void ErrorObjectList::RemoveUnmarked(Heap* heap) {
+ for (int i = 0; i < list_.length(); i++) {
+ HeapObject* object = HeapObject::cast(list_[i]);
+ if (!Marking::MarkBitFrom(object).Get()) {
+ list_[i] = heap->the_hole_value();
+ }
+ }
+}
+
+
+void ErrorObjectList::TearDown() {
+ list_.Free();
+}
+
+
void Heap::QueueMemoryChunkForFree(MemoryChunk* chunk) {
chunk->set_next_chunk(chunks_queued_for_free_);
chunks_queued_for_free_ = chunk;
diff --git a/src/3rdparty/v8/src/heap.h b/src/3rdparty/v8/src/heap.h
index 12cd295..d690e18 100644
--- a/src/3rdparty/v8/src/heap.h
+++ b/src/3rdparty/v8/src/heap.h
@@ -61,8 +61,6 @@ namespace internal {
V(Map, global_property_cell_map, GlobalPropertyCellMap) \
V(Map, shared_function_info_map, SharedFunctionInfoMap) \
V(Map, meta_map, MetaMap) \
- V(Map, ascii_symbol_map, AsciiSymbolMap) \
- V(Map, ascii_string_map, AsciiStringMap) \
V(Map, heap_number_map, HeapNumberMap) \
V(Map, native_context_map, NativeContextMap) \
V(Map, fixed_array_map, FixedArrayMap) \
@@ -74,7 +72,6 @@ namespace internal {
V(Map, hash_table_map, HashTableMap) \
V(FixedArray, empty_fixed_array, EmptyFixedArray) \
V(ByteArray, empty_byte_array, EmptyByteArray) \
- V(String, empty_string, EmptyString) \
V(DescriptorArray, empty_descriptor_array, EmptyDescriptorArray) \
V(Smi, stack_limit, StackLimit) \
V(Oddball, arguments_marker, ArgumentsMarker) \
@@ -90,29 +87,42 @@ namespace internal {
V(FixedArray, regexp_multiple_cache, RegExpMultipleCache) \
V(Object, termination_exception, TerminationException) \
V(Smi, hash_seed, HashSeed) \
- V(Map, string_map, StringMap) \
V(Map, symbol_map, SymbolMap) \
+ V(Map, string_map, StringMap) \
+ V(Map, ascii_string_map, AsciiStringMap) \
V(Map, cons_string_map, ConsStringMap) \
V(Map, cons_ascii_string_map, ConsAsciiStringMap) \
V(Map, sliced_string_map, SlicedStringMap) \
V(Map, sliced_ascii_string_map, SlicedAsciiStringMap) \
- V(Map, cons_symbol_map, ConsSymbolMap) \
- V(Map, cons_ascii_symbol_map, ConsAsciiSymbolMap) \
- V(Map, external_symbol_map, ExternalSymbolMap) \
- V(Map, external_symbol_with_ascii_data_map, ExternalSymbolWithAsciiDataMap) \
- V(Map, external_ascii_symbol_map, ExternalAsciiSymbolMap) \
V(Map, external_string_map, ExternalStringMap) \
V(Map, external_string_with_ascii_data_map, ExternalStringWithAsciiDataMap) \
V(Map, external_ascii_string_map, ExternalAsciiStringMap) \
- V(Map, short_external_symbol_map, ShortExternalSymbolMap) \
- V(Map, \
- short_external_symbol_with_ascii_data_map, \
- ShortExternalSymbolWithAsciiDataMap) \
- V(Map, short_external_ascii_symbol_map, ShortExternalAsciiSymbolMap) \
V(Map, short_external_string_map, ShortExternalStringMap) \
V(Map, \
short_external_string_with_ascii_data_map, \
ShortExternalStringWithAsciiDataMap) \
+ V(Map, internalized_string_map, InternalizedStringMap) \
+ V(Map, ascii_internalized_string_map, AsciiInternalizedStringMap) \
+ V(Map, cons_internalized_string_map, ConsInternalizedStringMap) \
+ V(Map, cons_ascii_internalized_string_map, ConsAsciiInternalizedStringMap) \
+ V(Map, \
+ external_internalized_string_map, \
+ ExternalInternalizedStringMap) \
+ V(Map, \
+ external_internalized_string_with_ascii_data_map, \
+ ExternalInternalizedStringWithAsciiDataMap) \
+ V(Map, \
+ external_ascii_internalized_string_map, \
+ ExternalAsciiInternalizedStringMap) \
+ V(Map, \
+ short_external_internalized_string_map, \
+ ShortExternalInternalizedStringMap) \
+ V(Map, \
+ short_external_internalized_string_with_ascii_data_map, \
+ ShortExternalInternalizedStringWithAsciiDataMap) \
+ V(Map, \
+ short_external_ascii_internalized_string_map, \
+ ShortExternalAsciiInternalizedStringMap) \
V(Map, short_external_ascii_string_map, ShortExternalAsciiStringMap) \
V(Map, undetectable_string_map, UndetectableStringMap) \
V(Map, undetectable_ascii_string_map, UndetectableAsciiStringMap) \
@@ -155,104 +165,109 @@ namespace internal {
V(Smi, construct_stub_deopt_pc_offset, ConstructStubDeoptPCOffset) \
V(Smi, getter_stub_deopt_pc_offset, GetterStubDeoptPCOffset) \
V(Smi, setter_stub_deopt_pc_offset, SetterStubDeoptPCOffset) \
- V(JSObject, observation_state, ObservationState)
+ V(JSObject, observation_state, ObservationState) \
+ V(Map, external_map, ExternalMap)
#define ROOT_LIST(V) \
STRONG_ROOT_LIST(V) \
- V(SymbolTable, symbol_table, SymbolTable)
-
-#define SYMBOL_LIST(V) \
- V(Array_symbol, "Array") \
- V(Object_symbol, "Object") \
- V(Proto_symbol, "__proto__") \
- V(StringImpl_symbol, "StringImpl") \
- V(arguments_symbol, "arguments") \
- V(Arguments_symbol, "Arguments") \
- V(call_symbol, "call") \
- V(apply_symbol, "apply") \
- V(caller_symbol, "caller") \
- V(boolean_symbol, "boolean") \
- V(Boolean_symbol, "Boolean") \
- V(callee_symbol, "callee") \
- V(constructor_symbol, "constructor") \
- V(code_symbol, ".code") \
- V(result_symbol, ".result") \
- V(dot_for_symbol, ".for.") \
- V(catch_var_symbol, ".catch-var") \
- V(empty_symbol, "") \
- V(eval_symbol, "eval") \
- V(function_symbol, "function") \
- V(length_symbol, "length") \
- V(module_symbol, "module") \
- V(name_symbol, "name") \
- V(native_symbol, "native") \
- V(null_symbol, "null") \
- V(number_symbol, "number") \
- V(Number_symbol, "Number") \
- V(nan_symbol, "NaN") \
- V(RegExp_symbol, "RegExp") \
- V(source_symbol, "source") \
- V(global_symbol, "global") \
- V(ignore_case_symbol, "ignoreCase") \
- V(multiline_symbol, "multiline") \
- V(input_symbol, "input") \
- V(index_symbol, "index") \
- V(last_index_symbol, "lastIndex") \
- V(object_symbol, "object") \
- V(prototype_symbol, "prototype") \
- V(string_symbol, "string") \
- V(String_symbol, "String") \
- V(Date_symbol, "Date") \
- V(Error_symbol, "Error") \
- V(this_symbol, "this") \
- V(to_string_symbol, "toString") \
- V(char_at_symbol, "CharAt") \
- V(undefined_symbol, "undefined") \
- V(value_of_symbol, "valueOf") \
- V(InitializeVarGlobal_symbol, "InitializeVarGlobal") \
- V(InitializeConstGlobal_symbol, "InitializeConstGlobal") \
- V(KeyedLoadElementMonomorphic_symbol, \
+ V(StringTable, string_table, StringTable)
+
+#define INTERNALIZED_STRING_LIST(V) \
+ V(Array_string, "Array") \
+ V(Object_string, "Object") \
+ V(proto_string, "__proto__") \
+ V(StringImpl_string, "StringImpl") \
+ V(arguments_string, "arguments") \
+ V(Arguments_string, "Arguments") \
+ V(call_string, "call") \
+ V(apply_string, "apply") \
+ V(caller_string, "caller") \
+ V(boolean_string, "boolean") \
+ V(Boolean_string, "Boolean") \
+ V(callee_string, "callee") \
+ V(constructor_string, "constructor") \
+ V(code_string, ".code") \
+ V(result_string, ".result") \
+ V(dot_for_string, ".for.") \
+ V(catch_var_string, ".catch-var") \
+ V(empty_string, "") \
+ V(eval_string, "eval") \
+ V(function_string, "function") \
+ V(length_string, "length") \
+ V(module_string, "module") \
+ V(name_string, "name") \
+ V(native_string, "native") \
+ V(null_string, "null") \
+ V(number_string, "number") \
+ V(Number_string, "Number") \
+ V(nan_string, "NaN") \
+ V(RegExp_string, "RegExp") \
+ V(source_string, "source") \
+ V(global_string, "global") \
+ V(ignore_case_string, "ignoreCase") \
+ V(multiline_string, "multiline") \
+ V(input_string, "input") \
+ V(index_string, "index") \
+ V(last_index_string, "lastIndex") \
+ V(object_string, "object") \
+ V(prototype_string, "prototype") \
+ V(string_string, "string") \
+ V(String_string, "String") \
+ V(Date_string, "Date") \
+ V(Error_string, "Error") \
+ V(this_string, "this") \
+ V(to_string_string, "toString") \
+ V(char_at_string, "CharAt") \
+ V(undefined_string, "undefined") \
+ V(value_of_string, "valueOf") \
+ V(stack_string, "stack") \
+ V(InitializeVarGlobal_string, "InitializeVarGlobal") \
+ V(InitializeConstGlobal_string, "InitializeConstGlobal") \
+ V(KeyedLoadElementMonomorphic_string, \
"KeyedLoadElementMonomorphic") \
- V(KeyedStoreElementMonomorphic_symbol, \
+ V(KeyedStoreElementMonomorphic_string, \
"KeyedStoreElementMonomorphic") \
- V(KeyedStoreAndGrowElementMonomorphic_symbol, \
+ V(KeyedStoreAndGrowElementMonomorphic_string, \
"KeyedStoreAndGrowElementMonomorphic") \
- V(stack_overflow_symbol, "kStackOverflowBoilerplate") \
- V(illegal_access_symbol, "illegal access") \
- V(out_of_memory_symbol, "out-of-memory") \
- V(illegal_execution_state_symbol, "illegal execution state") \
- V(get_symbol, "get") \
- V(set_symbol, "set") \
- V(function_class_symbol, "Function") \
- V(illegal_argument_symbol, "illegal argument") \
- V(MakeReferenceError_symbol, "MakeReferenceError") \
- V(MakeSyntaxError_symbol, "MakeSyntaxError") \
- V(MakeTypeError_symbol, "MakeTypeError") \
- V(invalid_lhs_in_assignment_symbol, "invalid_lhs_in_assignment") \
- V(invalid_lhs_in_for_in_symbol, "invalid_lhs_in_for_in") \
- V(invalid_lhs_in_postfix_op_symbol, "invalid_lhs_in_postfix_op") \
- V(invalid_lhs_in_prefix_op_symbol, "invalid_lhs_in_prefix_op") \
- V(illegal_return_symbol, "illegal_return") \
- V(illegal_break_symbol, "illegal_break") \
- V(illegal_continue_symbol, "illegal_continue") \
- V(unknown_label_symbol, "unknown_label") \
- V(redeclaration_symbol, "redeclaration") \
- V(failure_symbol, "<failure>") \
- V(space_symbol, " ") \
- V(exec_symbol, "exec") \
- V(zero_symbol, "0") \
- V(global_eval_symbol, "GlobalEval") \
- V(identity_hash_symbol, "v8::IdentityHash") \
- V(closure_symbol, "(closure)") \
- V(use_strict, "use strict") \
- V(dot_symbol, ".") \
- V(anonymous_function_symbol, "(anonymous function)") \
- V(compare_ic_symbol, "==") \
- V(strict_compare_ic_symbol, "===") \
- V(infinity_symbol, "Infinity") \
- V(minus_infinity_symbol, "-Infinity") \
- V(hidden_stack_trace_symbol, "v8::hidden_stack_trace") \
- V(query_colon_symbol, "(?:)")
+ V(stack_overflow_string, "kStackOverflowBoilerplate") \
+ V(illegal_access_string, "illegal access") \
+ V(out_of_memory_string, "out-of-memory") \
+ V(illegal_execution_state_string, "illegal execution state") \
+ V(get_string, "get") \
+ V(set_string, "set") \
+ V(map_field_string, "%map") \
+ V(elements_field_string, "%elements") \
+ V(length_field_string, "%length") \
+ V(function_class_string, "Function") \
+ V(illegal_argument_string, "illegal argument") \
+ V(MakeReferenceError_string, "MakeReferenceError") \
+ V(MakeSyntaxError_string, "MakeSyntaxError") \
+ V(MakeTypeError_string, "MakeTypeError") \
+ V(invalid_lhs_in_assignment_string, "invalid_lhs_in_assignment") \
+ V(invalid_lhs_in_for_in_string, "invalid_lhs_in_for_in") \
+ V(invalid_lhs_in_postfix_op_string, "invalid_lhs_in_postfix_op") \
+ V(invalid_lhs_in_prefix_op_string, "invalid_lhs_in_prefix_op") \
+ V(illegal_return_string, "illegal_return") \
+ V(illegal_break_string, "illegal_break") \
+ V(illegal_continue_string, "illegal_continue") \
+ V(unknown_label_string, "unknown_label") \
+ V(redeclaration_string, "redeclaration") \
+ V(failure_string, "<failure>") \
+ V(space_string, " ") \
+ V(exec_string, "exec") \
+ V(zero_string, "0") \
+ V(global_eval_string, "GlobalEval") \
+ V(identity_hash_string, "v8::IdentityHash") \
+ V(closure_string, "(closure)") \
+ V(use_strict_string, "use strict") \
+ V(dot_string, ".") \
+ V(anonymous_function_string, "(anonymous function)") \
+ V(compare_ic_string, "==") \
+ V(strict_compare_ic_string, "===") \
+ V(infinity_string, "Infinity") \
+ V(minus_infinity_string, "-Infinity") \
+ V(hidden_stack_trace_string, "v8::hidden_stack_trace") \
+  V(query_colon_string, "(?:)")
// Forward declarations.
class GCTracer;
@@ -286,14 +301,6 @@ class StoreBufferRebuilder {
-// The all static Heap captures the interface to the global object heap.
-// All JavaScript contexts by this process share the same object heap.
-
-#ifdef DEBUG
-class HeapDebugUtils;
-#endif
-
-
// A queue of objects promoted during scavenge. Each object is accompanied
// by its size to avoid dereferencing a map pointer for scanning.
class PromotionQueue {
@@ -439,6 +446,41 @@ class ExternalStringTable {
};
+// The stack property of an error object is implemented as a getter that
+// formats the attached raw stack trace into a string. This raw stack trace
+// keeps code and function objects alive until the getter is called the first
+// time. To release those objects, we call the getter after each GC for
+// newly tenured error objects that are kept in a list.
+class ErrorObjectList {
+ public:
+ inline void Add(JSObject* object);
+
+ inline void Iterate(ObjectVisitor* v);
+
+ void TearDown();
+
+ void RemoveUnmarked(Heap* heap);
+
+ void DeferredFormatStackTrace(Isolate* isolate);
+
+ void UpdateReferences();
+
+ void UpdateReferencesInNewSpace(Heap* heap);
+
+ private:
+ static const int kBudgetPerGC = 16;
+
+ ErrorObjectList() : nested_(false) { }
+
+ friend class Heap;
+
+ List<Object*> list_;
+ bool nested_;
+
+ DISALLOW_COPY_AND_ASSIGN(ErrorObjectList);
+};
+
+
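
Taken together with the heap.cc hunks above, the intended driver sequence looks roughly like this; the actual call sites live elsewhere in this patch, so this is a paraphrase:

    // During scavenge:   error_object_list_.UpdateReferencesInNewSpace(this);
    // During mark-sweep: error_object_list_.RemoveUnmarked(this);
    //                    error_object_list_.UpdateReferences();
    // After each GC:     error_object_list_.DeferredFormatStackTrace(isolate);
    // Weak roots:        error_object_list_.Iterate(v);  // see IterateWeakRoots
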
enum ArrayStorageAllocationMode {
DONT_INITIALIZE_ARRAY_ELEMENTS,
INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE
@@ -453,10 +495,13 @@ class Heap {
intptr_t max_executable_size);
bool ConfigureHeapDefault();
- // Initializes the global object heap. If create_heap_objects is true,
- // also creates the basic non-mutable objects.
+ // Prepares the heap, setting up memory areas that are needed in the isolate
+ // without actually creating any objects.
+ bool SetUp();
+
+ // Bootstraps the object heap with the core set of objects required to run.
// Returns whether it succeeded.
- bool SetUp(bool create_heap_objects);
+ bool CreateHeapObjects();
// Destroys all memory allocated by the heap.
void TearDown();
@@ -561,7 +606,12 @@ class Heap {
// failed.
// Please note this does not perform a garbage collection.
MUST_USE_RESULT MaybeObject* AllocateJSObject(
- JSFunction* constructor, PretenureFlag pretenure = NOT_TENURED);
+ JSFunction* constructor,
+ PretenureFlag pretenure = NOT_TENURED);
+
+ MUST_USE_RESULT MaybeObject* AllocateJSObjectWithAllocationSite(
+ JSFunction* constructor,
+ Handle<Object> allocation_site_info_payload);
MUST_USE_RESULT MaybeObject* AllocateJSModule(Context* context,
ScopeInfo* scope_info);
@@ -575,6 +625,10 @@ class Heap {
pretenure);
}
+ inline MUST_USE_RESULT MaybeObject* AllocateEmptyJSArrayWithAllocationSite(
+ ElementsKind elements_kind,
+ Handle<Object> allocation_site_payload);
+
// Allocate a JSArray with a specified length but elements that are left
// uninitialized.
MUST_USE_RESULT MaybeObject* AllocateJSArrayAndStorage(
@@ -584,10 +638,24 @@ class Heap {
ArrayStorageAllocationMode mode = DONT_INITIALIZE_ARRAY_ELEMENTS,
PretenureFlag pretenure = NOT_TENURED);
+ MUST_USE_RESULT MaybeObject* AllocateJSArrayAndStorageWithAllocationSite(
+ ElementsKind elements_kind,
+ int length,
+ int capacity,
+ Handle<Object> allocation_site_payload,
+ ArrayStorageAllocationMode mode = DONT_INITIALIZE_ARRAY_ELEMENTS);
+
+ MUST_USE_RESULT MaybeObject* AllocateJSArrayStorage(
+ JSArray* array,
+ int length,
+ int capacity,
+ ArrayStorageAllocationMode mode = DONT_INITIALIZE_ARRAY_ELEMENTS);
+
// Allocate a JSArray with no elements
MUST_USE_RESULT MaybeObject* AllocateJSArrayWithElements(
FixedArrayBase* array_base,
ElementsKind elements_kind,
+ int length,
PretenureFlag pretenure = NOT_TENURED);
// Allocates and initializes a new global object based on a constructor.
@@ -601,6 +669,8 @@ class Heap {
// Returns failure if allocation failed.
MUST_USE_RESULT MaybeObject* CopyJSObject(JSObject* source);
+ MUST_USE_RESULT MaybeObject* CopyJSObjectWithAllocationSite(JSObject* source);
+
// Allocates the function prototype.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
@@ -642,12 +712,18 @@ class Heap {
MUST_USE_RESULT MaybeObject* AllocateJSObjectFromMap(
Map* map, PretenureFlag pretenure = NOT_TENURED);
+ MUST_USE_RESULT MaybeObject* AllocateJSObjectFromMapWithAllocationSite(
+ Map* map, Handle<Object> allocation_site_info_payload);
+
// Allocates a heap object based on the map.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this function does not perform a garbage collection.
MUST_USE_RESULT MaybeObject* Allocate(Map* map, AllocationSpace space);
+ MUST_USE_RESULT MaybeObject* AllocateWithAllocationSite(Map* map,
+ AllocationSpace space, Handle<Object> allocation_site_info_payload);
+
// Allocates a JS Map in the heap.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
@@ -670,6 +746,9 @@ class Heap {
// Allocates a serialized scope info.
MUST_USE_RESULT MaybeObject* AllocateScopeInfo(int length);
+ // Allocates an External object for v8's external API.
+ MUST_USE_RESULT MaybeObject* AllocateExternal(void* value);
+
// Allocates an empty PolymorphicCodeCache.
MUST_USE_RESULT MaybeObject* AllocatePolymorphicCodeCache();
@@ -706,9 +785,16 @@ class Heap {
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* AllocateStringFromAscii(
- Vector<const char> str,
+ MUST_USE_RESULT MaybeObject* AllocateStringFromOneByte(
+ Vector<const uint8_t> str,
PretenureFlag pretenure = NOT_TENURED);
+ // TODO(dcarney): remove this function.
+ MUST_USE_RESULT inline MaybeObject* AllocateStringFromOneByte(
+ Vector<const char> str,
+ PretenureFlag pretenure = NOT_TENURED) {
+ return AllocateStringFromOneByte(Vector<const uint8_t>::cast(str),
+ pretenure);
+ }
MUST_USE_RESULT inline MaybeObject* AllocateStringFromUtf8(
Vector<const char> str,
PretenureFlag pretenure = NOT_TENURED);
@@ -720,28 +806,33 @@ class Heap {
Vector<const uc16> str,
PretenureFlag pretenure = NOT_TENURED);
- // Allocates a symbol in old space based on the character stream.
- // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
- // failed.
+ // Allocates an internalized string in old space based on the character
+ // stream. Returns Failure::RetryAfterGC(requested_bytes, space) if the
+ // allocation failed.
// Please note this function does not perform a garbage collection.
- MUST_USE_RESULT inline MaybeObject* AllocateSymbol(Vector<const char> str,
- int chars,
- uint32_t hash_field);
+ MUST_USE_RESULT inline MaybeObject* AllocateInternalizedStringFromUtf8(
+ Vector<const char> str,
+ int chars,
+ uint32_t hash_field);
- MUST_USE_RESULT inline MaybeObject* AllocateAsciiSymbol(
- Vector<const char> str,
+ MUST_USE_RESULT inline MaybeObject* AllocateOneByteInternalizedString(
+ Vector<const uint8_t> str,
uint32_t hash_field);
- MUST_USE_RESULT inline MaybeObject* AllocateTwoByteSymbol(
+ MUST_USE_RESULT inline MaybeObject* AllocateTwoByteInternalizedString(
Vector<const uc16> str,
uint32_t hash_field);
- MUST_USE_RESULT MaybeObject* AllocateInternalSymbol(
- unibrow::CharacterStream* buffer, int chars, uint32_t hash_field);
+ template<typename T>
+ static inline bool IsOneByte(T t, int chars);
- MUST_USE_RESULT MaybeObject* AllocateExternalSymbol(
- Vector<const char> str,
- int chars);
+ template<typename T>
+ MUST_USE_RESULT inline MaybeObject* AllocateInternalizedStringImpl(
+ T t, int chars, uint32_t hash_field);
+
+ template<bool is_one_byte, typename T>
+ MUST_USE_RESULT MaybeObject* AllocateInternalizedStringImpl(
+ T t, int chars, uint32_t hash_field);
// Allocates and partially initializes a String. There are two String
// encodings: ASCII and two byte. These functions allocate a string of the
@@ -750,7 +841,7 @@ class Heap {
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* AllocateRawAsciiString(
+ MUST_USE_RESULT MaybeObject* AllocateRawOneByteString(
int length,
PretenureFlag pretenure = NOT_TENURED);
MUST_USE_RESULT MaybeObject* AllocateRawTwoByteString(
@@ -787,6 +878,13 @@ class Heap {
void* external_pointer,
PretenureFlag pretenure);
+ // Allocate a symbol.
+ // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
+ // failed.
+ // Please note this does not perform a garbage collection.
+ MUST_USE_RESULT MaybeObject* AllocateSymbol(
+ PretenureFlag pretenure = NOT_TENURED);
+
// Allocate a tenured JS global property cell.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
@@ -809,6 +907,10 @@ class Heap {
// Please note this does not perform a garbage collection.
MUST_USE_RESULT MaybeObject* AllocateUninitializedFixedArray(int length);
+  // Move len elements within a given array from src_index to dst_index.
+ void MoveElements(FixedArray* array, int dst_index, int src_index, int len);
+
// Make a copy of src and return it. Returns
// Failure::RetryAfterGC(requested_bytes, space) if the allocation failed.
MUST_USE_RESULT inline MaybeObject* CopyFixedArray(FixedArray* src);
@@ -1033,28 +1135,28 @@ class Heap {
// the provided data as the relocation information.
MUST_USE_RESULT MaybeObject* CopyCode(Code* code, Vector<byte> reloc_info);
- // Finds the symbol for string in the symbol table.
- // If not found, a new symbol is added to the table and returned.
+ // Finds the internalized copy for string in the string table.
+ // If not found, a new string is added to the table and returned.
// Returns Failure::RetryAfterGC(requested_bytes, space) if allocation
// failed.
// Please note this function does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* LookupSymbol(Vector<const char> str);
- MUST_USE_RESULT MaybeObject* LookupAsciiSymbol(Vector<const char> str);
- MUST_USE_RESULT MaybeObject* LookupTwoByteSymbol(Vector<const uc16> str);
- MUST_USE_RESULT MaybeObject* LookupAsciiSymbol(const char* str) {
- return LookupSymbol(CStrVector(str));
+ MUST_USE_RESULT MaybeObject* InternalizeUtf8String(Vector<const char> str);
+ MUST_USE_RESULT MaybeObject* InternalizeUtf8String(const char* str) {
+ return InternalizeUtf8String(CStrVector(str));
}
- MUST_USE_RESULT MaybeObject* LookupSymbol(String* str);
- MUST_USE_RESULT MaybeObject* LookupAsciiSymbol(Handle<SeqAsciiString> string,
- int from,
- int length);
+ MUST_USE_RESULT MaybeObject* InternalizeOneByteString(
+ Vector<const uint8_t> str);
+ MUST_USE_RESULT MaybeObject* InternalizeTwoByteString(Vector<const uc16> str);
+ MUST_USE_RESULT MaybeObject* InternalizeString(String* str);
+ MUST_USE_RESULT MaybeObject* InternalizeOneByteString(
+ Handle<SeqOneByteString> string, int from, int length);
- bool LookupSymbolIfExists(String* str, String** symbol);
- bool LookupTwoCharsSymbolIfExists(String* str, String** symbol);
+ bool InternalizeStringIfExists(String* str, String** result);
+ bool InternalizeTwoCharsStringIfExists(String* str, String** result);
- // Compute the matching symbol map for a string if possible.
+ // Compute the matching internalized string map for a string if possible.
// NULL is returned if the string is in new space or not flattened.
- Map* SymbolMapForString(String* str);
+ Map* InternalizedStringMapForString(String* str);
// Tries to flatten a string before compare operation.
//
@@ -1182,15 +1284,15 @@ class Heap {
STRUCT_LIST(STRUCT_MAP_ACCESSOR)
#undef STRUCT_MAP_ACCESSOR
-#define SYMBOL_ACCESSOR(name, str) String* name() { \
+#define STRING_ACCESSOR(name, str) String* name() { \
return String::cast(roots_[k##name##RootIndex]); \
}
- SYMBOL_LIST(SYMBOL_ACCESSOR)
-#undef SYMBOL_ACCESSOR
+ INTERNALIZED_STRING_LIST(STRING_ACCESSOR)
+#undef STRING_ACCESSOR
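
The STRING_ACCESSOR pattern is a classic X-macro: the list macro is re-expanded once per use site (accessors here, root indices further down). A self-contained miniature of the technique, independent of V8:

    #include <cstdio>
    #define COLOR_LIST(V) V(red, "red") V(green, "green") V(blue, "blue")

    #define DECLARE_INDEX(name, str) k_##name,
    enum ColorIndex { COLOR_LIST(DECLARE_INDEX) kColorCount };
    #undef DECLARE_INDEX

    #define DECLARE_NAME(name, str) str,
    static const char* kColorNames[] = { COLOR_LIST(DECLARE_NAME) };
    #undef DECLARE_NAME

    int main() {
      std::printf("%d entries, first=%s\n", kColorCount, kColorNames[k_red]);
      return 0;
    }
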
- // The hidden_symbol is special because it is the empty string, but does
+ // The hidden_string is special because it is the empty string, but does
// not match the empty string.
- String* hidden_symbol() { return hidden_symbol_; }
+ String* hidden_string() { return hidden_string_; }
void set_native_contexts_list(Object* object) {
native_contexts_list_ = object;
@@ -1277,6 +1379,11 @@ class Heap {
#ifdef VERIFY_HEAP
// Verify the heap is in its normal state before or after a GC.
void Verify();
+
+
+ bool weak_embedded_maps_verification_enabled() {
+ return no_weak_embedded_maps_verification_scope_depth_ == 0;
+ }
#endif
#ifdef DEBUG
@@ -1311,26 +1418,23 @@ class Heap {
// Print short heap statistics.
void PrintShortHeapStatistics();
- // Makes a new symbol object
+ // Makes a new internalized string object
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this function does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* CreateSymbol(
+ MUST_USE_RESULT MaybeObject* CreateInternalizedString(
const char* str, int length, int hash);
- MUST_USE_RESULT MaybeObject* CreateSymbol(String* str);
+ MUST_USE_RESULT MaybeObject* CreateInternalizedString(String* str);
// Write barrier support for address[offset] = o.
- inline void RecordWrite(Address address, int offset);
+ INLINE(void RecordWrite(Address address, int offset));
// Write barrier support for address[start : start + len[ = o.
- inline void RecordWrites(Address address, int start, int len);
+ INLINE(void RecordWrites(Address address, int start, int len));
// Given an address occupied by a live code object, return that object.
Object* FindCodeObject(Address a);
- // Invoke Shrink on shrinkable spaces.
- void Shrink();
-
enum HeapState { NOT_IN_GC, SCAVENGE, MARK_COMPACT };
inline HeapState gc_state() { return gc_state_; }
@@ -1437,17 +1541,17 @@ class Heap {
STRONG_ROOT_LIST(ROOT_INDEX_DECLARATION)
#undef ROOT_INDEX_DECLARATION
-#define SYMBOL_INDEX_DECLARATION(name, str) k##name##RootIndex,
- SYMBOL_LIST(SYMBOL_INDEX_DECLARATION)
-#undef SYMBOL_DECLARATION
+#define STRING_INDEX_DECLARATION(name, str) k##name##RootIndex,
+ INTERNALIZED_STRING_LIST(STRING_INDEX_DECLARATION)
+#undef STRING_DECLARATION
// Utility type maps
#define DECLARE_STRUCT_MAP(NAME, Name, name) k##Name##MapRootIndex,
STRUCT_LIST(DECLARE_STRUCT_MAP)
#undef DECLARE_STRUCT_MAP
- kSymbolTableRootIndex,
- kStrongRootListLength = kSymbolTableRootIndex,
+ kStringTableRootIndex,
+ kStrongRootListLength = kStringTableRootIndex,
kRootListLength
};
@@ -1455,7 +1559,7 @@ class Heap {
STATIC_CHECK(kNullValueRootIndex == Internals::kNullValueRootIndex);
STATIC_CHECK(kTrueValueRootIndex == Internals::kTrueValueRootIndex);
STATIC_CHECK(kFalseValueRootIndex == Internals::kFalseValueRootIndex);
- STATIC_CHECK(kempty_symbolRootIndex == Internals::kEmptySymbolRootIndex);
+ STATIC_CHECK(kempty_stringRootIndex == Internals::kEmptyStringRootIndex);
// Generated code can embed direct references to non-writable roots if
// they are in new space.
@@ -1543,13 +1647,31 @@ class Heap {
}
// Returns maximum GC pause.
- int get_max_gc_pause() { return max_gc_pause_; }
+ double get_max_gc_pause() { return max_gc_pause_; }
// Returns maximum size of objects alive after GC.
intptr_t get_max_alive_after_gc() { return max_alive_after_gc_; }
// Returns minimal interval between two subsequent collections.
- int get_min_in_mutator() { return min_in_mutator_; }
+ double get_min_in_mutator() { return min_in_mutator_; }
+
+ // TODO(hpayer): remove, should be handled by GCTracer
+ void AddMarkingTime(double marking_time) {
+ marking_time_ += marking_time;
+ }
+
+ double marking_time() const {
+ return marking_time_;
+ }
+
+ // TODO(hpayer): remove, should be handled by GCTracer
+ void AddSweepingTime(double sweeping_time) {
+ sweeping_time_ += sweeping_time;
+ }
+
+ double sweeping_time() const {
+ return sweeping_time_;
+ }
MarkCompactCollector* mark_compact_collector() {
return &mark_compact_collector_;
@@ -1568,11 +1690,13 @@ class Heap {
}
bool IsSweepingComplete() {
- return old_data_space()->IsSweepingComplete() &&
- old_pointer_space()->IsSweepingComplete();
+ return !mark_compact_collector()->IsConcurrentSweepingInProgress() &&
+ old_data_space()->IsLazySweepingComplete() &&
+ old_pointer_space()->IsLazySweepingComplete();
}
bool AdvanceSweepers(int step_size) {
+ ASSERT(!FLAG_parallel_sweeping && !FLAG_concurrent_sweeping);
bool sweeping_complete = old_data_space()->AdvanceSweeper(step_size);
sweeping_complete &= old_pointer_space()->AdvanceSweeper(step_size);
return sweeping_complete;
@@ -1582,6 +1706,10 @@ class Heap {
return &external_string_table_;
}
+ ErrorObjectList* error_object_list() {
+ return &error_object_list_;
+ }
+
// Returns the current sweep generation.
int sweep_generation() {
return sweep_generation_;
@@ -1589,13 +1717,8 @@ class Heap {
inline Isolate* isolate();
- inline void CallGlobalGCPrologueCallback() {
- if (global_gc_prologue_callback_ != NULL) global_gc_prologue_callback_();
- }
-
- inline void CallGlobalGCEpilogueCallback() {
- if (global_gc_epilogue_callback_ != NULL) global_gc_epilogue_callback_();
- }
+ void CallGCPrologueCallbacks(GCType gc_type);
+ void CallGCEpilogueCallbacks(GCType gc_type);
inline bool OldGenerationAllocationLimitReached();
@@ -1794,8 +1917,6 @@ class Heap {
// Do we expect to be able to handle allocation failure at this
// time?
bool disallow_allocation_failure_;
-
- HeapDebugUtils* debug_utils_;
#endif // DEBUG
// Indicates that the new space should be kept small due to high promotion
@@ -1844,7 +1965,7 @@ class Heap {
RootListIndex index;
};
- struct ConstantSymbolTable {
+ struct ConstantStringTable {
const char* contents;
RootListIndex index;
};
@@ -1856,12 +1977,12 @@ class Heap {
};
static const StringTypeTable string_type_table[];
- static const ConstantSymbolTable constant_symbol_table[];
+ static const ConstantStringTable constant_string_table[];
static const StructTable struct_table[];
- // The special hidden symbol which is an empty string, but does not match
+ // The special hidden string which is an empty string, but does not match
// any string when looked up in properties.
- String* hidden_symbol_;
+ String* hidden_string_;
// GC callback function, called before and after mark-compact GC.
// Allocations in the callback function are disallowed.
@@ -1912,8 +2033,6 @@ class Heap {
bool PerformGarbageCollection(GarbageCollector collector,
GCTracer* tracer);
- bool IterateObjectGroups(ObjectVisitor* scavenge_visitor);
-
inline void UpdateOldSpaceLimits();
// Allocate an uninitialized object in map space. The behavior is identical
@@ -1940,15 +2059,19 @@ class Heap {
void CreateFixedStubs();
- MaybeObject* CreateOddball(const char* to_string,
- Object* to_number,
- byte kind);
+ MUST_USE_RESULT MaybeObject* CreateOddball(const char* to_string,
+ Object* to_number,
+ byte kind);
// Allocate a JSArray with no elements
MUST_USE_RESULT MaybeObject* AllocateJSArray(
ElementsKind elements_kind,
PretenureFlag pretenure = NOT_TENURED);
+ MUST_USE_RESULT MaybeObject* AllocateJSArrayWithAllocationSite(
+ ElementsKind elements_kind,
+ Handle<Object> allocation_site_info_payload);
+
// Allocate empty fixed array.
MUST_USE_RESULT MaybeObject* AllocateEmptyFixedArray();
@@ -1995,7 +2118,6 @@ class Heap {
GCTracer* tracer_;
-
// Allocates a small number string cache.
MUST_USE_RESULT MaybeObject* AllocateInitialNumberStringCache();
// Creates and installs the full-sized number string cache.
@@ -2106,7 +2228,7 @@ class Heap {
void ClearObjectStats(bool clear_last_time_stats = false);
- static const int kInitialSymbolTableSize = 2048;
+ static const int kInitialStringTableSize = 2048;
static const int kInitialEvalCacheSize = 64;
static const int kInitialNumberStringCacheSize = 256;
@@ -2117,22 +2239,28 @@ class Heap {
size_t object_sizes_last_time_[OBJECT_STATS_COUNT];
// Maximum GC pause.
- int max_gc_pause_;
+ double max_gc_pause_;
// Total time spent in GC.
- int total_gc_time_ms_;
+ double total_gc_time_ms_;
// Maximum size of objects alive after GC.
intptr_t max_alive_after_gc_;
// Minimal interval between two subsequent collections.
- int min_in_mutator_;
+ double min_in_mutator_;
// Size of objects alive after last GC.
intptr_t alive_after_last_gc_;
double last_gc_end_timestamp_;
+ // Cumulative GC time spent in marking.

+ double marking_time_;
+
+ // Cumulative GC time spent in sweeping.
+ double sweeping_time_;
+
MarkCompactCollector mark_compact_collector_;
StoreBuffer store_buffer_;
@@ -2150,6 +2278,10 @@ class Heap {
unsigned int gc_count_at_last_idle_gc_;
int scavenges_since_last_idle_round_;
+#ifdef VERIFY_HEAP
+ int no_weak_embedded_maps_verification_scope_depth_;
+#endif
+
static const int kMaxMarkSweepsInIdleRound = 7;
static const int kIdleScavengeThreshold = 5;
@@ -2162,6 +2294,8 @@ class Heap {
ExternalStringTable external_string_table_;
+ ErrorObjectList error_object_list_;
+
VisitorDispatchTable<ScavengingCallback> scavenging_visitors_table_;
MemoryChunk* chunks_queued_for_free_;
@@ -2177,6 +2311,9 @@ class Heap {
friend class MarkCompactCollector;
friend class MarkCompactMarkingVisitor;
friend class MapCompact;
+#ifdef VERIFY_HEAP
+ friend class NoWeakEmbeddedMapsVerificationScope;
+#endif
DISALLOW_COPY_AND_ASSIGN(Heap);
};
@@ -2237,6 +2374,14 @@ class AlwaysAllocateScope {
DisallowAllocationFailure disallow_allocation_failure_;
};
+#ifdef VERIFY_HEAP
+class NoWeakEmbeddedMapsVerificationScope {
+ public:
+ inline NoWeakEmbeddedMapsVerificationScope();
+ inline ~NoWeakEmbeddedMapsVerificationScope();
+};
+#endif
+
// Visitor class to verify interior pointers in spaces that do not contain
// or care about intergenerational references. All heap object pointers have to
@@ -2249,37 +2394,40 @@ class VerifyPointersVisitor: public ObjectVisitor {
};
-// Space iterator for iterating over all spaces of the heap.
-// Returns each space in turn, and null when it is done.
+// Space iterator for iterating over all spaces of the heap. Returns each space
+// in turn, and null when it is done.
class AllSpaces BASE_EMBEDDED {
public:
+ explicit AllSpaces(Heap* heap) : heap_(heap), counter_(FIRST_SPACE) {}
Space* next();
- AllSpaces() { counter_ = FIRST_SPACE; }
private:
+ Heap* heap_;
int counter_;
};
// Space iterator for iterating over all old spaces of the heap: Old pointer
-// space, old data space and code space.
-// Returns each space in turn, and null when it is done.
+// space, old data space and code space. Returns each space in turn, and null
+// when it is done.
class OldSpaces BASE_EMBEDDED {
public:
+ explicit OldSpaces(Heap* heap) : heap_(heap), counter_(OLD_POINTER_SPACE) {}
OldSpace* next();
- OldSpaces() { counter_ = OLD_POINTER_SPACE; }
private:
+ Heap* heap_;
int counter_;
};
-// Space iterator for iterating over all the paged spaces of the heap:
-// Map space, old pointer space, old data space, code space and cell space.
-// Returns each space in turn, and null when it is done.
+// Space iterator for iterating over all the paged spaces of the heap: Map
+// space, old pointer space, old data space, code space and cell space. Returns
+// each space in turn, and null when it is done.
class PagedSpaces BASE_EMBEDDED {
public:
+ explicit PagedSpaces(Heap* heap) : heap_(heap), counter_(OLD_POINTER_SPACE) {}
PagedSpace* next();
- PagedSpaces() { counter_ = OLD_POINTER_SPACE; }
private:
+ Heap* heap_;
int counter_;
};
@@ -2289,8 +2437,8 @@ class PagedSpaces BASE_EMBEDDED {
// returned object iterators is handled by the space iterator.
class SpaceIterator : public Malloced {
public:
- SpaceIterator();
- explicit SpaceIterator(HeapObjectCallback size_func);
+ explicit SpaceIterator(Heap* heap);
+ SpaceIterator(Heap* heap, HeapObjectCallback size_func);
virtual ~SpaceIterator();
bool has_next();
@@ -2299,6 +2447,7 @@ class SpaceIterator : public Malloced {
private:
ObjectIterator* CreateIterator();
+ Heap* heap_;
int current_space_; // from enum AllocationSpace.
ObjectIterator* iterator_; // object iterator for the current space.
HeapObjectCallback size_func_;
@@ -2323,8 +2472,8 @@ class HeapIterator BASE_EMBEDDED {
kFilterUnreachable
};
- HeapIterator();
- explicit HeapIterator(HeapObjectsFiltering filtering);
+ explicit HeapIterator(Heap* heap);
+ HeapIterator(Heap* heap, HeapObjectsFiltering filtering);
~HeapIterator();
HeapObject* next();
@@ -2337,6 +2486,7 @@ class HeapIterator BASE_EMBEDDED {
void Shutdown();
HeapObject* NextObject();
+ Heap* heap_;
HeapObjectsFiltering filtering_;
HeapObjectsFilter* filter_;
// Space iterator for iterating all the spaces.
@@ -2414,7 +2564,7 @@ class DescriptorLookupCache {
// Lookup descriptor index for (map, name).
// If absent, kAbsent is returned.
int Lookup(Map* source, String* name) {
- if (!StringShape(name).IsSymbol()) return kAbsent;
+ if (!StringShape(name).IsInternalized()) return kAbsent;
int index = Hash(source, name);
Key& key = keys_[index];
if ((key.source == source) && (key.name == name)) return results_[index];
@@ -2424,7 +2574,7 @@ class DescriptorLookupCache {
// Update an element in the cache.
void Update(Map* source, String* name, int result) {
ASSERT(result != kAbsent);
- if (StringShape(name).IsSymbol()) {
+ if (StringShape(name).IsInternalized()) {
int index = Hash(source, name);
Key& key = keys_[index];
key.source = source;
@@ -2562,6 +2712,18 @@ class GCTracer BASE_EMBEDDED {
promoted_objects_size_ += object_size;
}
+ void increment_nodes_died_in_new_space() {
+ nodes_died_in_new_space_++;
+ }
+
+ void increment_nodes_copied_in_new_space() {
+ nodes_copied_in_new_space_++;
+ }
+
+ void increment_nodes_promoted() {
+ nodes_promoted_++;
+ }
+
private:
// Returns a string matching the collector.
const char* CollectorString();
@@ -2606,6 +2768,15 @@ class GCTracer BASE_EMBEDDED {
// Size of objects promoted during the current collection.
intptr_t promoted_objects_size_;
+ // Number of nodes that died in the new space.
+ int nodes_died_in_new_space_;
+
+ // Number of nodes copied to the new space.
+ int nodes_copied_in_new_space_;
+
+ // Number of nodes promoted to the old space.
+ int nodes_promoted_;
+
// Incremental marking steps counters.
int steps_count_;
double steps_took_;
@@ -2774,7 +2945,7 @@ class IntrusiveMarking {
};
-#if defined(DEBUG) || defined(LIVE_OBJECT_LIST)
+#ifdef DEBUG
// Helper class for tracing paths to a search target Object from all roots.
// The TracePathFrom() method can be used to trace paths from a specific
// object to the search target object.
@@ -2831,7 +3002,7 @@ class PathTracer : public ObjectVisitor {
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(PathTracer);
};
-#endif // DEBUG || LIVE_OBJECT_LIST
+#endif // DEBUG
} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/hydrogen-instructions.cc b/src/3rdparty/v8/src/hydrogen-instructions.cc
index c8edcff..37bfb8f 100644
--- a/src/3rdparty/v8/src/hydrogen-instructions.cc
+++ b/src/3rdparty/v8/src/hydrogen-instructions.cc
@@ -27,6 +27,7 @@
#include "v8.h"
+#include "double.h"
#include "factory.h"
#include "hydrogen.h"
@@ -75,6 +76,12 @@ int HValue::LoopWeight() const {
}
+Isolate* HValue::isolate() const {
+ ASSERT(block() != NULL);
+ return block()->graph()->isolate();
+}
+
+
void HValue::AssumeRepresentation(Representation r) {
if (CheckFlag(kFlexibleRepresentation)) {
ChangeRepresentation(r);
@@ -85,6 +92,81 @@ void HValue::AssumeRepresentation(Representation r) {
}
+void HValue::InferRepresentation(HInferRepresentation* h_infer) {
+ ASSERT(CheckFlag(kFlexibleRepresentation));
+ Representation new_rep = RepresentationFromInputs();
+ UpdateRepresentation(new_rep, h_infer, "inputs");
+ new_rep = RepresentationFromUses();
+ UpdateRepresentation(new_rep, h_infer, "uses");
+}
+
+
+Representation HValue::RepresentationFromUses() {
+ if (HasNoUses()) return Representation::None();
+
+ // Array of use counts for each representation.
+ int use_count[Representation::kNumRepresentations] = { 0 };
+
+ for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
+ HValue* use = it.value();
+ Representation rep = use->observed_input_representation(it.index());
+ if (rep.IsNone()) continue;
+ if (FLAG_trace_representation) {
+ PrintF("#%d %s is used by #%d %s as %s%s\n",
+ id(), Mnemonic(), use->id(), use->Mnemonic(), rep.Mnemonic(),
+ (use->CheckFlag(kTruncatingToInt32) ? "-trunc" : ""));
+ }
+ use_count[rep.kind()] += use->LoopWeight();
+ }
+ if (IsPhi()) HPhi::cast(this)->AddIndirectUsesTo(&use_count[0]);
+ int tagged_count = use_count[Representation::kTagged];
+ int double_count = use_count[Representation::kDouble];
+ int int32_count = use_count[Representation::kInteger32];
+
+ if (tagged_count > 0) return Representation::Tagged();
+ if (double_count > 0) return Representation::Double();
+ if (int32_count > 0) return Representation::Integer32();
+
+ return Representation::None();
+}
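RepresentationFromUses is a weighted vote: every use reports the representation it observed for this operand, weighted by loop depth, and any Tagged vote wins over Double, which wins over Integer32. A minimal standalone sketch of that selection rule, with the enum and counts simplified from Representation::Kind:

#include <cstdio>

enum Kind { kNone, kInteger32, kDouble, kTagged, kNumKinds };

// Sketch: pick the most general representation any use has observed.
static Kind PickFromUses(const int use_count[kNumKinds]) {
  if (use_count[kTagged] > 0) return kTagged;
  if (use_count[kDouble] > 0) return kDouble;
  if (use_count[kInteger32] > 0) return kInteger32;
  return kNone;
}

int main() {
  int counts[kNumKinds] = {0, 3, 1, 0};       // three int32 uses, one double
  std::printf("%d\n", PickFromUses(counts));  // 2 == kDouble wins
  return 0;
}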
+
+
+void HValue::UpdateRepresentation(Representation new_rep,
+ HInferRepresentation* h_infer,
+ const char* reason) {
+ Representation r = representation();
+ if (new_rep.is_more_general_than(r)) {
+ // When an HConstant is marked "not convertible to integer", then
+ // never try to represent it as an integer.
+ if (new_rep.IsInteger32() && !IsConvertibleToInteger()) {
+ new_rep = Representation::Tagged();
+ if (FLAG_trace_representation) {
+ PrintF("Changing #%d %s representation %s -> %s because it's NCTI"
+ " (%s want i)\n",
+ id(), Mnemonic(), r.Mnemonic(), new_rep.Mnemonic(), reason);
+ }
+ } else {
+ if (FLAG_trace_representation) {
+ PrintF("Changing #%d %s representation %s -> %s based on %s\n",
+ id(), Mnemonic(), r.Mnemonic(), new_rep.Mnemonic(), reason);
+ }
+ }
+ ChangeRepresentation(new_rep);
+ AddDependantsToWorklist(h_infer);
+ }
+}
+
+
+void HValue::AddDependantsToWorklist(HInferRepresentation* h_infer) {
+ for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
+ h_infer->AddToWorklist(it.value());
+ }
+ for (int i = 0; i < OperandCount(); ++i) {
+ h_infer->AddToWorklist(OperandAt(i));
+ }
+}
+
+
static int32_t ConvertAndSetOverflow(int64_t result, bool* overflow) {
if (result > kMaxInt) {
*overflow = true;
@@ -266,7 +348,11 @@ const char* HType::ToString() {
}
-HType HType::TypeFromValue(Handle<Object> value) {
+HType HType::TypeFromValue(Isolate* isolate, Handle<Object> value) {
+ // Handle dereferencing is safe here: an object's type as checked below
+ // never changes.
+ AllowHandleDereference allow_handle_deref(isolate);
+
HType result = HType::Tagged();
if (value->IsSmi()) {
result = HType::Smi();
@@ -285,6 +371,55 @@ HType HType::TypeFromValue(Handle<Object> value) {
}
+bool HValue::Dominates(HValue* dominator, HValue* dominated) {
+ if (dominator->block() != dominated->block()) {
+ // If they are in different blocks we can use the dominance relation
+ // between the blocks.
+ return dominator->block()->Dominates(dominated->block());
+ } else {
+ // Otherwise we must see which instruction comes first, considering
+ // that phis always precede regular instructions.
+ if (dominator->IsInstruction()) {
+ if (dominated->IsInstruction()) {
+ for (HInstruction* next = HInstruction::cast(dominator)->next();
+ next != NULL;
+ next = next->next()) {
+ if (next == dominated) return true;
+ }
+ return false;
+ } else if (dominated->IsPhi()) {
+ return false;
+ } else {
+ UNREACHABLE();
+ }
+ } else if (dominator->IsPhi()) {
+ if (dominated->IsInstruction()) {
+ return true;
+ } else {
+ // We cannot compare which phi comes first.
+ UNREACHABLE();
+ }
+ } else {
+ UNREACHABLE();
+ }
+ return false;
+ }
+}
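Within one block the test above is purely positional: phis precede all instructions, and between two instructions the earlier one in the singly-linked instruction list dominates. A self-contained sketch of that walk (the node type is a stand-in, not V8's HInstruction):

#include <cstddef>

// Hypothetical list node standing in for an instruction.
struct Node { Node* next; };

// True if a strictly precedes b in the list that starts at a.
static bool Precedes(const Node* a, const Node* b) {
  for (const Node* n = a->next; n != NULL; n = n->next) {
    if (n == b) return true;
  }
  return false;
}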
+
+
+bool HValue::TestDominanceUsingProcessedFlag(HValue* dominator,
+ HValue* dominated) {
+ if (dominator->block() != dominated->block()) {
+ return dominator->block()->Dominates(dominated->block());
+ } else {
+ // If both arguments are in the same block we check if dominator is a phi
+ // or if dominated has not already been processed: in either case we know
+ // that dominator precedes dominated.
+ return dominator->IsPhi() || !dominated->CheckFlag(kIDefsProcessingDone);
+ }
+}
+
+
bool HValue::IsDefinedAfter(HBasicBlock* other) const {
return block()->block_id() > other->block_id();
}
@@ -301,6 +436,7 @@ HUseListNode* HUseListNode::tail() {
bool HValue::CheckUsesForFlag(Flag f) {
for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
+ if (it.value()->IsSimulate()) continue;
if (!it.value()->CheckFlag(f)) return false;
}
return true;
@@ -396,6 +532,16 @@ const char* HValue::Mnemonic() const {
}
+bool HValue::IsInteger32Constant() {
+ return IsConstant() && HConstant::cast(this)->HasInteger32Value();
+}
+
+
+int32_t HValue::GetInteger32Constant() {
+ return HConstant::cast(this)->Integer32Value();
+}
+
+
void HValue::SetOperandAt(int index, HValue* value) {
RegisterUse(index, value);
InternalSetOperandAt(index, value);
@@ -491,6 +637,11 @@ void HValue::PrintNameTo(StringStream* stream) {
}
+bool HValue::HasMonomorphicJSObjectType() {
+ return !GetMonomorphicJSObjectMap().is_null();
+}
+
+
bool HValue::UpdateInferredType() {
HType type = CalculateInferredType();
bool result = (!type.Equals(type_));
@@ -657,10 +808,74 @@ void HInstruction::Verify() {
// HValue::DataEquals. The default implementation is UNREACHABLE. We
// don't actually care whether DataEquals returns true or false here.
if (CheckFlag(kUseGVN)) DataEquals(this);
+
+ // Verify that all uses are in the graph.
+ for (HUseIterator use = uses(); !use.Done(); use.Advance()) {
+ if (use.value()->IsInstruction()) {
+ ASSERT(HInstruction::cast(use.value())->IsLinked());
+ }
+ }
}
#endif
+HNumericConstraint* HNumericConstraint::AddToGraph(
+ HValue* constrained_value,
+ NumericRelation relation,
+ HValue* related_value,
+ HInstruction* insertion_point) {
+ if (insertion_point == NULL) {
+ if (constrained_value->IsInstruction()) {
+ insertion_point = HInstruction::cast(constrained_value);
+ } else if (constrained_value->IsPhi()) {
+ insertion_point = constrained_value->block()->first();
+ } else {
+ UNREACHABLE();
+ }
+ }
+ HNumericConstraint* result =
+ new(insertion_point->block()->zone()) HNumericConstraint(
+ constrained_value, relation, related_value);
+ result->InsertAfter(insertion_point);
+ return result;
+}
+
+
+void HNumericConstraint::PrintDataTo(StringStream* stream) {
+ stream->Add("(");
+ constrained_value()->PrintNameTo(stream);
+ stream->Add(" %s ", relation().Mnemonic());
+ related_value()->PrintNameTo(stream);
+ stream->Add(")");
+}
+
+
+HInductionVariableAnnotation* HInductionVariableAnnotation::AddToGraph(
+ HPhi* phi,
+ NumericRelation relation,
+ int operand_index) {
+ HInductionVariableAnnotation* result =
+ new(phi->block()->zone()) HInductionVariableAnnotation(phi, relation,
+ operand_index);
+ result->InsertAfter(phi->block()->first());
+ return result;
+}
+
+
+void HInductionVariableAnnotation::PrintDataTo(StringStream* stream) {
+ stream->Add("(");
+ RedefinedOperand()->PrintNameTo(stream);
+ stream->Add(" %s ", relation().Mnemonic());
+ induction_base()->PrintNameTo(stream);
+ stream->Add(")");
+}
+
+
+void HDummyUse::PrintDataTo(StringStream* stream) {
+ value()->PrintNameTo(stream);
+}
+
+
void HUnaryCall::PrintDataTo(StringStream* stream) {
value()->PrintNameTo(stream);
stream->Add(" ");
@@ -677,10 +892,59 @@ void HBinaryCall::PrintDataTo(StringStream* stream) {
}
+void HBoundsCheck::AddInformativeDefinitions() {
+ // TODO(mmassi): Executing this code during AddInformativeDefinitions
+ // is a hack. Move it to some other HPhase.
+ if (index()->IsRelationTrue(NumericRelation::Ge(),
+ block()->graph()->GetConstant0()) &&
+ index()->IsRelationTrue(NumericRelation::Lt(), length())) {
+ set_skip_check(true);
+ }
+}
+
+
+bool HBoundsCheck::IsRelationTrueInternal(NumericRelation relation,
+ HValue* related_value) {
+ if (related_value == length()) {
+ // An HBoundsCheck is smaller than the length it is compared against.
+ return NumericRelation::Lt().Implies(relation);
+ } else if (related_value == block()->graph()->GetConstant0()) {
+ // An HBoundsCheck is greater than or equal to zero.
+ return NumericRelation::Ge().Implies(relation);
+ } else {
+ return false;
+ }
+}
+
+
void HBoundsCheck::PrintDataTo(StringStream* stream) {
index()->PrintNameTo(stream);
stream->Add(" ");
length()->PrintNameTo(stream);
+ if (skip_check()) {
+ stream->Add(" [DISABLED]");
+ }
+}
+
+
+void HBoundsCheck::InferRepresentation(HInferRepresentation* h_infer) {
+ ASSERT(CheckFlag(kFlexibleRepresentation));
+ Representation r;
+ if (key_mode_ == DONT_ALLOW_SMI_KEY ||
+ !length()->representation().IsTagged()) {
+ r = Representation::Integer32();
+ } else if (index()->representation().IsTagged() ||
+ (index()->ActualValue()->IsConstant() &&
+ HConstant::cast(index()->ActualValue())->HasSmiValue())) {
+ // If the index is tagged, or a constant that holds a Smi, allow the length
+ // to be tagged, since it is usually already tagged from loading it out of
+ // the length field of a JSArray. This allows for direct comparison without
+ // untagging.
+ r = Representation::Tagged();
+ } else {
+ r = Representation::Integer32();
+ }
+ UpdateRepresentation(r, h_infer, "boundscheck");
}
@@ -730,6 +994,13 @@ void HClassOfTestAndBranch::PrintDataTo(StringStream* stream) {
}
+void HWrapReceiver::PrintDataTo(StringStream* stream) {
+ receiver()->PrintNameTo(stream);
+ stream->Add(" ");
+ function()->PrintNameTo(stream);
+}
+
+
void HAccessArgumentsAt::PrintDataTo(StringStream* stream) {
arguments()->PrintNameTo(stream);
stream->Add("[");
@@ -769,6 +1040,24 @@ void HReturn::PrintDataTo(StringStream* stream) {
}
+Representation HBranch::observed_input_representation(int index) {
+ static const ToBooleanStub::Types tagged_types(
+ ToBooleanStub::UNDEFINED |
+ ToBooleanStub::NULL_TYPE |
+ ToBooleanStub::SPEC_OBJECT |
+ ToBooleanStub::STRING);
+ if (expected_input_types_.ContainsAnyOf(tagged_types)) {
+ return Representation::Tagged();
+ } else if (expected_input_types_.Contains(ToBooleanStub::HEAP_NUMBER)) {
+ return Representation::Double();
+ } else if (expected_input_types_.Contains(ToBooleanStub::SMI)) {
+ return Representation::Integer32();
+ } else {
+ return Representation::None();
+ }
+}
+
+
void HCompareMap::PrintDataTo(StringStream* stream) {
value()->PrintNameTo(stream);
stream->Add(" (%p)", *map());
@@ -930,6 +1219,12 @@ void HTypeof::PrintDataTo(StringStream* stream) {
}
+void HForceRepresentation::PrintDataTo(StringStream* stream) {
+ stream->Add("%s ", representation().Mnemonic());
+ value()->PrintNameTo(stream);
+}
+
+
void HChange::PrintDataTo(StringStream* stream) {
HUnaryOperation::PrintDataTo(stream);
stream->Add(" %s to %s", from().Mnemonic(), to().Mnemonic());
@@ -942,8 +1237,10 @@ void HChange::PrintDataTo(StringStream* stream) {
void HJSArrayLength::PrintDataTo(StringStream* stream) {
value()->PrintNameTo(stream);
- stream->Add(" ");
- typecheck()->PrintNameTo(stream);
+ if (HasTypeCheck()) {
+ stream->Add(" ");
+ typecheck()->PrintNameTo(stream);
+ }
}
@@ -1010,10 +1307,12 @@ HValue* HCheckInstanceType::Canonicalize() {
value()->type().IsString()) {
return NULL;
}
- if (check_ == IS_SYMBOL &&
- value()->IsConstant() &&
- HConstant::cast(value())->handle()->IsSymbol()) {
- return NULL;
+
+ if (check_ == IS_INTERNALIZED_STRING && value()->IsConstant()) {
+ // Dereferencing is safe here:
+ // an internalized string cannot become non-internalized.
+ AllowHandleDereference allow_handle_deref(isolate());
+ if (HConstant::cast(value())->handle()->IsInternalizedString()) return NULL;
}
return this;
}
@@ -1043,9 +1342,9 @@ void HCheckInstanceType::GetCheckMaskAndTag(uint8_t* mask, uint8_t* tag) {
*mask = kIsNotStringMask;
*tag = kStringTag;
return;
- case IS_SYMBOL:
- *mask = kIsSymbolMask;
- *tag = kSymbolTag;
+ case IS_INTERNALIZED_STRING:
+ *mask = kIsInternalizedMask;
+ *tag = kInternalizedTag;
return;
default:
UNREACHABLE();
@@ -1053,10 +1352,32 @@ void HCheckInstanceType::GetCheckMaskAndTag(uint8_t* mask, uint8_t* tag) {
}
+void HCheckMaps::SetSideEffectDominator(GVNFlag side_effect,
+ HValue* dominator) {
+ ASSERT(side_effect == kChangesMaps);
+ // TODO(mstarzinger): For now we specialize on HStoreNamedField, but once
+ // type information is rich enough we should generalize this to any HType
+ // for which the map is known.
+ if (HasNoUses() && dominator->IsStoreNamedField()) {
+ HStoreNamedField* store = HStoreNamedField::cast(dominator);
+ Handle<Map> map = store->transition();
+ if (map.is_null() || store->object() != value()) return;
+ for (int i = 0; i < map_set()->length(); i++) {
+ if (map.is_identical_to(map_set()->at(i))) {
+ DeleteAndReplaceWith(NULL);
+ return;
+ }
+ }
+ }
+}
+
+
void HLoadElements::PrintDataTo(StringStream* stream) {
value()->PrintNameTo(stream);
- stream->Add(" ");
- typecheck()->PrintNameTo(stream);
+ if (HasTypeCheck()) {
+ stream->Add(" ");
+ typecheck()->PrintNameTo(stream);
+ }
}
@@ -1081,7 +1402,7 @@ const char* HCheckInstanceType::GetCheckName() {
case IS_SPEC_OBJECT: return "object";
case IS_JS_ARRAY: return "array";
case IS_STRING: return "string";
- case IS_SYMBOL: return "symbol";
+ case IS_INTERNALIZED_STRING: return "internalized_string";
}
UNREACHABLE();
return "";
@@ -1094,7 +1415,8 @@ void HCheckInstanceType::PrintDataTo(StringStream* stream) {
void HCheckPrototypeMaps::PrintDataTo(StringStream* stream) {
- stream->Add("[receiver_prototype=%p,holder=%p]", *prototype(), *holder());
+ stream->Add("[receiver_prototype=%p,holder=%p]",
+ *prototypes_.first(), *prototypes_.last());
}
@@ -1249,6 +1571,11 @@ Range* HMod::InferRange(Zone* zone) {
if (a->CanBeMinusZero() || a->CanBeNegative()) {
result->set_can_be_minus_zero(true);
}
+
+ if (right()->range()->Includes(-1) && left()->range()->Includes(kMinInt)) {
+ SetFlag(HValue::kCanOverflow);
+ }
+
if (!right()->range()->CanBeZero()) {
ClearFlag(HValue::kCanBeDivByZero);
}
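The new kCanOverflow flag covers the single integer remainder that can trap: kMinInt % -1. The intermediate quotient kMinInt / -1 is 2^31, one past INT32_MAX, so the operation overflows even though the mathematical remainder is 0 (x86's idiv faults on it). Shown here in 64-bit arithmetic, since the 32-bit form is undefined behavior in C++:

#include <cstdint>
#include <cstdio>

int main() {
  const int64_t kMinInt = INT32_MIN;  // -2147483648
  // Widened on purpose: INT32_MIN / -1 does not fit in int32_t.
  std::printf("%lld %lld\n",
              (long long)(kMinInt / -1),   // 2147483648 > INT32_MAX
              (long long)(kMinInt % -1));  // 0
  return 0;
}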
@@ -1259,6 +1586,57 @@ Range* HMod::InferRange(Zone* zone) {
}
+void HPhi::AddInformativeDefinitions() {
+ if (OperandCount() == 2) {
+ // If one of the operands is an OSR block give up (this cannot be an
+ // induction variable).
+ if (OperandAt(0)->block()->is_osr_entry() ||
+ OperandAt(1)->block()->is_osr_entry()) return;
+
+ for (int operand_index = 0; operand_index < 2; operand_index++) {
+ int other_operand_index = (operand_index + 1) % 2;
+
+ static NumericRelation relations[] = {
+ NumericRelation::Ge(),
+ NumericRelation::Le()
+ };
+
+ // Check if this phi is an induction variable. If, e.g., we know that
+ // its first input is greater than the phi itself, then that must be
+ // the back edge, and the phi is always greater than its second input.
+ for (int relation_index = 0; relation_index < 2; relation_index++) {
+ if (OperandAt(operand_index)->IsRelationTrue(relations[relation_index],
+ this)) {
+ HInductionVariableAnnotation::AddToGraph(this,
+ relations[relation_index],
+ other_operand_index);
+ }
+ }
+ }
+ }
+}
+
+
+bool HPhi::IsRelationTrueInternal(NumericRelation relation, HValue* other) {
+ if (CheckFlag(kNumericConstraintEvaluationInProgress)) return false;
+
+ SetFlag(kNumericConstraintEvaluationInProgress);
+ bool result = true;
+ for (int i = 0; i < OperandCount(); i++) {
+ // Skip operands defined in OSR entry blocks.
+ if (OperandAt(i)->block()->is_osr_entry()) continue;
+
+ if (!OperandAt(i)->IsRelationTrue(relation, other)) {
+ result = false;
+ break;
+ }
+ }
+ ClearFlag(kNumericConstraintEvaluationInProgress);
+
+ return result;
+}
+
+
Range* HMathMinMax::InferRange(Zone* zone) {
if (representation().IsInteger32()) {
Range* a = left()->range();
@@ -1344,15 +1722,11 @@ void HPhi::InitRealUses(int phi_id) {
for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
HValue* value = it.value();
if (!value->IsPhi()) {
- Representation rep = value->ObservedInputRepresentation(it.index());
+ Representation rep = value->observed_input_representation(it.index());
non_phi_uses_[rep.kind()] += value->LoopWeight();
if (FLAG_trace_representation) {
- PrintF("%d %s is used by %d %s as %s\n",
- this->id(),
- this->Mnemonic(),
- value->id(),
- value->Mnemonic(),
- rep.Mnemonic());
+ PrintF("#%d Phi is used by real #%d %s as %s\n",
+ id(), value->id(), value->Mnemonic(), rep.Mnemonic());
}
}
}
@@ -1361,11 +1735,8 @@ void HPhi::InitRealUses(int phi_id) {
void HPhi::AddNonPhiUsesFrom(HPhi* other) {
if (FLAG_trace_representation) {
- PrintF("adding to %d %s uses of %d %s: i%d d%d t%d\n",
- this->id(),
- this->Mnemonic(),
- other->id(),
- other->Mnemonic(),
+ PrintF("adding to #%d Phi uses of #%d Phi: i%d d%d t%d\n",
+ id(), other->id(),
other->non_phi_uses_[Representation::kInteger32],
other->non_phi_uses_[Representation::kDouble],
other->non_phi_uses_[Representation::kTagged]);
@@ -1384,9 +1755,20 @@ void HPhi::AddIndirectUsesTo(int* dest) {
}
-void HPhi::ResetInteger32Uses() {
- non_phi_uses_[Representation::kInteger32] = 0;
- indirect_uses_[Representation::kInteger32] = 0;
+void HSimulate::MergeInto(HSimulate* other) {
+ for (int i = 0; i < values_.length(); ++i) {
+ HValue* value = values_[i];
+ if (HasAssignedIndexAt(i)) {
+ other->AddAssignedValue(GetAssignedIndexAt(i), value);
+ } else {
+ if (other->pop_count_ > 0) {
+ other->pop_count_--;
+ } else {
+ other->AddPushedValue(value);
+ }
+ }
+ }
+ other->pop_count_ += pop_count();
}
@@ -1395,7 +1777,7 @@ void HSimulate::PrintDataTo(StringStream* stream) {
if (pop_count_ > 0) stream->Add(" pop %d", pop_count_);
if (values_.length() > 0) {
if (pop_count_ > 0) stream->Add(" /");
- for (int i = 0; i < values_.length(); ++i) {
+ for (int i = values_.length() - 1; i >= 0; --i) {
if (i > 0) stream->Add(",");
if (HasAssignedIndexAt(i)) {
stream->Add(" var[%d] = ", GetAssignedIndexAt(i));
@@ -1434,8 +1816,8 @@ HConstant::HConstant(Handle<Object> handle, Representation r)
: handle_(handle),
has_int32_value_(false),
has_double_value_(false) {
- set_representation(r);
- SetFlag(kUseGVN);
+ // Dereferencing here is safe: the value of a number object does not change.
+ AllowHandleDereference allow_handle_deref(Isolate::Current());
if (handle_->IsNumber()) {
double n = handle_->Number();
has_int32_value_ = IsInteger32(n);
@@ -1443,6 +1825,16 @@ HConstant::HConstant(Handle<Object> handle, Representation r)
double_value_ = n;
has_double_value_ = true;
}
+ if (r.IsNone()) {
+ if (has_int32_value_) {
+ r = Representation::Integer32();
+ } else if (has_double_value_) {
+ r = Representation::Double();
+ } else {
+ r = Representation::Tagged();
+ }
+ }
+ Initialize(r);
}
@@ -1451,8 +1843,7 @@ HConstant::HConstant(int32_t integer_value, Representation r)
has_double_value_(true),
int32_value_(integer_value),
double_value_(FastI2D(integer_value)) {
- set_representation(r);
- SetFlag(kUseGVN);
+ Initialize(r);
}
@@ -1461,8 +1852,16 @@ HConstant::HConstant(double double_value, Representation r)
has_double_value_(true),
int32_value_(DoubleToInt32(double_value)),
double_value_(double_value) {
+ Initialize(r);
+}
+
+
+void HConstant::Initialize(Representation r) {
set_representation(r);
SetFlag(kUseGVN);
+ if (representation().IsInteger32()) {
+ ClearGVNFlag(kDependsOnOsrEntries);
+ }
}
@@ -1503,12 +1902,14 @@ bool HConstant::ToBoolean() {
double v = DoubleValue();
return v != 0 && !isnan(v);
}
- Handle<Object> literal = handle();
- if (literal->IsTrue()) return true;
- if (literal->IsFalse()) return false;
- if (literal->IsUndefined()) return false;
- if (literal->IsNull()) return false;
- if (literal->IsString() && String::cast(*literal)->length() == 0) {
+ // Dereferencing is safe: singletons do not change and strings are
+ // immutable.
+ AllowHandleDereference allow_handle_deref(isolate());
+ if (handle_->IsTrue()) return true;
+ if (handle_->IsFalse()) return false;
+ if (handle_->IsUndefined()) return false;
+ if (handle_->IsNull()) return false;
+ if (handle_->IsString() && String::cast(*handle_)->length() == 0) {
return false;
}
return true;
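The folded cases follow ES5 ToBoolean (9.2): a number is true unless it is ±0 or NaN, true/false map to themselves, undefined and null are false, and a string is true exactly when it is non-empty. A standalone restatement of the two non-trivial rows, with plain C++ types standing in for handles:

#include <cmath>
#include <string>

// Sketch of the number and string ToBoolean rows folded above.
static bool NumberToBoolean(double v) { return v != 0 && !std::isnan(v); }
static bool StringToBoolean(const std::string& s) { return !s.empty(); }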
@@ -1541,6 +1942,60 @@ void HBinaryOperation::PrintDataTo(StringStream* stream) {
}
+void HBinaryOperation::InferRepresentation(HInferRepresentation* h_infer) {
+ ASSERT(CheckFlag(kFlexibleRepresentation));
+ Representation new_rep = RepresentationFromInputs();
+ UpdateRepresentation(new_rep, h_infer, "inputs");
+ // When the operation has information about its own output type, don't look
+ // at uses.
+ if (!observed_output_representation_.IsNone()) return;
+ new_rep = RepresentationFromUses();
+ UpdateRepresentation(new_rep, h_infer, "uses");
+}
+
+
+Representation HBinaryOperation::RepresentationFromInputs() {
+ // Determine the worst case of observed input representations and
+ // the currently assumed output representation.
+ Representation rep = representation();
+ if (observed_output_representation_.is_more_general_than(rep)) {
+ rep = observed_output_representation_;
+ }
+ for (int i = 1; i <= 2; ++i) {
+ Representation input_rep = observed_input_representation(i);
+ if (input_rep.is_more_general_than(rep)) rep = input_rep;
+ }
+ // If any of the actual input representations is more general than what we
+ // have so far but not Tagged, use that representation instead.
+ Representation left_rep = left()->representation();
+ Representation right_rep = right()->representation();
+
+ if (left_rep.is_more_general_than(rep) &&
+ left()->CheckFlag(kFlexibleRepresentation)) {
+ rep = left_rep;
+ }
+ if (right_rep.is_more_general_than(rep) &&
+ right()->CheckFlag(kFlexibleRepresentation)) {
+ rep = right_rep;
+ }
+ return rep;
+}
+
+
+void HBinaryOperation::AssumeRepresentation(Representation r) {
+ set_observed_input_representation(r, r);
+ HValue::AssumeRepresentation(r);
+}
+
+
+void HMathMinMax::InferRepresentation(HInferRepresentation* h_infer) {
+ ASSERT(CheckFlag(kFlexibleRepresentation));
+ Representation new_rep = RepresentationFromInputs();
+ UpdateRepresentation(new_rep, h_infer, "inputs");
+ // Do not care about uses.
+}
+
+
Range* HBitwise::InferRange(Zone* zone) {
if (op() == Token::BIT_XOR) return HValue::InferRange(zone);
const int32_t kDefaultMask = static_cast<int32_t>(0xffffffff);
@@ -1649,6 +2104,16 @@ void HStringCompareAndBranch::PrintDataTo(StringStream* stream) {
}
+void HCompareIDAndBranch::AddInformativeDefinitions() {
+ NumericRelation r = NumericRelation::FromToken(token());
+ if (r.IsNone()) return;
+
+ HNumericConstraint::AddToGraph(left(), r, right(), SuccessorAt(0)->first());
+ HNumericConstraint::AddToGraph(
+ left(), r.Negated(), right(), SuccessorAt(1)->first());
+}
+
+
void HCompareIDAndBranch::PrintDataTo(StringStream* stream) {
stream->Add(Token::Name(token()));
stream->Add(" ");
@@ -1672,9 +2137,19 @@ void HGoto::PrintDataTo(StringStream* stream) {
}
-void HCompareIDAndBranch::SetInputRepresentation(Representation r) {
- input_representation_ = r;
- if (r.IsDouble()) {
+void HCompareIDAndBranch::InferRepresentation(HInferRepresentation* h_infer) {
+ Representation rep = Representation::None();
+ Representation left_rep = left()->representation();
+ Representation right_rep = right()->representation();
+ bool observed_integers =
+ observed_input_representation(0).IsInteger32() &&
+ observed_input_representation(1).IsInteger32();
+ bool inputs_are_not_doubles =
+ !left_rep.IsDouble() && !right_rep.IsDouble();
+ if (observed_integers && inputs_are_not_doubles) {
+ rep = Representation::Integer32();
+ } else {
+ rep = Representation::Double();
// According to the ES5 spec (11.9.3, 11.8.5), Equality comparisons (==, ===
// and !=) have special handling of undefined, e.g. undefined == undefined
// is 'true'. Relational comparisons have a different semantic, first
@@ -1691,9 +2166,8 @@ void HCompareIDAndBranch::SetInputRepresentation(Representation r) {
if (!Token::IsOrderedRelationalCompareOp(token_)) {
SetFlag(kDeoptimizeOnUndefined);
}
- } else {
- ASSERT(r.IsInteger32());
}
+ ChangeRepresentation(rep);
}
@@ -1788,8 +2262,6 @@ HLoadNamedFieldPolymorphic::HLoadNamedFieldPolymorphic(HValue* context,
// (which would just be a map check and return undefined).
!map->is_dictionary_map() &&
!map->has_named_interceptor() &&
- // TODO Do we really need this? (since version 3.13.0)
- //!map->named_interceptor_is_fallback() &&
PrototypeChainCanNeverResolve(map, name)) {
negative_lookups.Add(types->at(i), zone);
}
@@ -1859,31 +2331,51 @@ void HLoadKeyed::PrintDataTo(StringStream* stream) {
stream->Add("[");
key()->PrintNameTo(stream);
- stream->Add("] ");
- dependency()->PrintNameTo(stream);
+ if (IsDehoisted()) {
+ stream->Add(" + %d]", index_offset());
+ } else {
+ stream->Add("]");
+ }
+
+ if (HasDependency()) {
+ stream->Add(" ");
+ dependency()->PrintNameTo(stream);
+ }
+
if (RequiresHoleCheck()) {
stream->Add(" check_hole");
}
}
-bool HLoadKeyed::RequiresHoleCheck() const {
+bool HLoadKeyed::UsesMustHandleHole() const {
if (IsFastPackedElementsKind(elements_kind())) {
return false;
}
+ if (hole_mode() == ALLOW_RETURN_HOLE) return true;
+
if (IsFastDoubleElementsKind(elements_kind())) {
- return true;
+ return false;
}
for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
HValue* use = it.value();
if (!use->IsChange()) {
- return true;
+ return false;
}
}
- return false;
+ return true;
+}
+
+
+bool HLoadKeyed::RequiresHoleCheck() const {
+ if (IsFastPackedElementsKind(elements_kind())) {
+ return false;
+ }
+
+ return !UsesMustHandleHole();
}
@@ -1968,7 +2460,12 @@ void HStoreKeyed::PrintDataTo(StringStream* stream) {
stream->Add("[");
key()->PrintNameTo(stream);
- stream->Add("] = ");
+ if (IsDehoisted()) {
+ stream->Add(" + %d] = ", index_offset());
+ } else {
+ stream->Add("] = ");
+ }
+
value()->PrintNameTo(stream);
}
@@ -2072,6 +2569,14 @@ HType HCheckSmi::CalculateInferredType() {
}
+void HCheckSmiOrInt32::InferRepresentation(HInferRepresentation* h_infer) {
+ ASSERT(CheckFlag(kFlexibleRepresentation));
+ Representation r = value()->representation().IsTagged()
+ ? Representation::Tagged() : Representation::Integer32();
+ UpdateRepresentation(r, h_infer, "checksmiorint32");
+}
+
+
HType HPhi::CalculateInferredType() {
HType result = HType::Uninitialized();
for (int i = 0; i < OperandCount(); ++i) {
@@ -2087,7 +2592,7 @@ HType HConstant::CalculateInferredType() {
return Smi::IsValid(int32_value_) ? HType::Smi() : HType::HeapNumber();
}
if (has_double_value_) return HType::HeapNumber();
- return HType::TypeFromValue(handle_);
+ return HType::TypeFromValue(isolate(), handle_);
}
@@ -2152,6 +2657,11 @@ HType HAllocateObject::CalculateInferredType() {
}
+HType HAllocate::CalculateInferredType() {
+ return type_;
+}
+
+
HType HFastLiteral::CalculateInferredType() {
// TODO(mstarzinger): Be smarter, could also be JSArray here.
return HType::JSObject();
@@ -2273,35 +2783,40 @@ HValue* HAdd::EnsureAndPropagateNotMinusZero(BitVector* visited) {
bool HStoreKeyed::NeedsCanonicalization() {
- // If value is an integer or comes from the result of a keyed load
- // then it will be a non-hole value: no need for canonicalization.
- if (value()->IsLoadKeyed() ||
- (value()->IsChange() && HChange::cast(value())->from().IsInteger32())) {
+ // If value is an integer, a smi, or comes from the result of a keyed load
+ // or a constant, then it is either a non-hole value or, in the case of a
+ // constant, the hole is only being stored explicitly: no need for
+ // canonicalization.
+ if (value()->IsLoadKeyed() || value()->IsConstant()) {
return false;
}
+
+ if (value()->IsChange()) {
+ if (HChange::cast(value())->from().IsInteger32()) {
+ return false;
+ }
+ if (HChange::cast(value())->value()->type().IsSmi()) {
+ return false;
+ }
+ }
return true;
}
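The canonicalization being avoided here exists because holey double arrays reserve one NaN bit pattern for the hole; a freshly computed NaN with arbitrary payload bits must be collapsed to the canonical quiet NaN before being stored, or it could alias the hole. A minimal sketch of that collapse (the canonical bit pattern below is the common quiet NaN, used for illustration rather than taken from this patch):

#include <cstdint>
#include <cstring>

// Collapse every NaN payload to one canonical quiet NaN so that no stored
// value can collide with a reserved hole pattern.
static double CanonicalizeNaN(double d) {
  if (d != d) {  // only NaN compares unequal to itself
    const uint64_t kCanonical = 0x7FF8000000000000ULL;
    std::memcpy(&d, &kCanonical, sizeof(d));
  }
  return d;
}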
#define H_CONSTANT_INT32(val) \
-new(zone) HConstant(FACTORY->NewNumberFromInt(val, TENURED), \
- Representation::Integer32())
+new(zone) HConstant(static_cast<int32_t>(val), Representation::Integer32())
#define H_CONSTANT_DOUBLE(val) \
-new(zone) HConstant(FACTORY->NewNumber(val, TENURED), \
- Representation::Double())
+new(zone) HConstant(static_cast<double>(val), Representation::Double())
#define DEFINE_NEW_H_SIMPLE_ARITHMETIC_INSTR(HInstr, op) \
-HInstruction* HInstr::New##HInstr(Zone* zone, \
- HValue* context, \
- HValue* left, \
- HValue* right) { \
- if (left->IsConstant() && right->IsConstant()) { \
+HInstruction* HInstr::New( \
+ Zone* zone, HValue* context, HValue* left, HValue* right) { \
+ if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) { \
HConstant* c_left = HConstant::cast(left); \
HConstant* c_right = HConstant::cast(right); \
if ((c_left->HasNumberValue() && c_right->HasNumberValue())) { \
double double_res = c_left->DoubleValue() op c_right->DoubleValue(); \
if (TypeInfo::IsInt32Double(double_res)) { \
- return H_CONSTANT_INT32(static_cast<int32_t>(double_res)); \
+ return H_CONSTANT_INT32(double_res); \
} \
return H_CONSTANT_DOUBLE(double_res); \
} \
@@ -2317,11 +2832,170 @@ DEFINE_NEW_H_SIMPLE_ARITHMETIC_INSTR(HSub, -)
#undef DEFINE_NEW_H_SIMPLE_ARITHMETIC_INSTR
-HInstruction* HMod::NewHMod(Zone* zone,
- HValue* context,
- HValue* left,
- HValue* right) {
- if (left->IsConstant() && right->IsConstant()) {
+HInstruction* HStringAdd::New(
+ Zone* zone, HValue* context, HValue* left, HValue* right) {
+ if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) {
+ HConstant* c_right = HConstant::cast(right);
+ HConstant* c_left = HConstant::cast(left);
+ if (c_left->HasStringValue() && c_right->HasStringValue()) {
+ return new(zone) HConstant(FACTORY->NewConsString(c_left->StringValue(),
+ c_right->StringValue()),
+ Representation::Tagged());
+ }
+ }
+ return new(zone) HStringAdd(context, left, right);
+}
+
+
+HInstruction* HStringCharFromCode::New(
+ Zone* zone, HValue* context, HValue* char_code) {
+ if (FLAG_fold_constants && char_code->IsConstant()) {
+ HConstant* c_code = HConstant::cast(char_code);
+ Isolate* isolate = Isolate::Current();
+ if (c_code->HasNumberValue()) {
+ if (isfinite(c_code->DoubleValue())) {
+ uint32_t code = c_code->NumberValueAsInteger32() & 0xffff;
+ return new(zone) HConstant(LookupSingleCharacterStringFromCode(isolate,
+ code),
+ Representation::Tagged());
+ }
+ return new(zone) HConstant(isolate->factory()->empty_string(),
+ Representation::Tagged());
+ }
+ }
+ return new(zone) HStringCharFromCode(context, char_code);
+}
+
+
+HInstruction* HStringLength::New(Zone* zone, HValue* string) {
+ if (FLAG_fold_constants && string->IsConstant()) {
+ HConstant* c_string = HConstant::cast(string);
+ if (c_string->HasStringValue()) {
+ return H_CONSTANT_INT32(c_string->StringValue()->length());
+ }
+ }
+ return new(zone) HStringLength(string);
+}
+
+
+HInstruction* HUnaryMathOperation::New(
+ Zone* zone, HValue* context, HValue* value, BuiltinFunctionId op) {
+ do {
+ if (!FLAG_fold_constants) break;
+ if (!value->IsConstant()) break;
+ HConstant* constant = HConstant::cast(value);
+ if (!constant->HasNumberValue()) break;
+ double d = constant->DoubleValue();
+ if (isnan(d)) { // NaN poisons everything.
+ return H_CONSTANT_DOUBLE(OS::nan_value());
+ }
+ if (isinf(d)) { // +Infinity and -Infinity.
+ switch (op) {
+ case kMathSin:
+ case kMathCos:
+ case kMathTan:
+ return H_CONSTANT_DOUBLE(OS::nan_value());
+ case kMathExp:
+ return H_CONSTANT_DOUBLE((d > 0.0) ? d : 0.0);
+ case kMathLog:
+ case kMathSqrt:
+ return H_CONSTANT_DOUBLE((d > 0.0) ? d : OS::nan_value());
+ case kMathPowHalf:
+ case kMathAbs:
+ return H_CONSTANT_DOUBLE((d > 0.0) ? d : -d);
+ case kMathRound:
+ case kMathFloor:
+ return H_CONSTANT_DOUBLE(d);
+ default:
+ UNREACHABLE();
+ break;
+ }
+ }
+ switch (op) {
+ case kMathSin:
+ return H_CONSTANT_DOUBLE(fast_sin(d));
+ case kMathCos:
+ return H_CONSTANT_DOUBLE(fast_cos(d));
+ case kMathTan:
+ return H_CONSTANT_DOUBLE(fast_tan(d));
+ case kMathExp:
+ return H_CONSTANT_DOUBLE(fast_exp(d));
+ case kMathLog:
+ return H_CONSTANT_DOUBLE(fast_log(d));
+ case kMathSqrt:
+ return H_CONSTANT_DOUBLE(fast_sqrt(d));
+ case kMathPowHalf:
+ return H_CONSTANT_DOUBLE(power_double_double(d, 0.5));
+ case kMathAbs:
+ return H_CONSTANT_DOUBLE((d >= 0.0) ? d + 0.0 : -d);
+ case kMathRound:
+ // -0.5 .. -0.0 round to -0.0.
+ if ((d >= -0.5 && Double(d).Sign() < 0)) return H_CONSTANT_DOUBLE(-0.0);
+ // Doubles are represented as Significand * 2 ^ Exponent. If the
+ // Exponent is not negative, the double value is already an integer.
+ if (Double(d).Exponent() >= 0) return H_CONSTANT_DOUBLE(d);
+ return H_CONSTANT_DOUBLE(floor(d + 0.5));
+ case kMathFloor:
+ return H_CONSTANT_DOUBLE(floor(d));
+ default:
+ UNREACHABLE();
+ break;
+ }
+ } while (false);
+ return new(zone) HUnaryMathOperation(context, value, op);
+}
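Two edge cases drive the kMathRound fold above: inputs in [-0.5, -0.0] must round to -0.0 (plain floor(d + 0.5) yields +0.0), and a double with a non-negative exponent is already integral, which also sidesteps precision loss in d + 0.5 for huge values. A standalone sketch of the signed-zero branch, with std::signbit standing in for Double(d).Sign() and the exponent early-out omitted:

#include <cmath>
#include <cstdio>

// Round-half-up with the -0.0 branch made explicit (exponent check omitted).
static double RoundHalfUp(double d) {
  if (d >= -0.5 && std::signbit(d)) return -0.0;  // -0.5 .. -0.0  ->  -0.0
  return std::floor(d + 0.5);
}

int main() {
  std::printf("%g %g\n", RoundHalfUp(-0.25), RoundHalfUp(2.5));  // -0 3
  return 0;
}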
+
+
+HInstruction* HPower::New(Zone* zone, HValue* left, HValue* right) {
+ if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) {
+ HConstant* c_left = HConstant::cast(left);
+ HConstant* c_right = HConstant::cast(right);
+ if (c_left->HasNumberValue() && c_right->HasNumberValue()) {
+ double result = power_helper(c_left->DoubleValue(),
+ c_right->DoubleValue());
+ return H_CONSTANT_DOUBLE(isnan(result) ? OS::nan_value() : result);
+ }
+ }
+ return new(zone) HPower(left, right);
+}
+
+
+HInstruction* HMathMinMax::New(
+ Zone* zone, HValue* context, HValue* left, HValue* right, Operation op) {
+ if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) {
+ HConstant* c_left = HConstant::cast(left);
+ HConstant* c_right = HConstant::cast(right);
+ if (c_left->HasNumberValue() && c_right->HasNumberValue()) {
+ double d_left = c_left->DoubleValue();
+ double d_right = c_right->DoubleValue();
+ if (op == kMathMin) {
+ if (d_left > d_right) return H_CONSTANT_DOUBLE(d_right);
+ if (d_left < d_right) return H_CONSTANT_DOUBLE(d_left);
+ if (d_left == d_right) {
+ // Handle +0 and -0.
+ return H_CONSTANT_DOUBLE((Double(d_left).Sign() == -1) ? d_left
+ : d_right);
+ }
+ } else {
+ if (d_left < d_right) return H_CONSTANT_DOUBLE(d_right);
+ if (d_left > d_right) return H_CONSTANT_DOUBLE(d_left);
+ if (d_left == d_right) {
+ // Handle +0 and -0.
+ return H_CONSTANT_DOUBLE((Double(d_left).Sign() == -1) ? d_right
+ : d_left);
+ }
+ }
+ // All comparisons failed, so an operand must be NaN.
+ return H_CONSTANT_DOUBLE(OS::nan_value());
+ }
+ }
+ return new(zone) HMathMinMax(context, left, right, op);
+}
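Because IEEE-754 comparison reports -0 == +0, the equal-values branch above must break the tie by sign bit (min prefers -0, max prefers +0), and if all three comparisons fail an operand is NaN. The min half, restated standalone with std::signbit in place of Double(...).Sign():

#include <cmath>

// Sketch of min with signed-zero and NaN handling matching the fold above.
static double MinWithSignedZero(double a, double b) {
  if (a < b) return a;
  if (a > b) return b;
  if (a == b) return std::signbit(a) ? a : b;  // pick -0 over +0
  return std::nan("");                         // a or b was NaN
}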
+
+
+HInstruction* HMod::New(
+ Zone* zone, HValue* context, HValue* left, HValue* right) {
+ if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) {
HConstant* c_left = HConstant::cast(left);
HConstant* c_right = HConstant::cast(right);
if (c_left->HasInteger32Value() && c_right->HasInteger32Value()) {
@@ -2340,21 +3014,23 @@ HInstruction* HMod::NewHMod(Zone* zone,
}
-HInstruction* HDiv::NewHDiv(Zone* zone,
- HValue* context,
- HValue* left,
- HValue* right) {
+HInstruction* HDiv::New(
+ Zone* zone, HValue* context, HValue* left, HValue* right) {
// If left and right are constant values, try to return a constant value.
- if (left->IsConstant() && right->IsConstant()) {
+ if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) {
HConstant* c_left = HConstant::cast(left);
HConstant* c_right = HConstant::cast(right);
if ((c_left->HasNumberValue() && c_right->HasNumberValue())) {
if (c_right->DoubleValue() != 0) {
double double_res = c_left->DoubleValue() / c_right->DoubleValue();
if (TypeInfo::IsInt32Double(double_res)) {
- return H_CONSTANT_INT32(static_cast<int32_t>(double_res));
+ return H_CONSTANT_INT32(double_res);
}
return H_CONSTANT_DOUBLE(double_res);
+ } else {
+ int sign = Double(c_left->DoubleValue()).Sign() *
+ Double(c_right->DoubleValue()).Sign(); // Right could be -0.
+ return H_CONSTANT_DOUBLE(sign * V8_INFINITY);
}
}
}
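The zero-denominator branch folds to a signed infinity, and the sign must come from the sign bits of both operands because the denominator constant can be -0, which an ordinary comparison against zero cannot distinguish. A two-line demonstration:

#include <cmath>
#include <cstdio>

int main() {
  double pz = 0.0, nz = -0.0;                  // compare equal, differ in sign
  std::printf("%g %g\n", 1.0 / pz, 1.0 / nz);  // inf -inf
  std::printf("%d %d\n", std::signbit(pz), std::signbit(nz));  // 0 1
  return 0;
}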
@@ -2362,12 +3038,9 @@ HInstruction* HDiv::NewHDiv(Zone* zone,
}
-HInstruction* HBitwise::NewHBitwise(Zone* zone,
- Token::Value op,
- HValue* context,
- HValue* left,
- HValue* right) {
- if (left->IsConstant() && right->IsConstant()) {
+HInstruction* HBitwise::New(
+ Zone* zone, Token::Value op, HValue* context, HValue* left, HValue* right) {
+ if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) {
HConstant* c_left = HConstant::cast(left);
HConstant* c_right = HConstant::cast(right);
if ((c_left->HasNumberValue() && c_right->HasNumberValue())) {
@@ -2396,11 +3069,9 @@ HInstruction* HBitwise::NewHBitwise(Zone* zone,
#define DEFINE_NEW_H_BITWISE_INSTR(HInstr, result) \
-HInstruction* HInstr::New##HInstr(Zone* zone, \
- HValue* context, \
- HValue* left, \
- HValue* right) { \
- if (left->IsConstant() && right->IsConstant()) { \
+HInstruction* HInstr::New( \
+ Zone* zone, HValue* context, HValue* left, HValue* right) { \
+ if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) { \
HConstant* c_left = HConstant::cast(left); \
HConstant* c_right = HConstant::cast(right); \
if ((c_left->HasNumberValue() && c_right->HasNumberValue())) { \
@@ -2419,19 +3090,16 @@ c_left->NumberValueAsInteger32() << (c_right->NumberValueAsInteger32() & 0x1f))
#undef DEFINE_NEW_H_BITWISE_INSTR
-HInstruction* HShr::NewHShr(Zone* zone,
- HValue* context,
- HValue* left,
- HValue* right) {
- if (left->IsConstant() && right->IsConstant()) {
+HInstruction* HShr::New(
+ Zone* zone, HValue* context, HValue* left, HValue* right) {
+ if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) {
HConstant* c_left = HConstant::cast(left);
HConstant* c_right = HConstant::cast(right);
if ((c_left->HasNumberValue() && c_right->HasNumberValue())) {
int32_t left_val = c_left->NumberValueAsInteger32();
int32_t right_val = c_right->NumberValueAsInteger32() & 0x1f;
if ((right_val == 0) && (left_val < 0)) {
- return H_CONSTANT_DOUBLE(
- static_cast<double>(static_cast<uint32_t>(left_val)));
+ return H_CONSTANT_DOUBLE(static_cast<uint32_t>(left_val));
}
return H_CONSTANT_INT32(static_cast<uint32_t>(left_val) >> right_val);
}
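The special case above exists because JavaScript's >>> reinterprets the left operand as uint32 even for a zero shift: -1 >>> 0 is 4294967295, which exceeds INT32_MAX and therefore cannot stay an Integer32 constant. In C++ terms:

#include <cstdint>
#include <cstdio>

int main() {
  int32_t left = -1;
  uint32_t shifted = static_cast<uint32_t>(left) >> 0;  // 4294967295
  // Too large for int32_t, so the folded constant must be a double.
  std::printf("%.0f\n", static_cast<double>(shifted));
  return 0;
}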
@@ -2458,7 +3126,41 @@ void HBitwise::PrintDataTo(StringStream* stream) {
}
-Representation HPhi::InferredRepresentation() {
+void HPhi::InferRepresentation(HInferRepresentation* h_infer) {
+ ASSERT(CheckFlag(kFlexibleRepresentation));
+ // If there are non-Phi uses, and all of them have observed the same
+ // representation, then that's what this Phi is going to use.
+ Representation new_rep = RepresentationObservedByAllNonPhiUses();
+ if (!new_rep.IsNone()) {
+ UpdateRepresentation(new_rep, h_infer, "unanimous use observations");
+ return;
+ }
+ new_rep = RepresentationFromInputs();
+ UpdateRepresentation(new_rep, h_infer, "inputs");
+ new_rep = RepresentationFromUses();
+ UpdateRepresentation(new_rep, h_infer, "uses");
+ new_rep = RepresentationFromUseRequirements();
+ UpdateRepresentation(new_rep, h_infer, "use requirements");
+}
+
+
+Representation HPhi::RepresentationObservedByAllNonPhiUses() {
+ int non_phi_use_count = 0;
+ for (int i = Representation::kInteger32;
+ i < Representation::kNumRepresentations; ++i) {
+ non_phi_use_count += non_phi_uses_[i];
+ }
+ if (non_phi_use_count <= 1) return Representation::None();
+ for (int i = 0; i < Representation::kNumRepresentations; ++i) {
+ if (non_phi_uses_[i] == non_phi_use_count) {
+ return Representation::FromKind(static_cast<Representation::Kind>(i));
+ }
+ }
+ return Representation::None();
+}
+
+
+Representation HPhi::RepresentationFromInputs() {
bool double_occurred = false;
bool int32_occurred = false;
for (int i = 0; i < OperandCount(); ++i) {
@@ -2467,6 +3169,7 @@ Representation HPhi::InferredRepresentation() {
HPhi* hint_value = HUnknownOSRValue::cast(value)->incoming_value();
if (hint_value != NULL) {
Representation hint = hint_value->representation();
+ if (hint.IsTagged()) return hint;
if (hint.IsDouble()) double_occurred = true;
if (hint.IsInteger32()) int32_occurred = true;
}
@@ -2485,7 +3188,9 @@ Representation HPhi::InferredRepresentation() {
return Representation::Tagged();
}
} else {
- return Representation::Tagged();
+ if (value->IsPhi() && !IsConvertibleToInteger()) {
+ return Representation::Tagged();
+ }
}
}
}
@@ -2498,6 +3203,37 @@ Representation HPhi::InferredRepresentation() {
}
+Representation HPhi::RepresentationFromUseRequirements() {
+ Representation all_uses_require = Representation::None();
+ bool all_uses_require_the_same = true;
+ for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
+ // We check for observed_input_representation elsewhere.
+ Representation use_rep =
+ it.value()->RequiredInputRepresentation(it.index());
+ // No useful info from this use -> look at the next one.
+ if (use_rep.IsNone()) {
+ continue;
+ }
+ if (use_rep.Equals(all_uses_require)) {
+ continue;
+ }
+ // This use's representation contradicts what we've seen so far.
+ if (!all_uses_require.IsNone()) {
+ ASSERT(!use_rep.Equals(all_uses_require));
+ all_uses_require_the_same = false;
+ break;
+ }
+ // Otherwise, initialize observed representation.
+ all_uses_require = use_rep;
+ }
+ if (all_uses_require_the_same) {
+ return all_uses_require;
+ }
+
+ return Representation::None();
+}
+
+
// Node-specific verification code is only included in debug mode.
#ifdef DEBUG
diff --git a/src/3rdparty/v8/src/hydrogen-instructions.h b/src/3rdparty/v8/src/hydrogen-instructions.h
index 7136657..7c2135d 100644
--- a/src/3rdparty/v8/src/hydrogen-instructions.h
+++ b/src/3rdparty/v8/src/hydrogen-instructions.h
@@ -45,6 +45,7 @@ namespace internal {
// Forward declarations.
class HBasicBlock;
class HEnvironment;
+class HInferRepresentation;
class HInstruction;
class HLoopInformation;
class HValue;
@@ -63,6 +64,7 @@ class LChunkBuilder;
V(AbnormalExit) \
V(AccessArgumentsAt) \
V(Add) \
+ V(Allocate) \
V(AllocateObject) \
V(ApplyArguments) \
V(ArgumentsElements) \
@@ -81,6 +83,7 @@ class LChunkBuilder;
V(CallKnownGlobal) \
V(CallNamed) \
V(CallNew) \
+ V(CallNewArray) \
V(CallRuntime) \
V(CallStub) \
V(Change) \
@@ -90,6 +93,7 @@ class LChunkBuilder;
V(CheckNonSmi) \
V(CheckPrototypeMaps) \
V(CheckSmi) \
+ V(CheckSmiOrInt32) \
V(ClampToUint8) \
V(ClassOfTestAndBranch) \
V(CompareIDAndBranch) \
@@ -103,6 +107,7 @@ class LChunkBuilder;
V(DeleteProperty) \
V(Deoptimize) \
V(Div) \
+ V(DummyUse) \
V(ElementsKind) \
V(EnterInlined) \
V(FastLiteral) \
@@ -115,9 +120,11 @@ class LChunkBuilder;
V(Goto) \
V(HasCachedArrayIndexAndBranch) \
V(HasInstanceTypeAndBranch) \
+ V(InductionVariableAnnotation) \
V(In) \
V(InstanceOf) \
V(InstanceOfKnownGlobal) \
+ V(InstanceSize) \
V(InvokeFunction) \
V(IsConstructCallAndBranch) \
V(IsNilAndBranch) \
@@ -143,6 +150,7 @@ class LChunkBuilder;
V(MathMinMax) \
V(Mod) \
V(Mul) \
+ V(NumericConstraint) \
V(ObjectLiteral) \
V(OsrEntry) \
V(OuterContext) \
@@ -154,6 +162,7 @@ class LChunkBuilder;
V(Return) \
V(Ror) \
V(Sar) \
+ V(SeqStringSetChar) \
V(Shl) \
V(Shr) \
V(Simulate) \
@@ -176,6 +185,7 @@ class LChunkBuilder;
V(Throw) \
V(ToFastProperties) \
V(TransitionElementsKind) \
+ V(TrapAllocationMemento) \
V(Typeof) \
V(TypeofIsAndBranch) \
V(UnaryMathOperation) \
@@ -190,6 +200,7 @@ class LChunkBuilder;
V(WrapReceiver)
#define GVN_TRACKED_FLAG_LIST(V) \
+ V(Maps) \
V(NewSpacePromotion)
#define GVN_UNTRACKED_FLAG_LIST(V) \
@@ -202,7 +213,6 @@ class LChunkBuilder;
V(DoubleArrayElements) \
V(SpecializedArrayElements) \
V(GlobalVars) \
- V(Maps) \
V(ArrayLengths) \
V(ContextSlots) \
V(OsrEntries)
@@ -225,11 +235,9 @@ class LChunkBuilder;
#ifdef DEBUG
-#define ASSERT_ALLOCATION_DISABLED do { \
- OptimizingCompilerThread* thread = \
- ISOLATE->optimizing_compiler_thread(); \
- ASSERT(thread->IsOptimizerThread() || !HEAP->IsAllocationAllowed()); \
- } while (0)
+#define ASSERT_ALLOCATION_DISABLED \
+ ASSERT(isolate()->optimizing_compiler_thread()->IsOptimizerThread() || \
+ !isolate()->heap()->IsAllocationAllowed())
#else
#define ASSERT_ALLOCATION_DISABLED do {} while (0)
#endif
@@ -308,9 +316,9 @@ class Representation {
public:
enum Kind {
kNone,
- kTagged,
- kDouble,
kInteger32,
+ kDouble,
+ kTagged,
kExternal,
kNumRepresentations
};
@@ -323,10 +331,18 @@ class Representation {
static Representation Double() { return Representation(kDouble); }
static Representation External() { return Representation(kExternal); }
+ static Representation FromKind(Kind kind) { return Representation(kind); }
+
bool Equals(const Representation& other) {
return kind_ == other.kind_;
}
+ bool is_more_general_than(const Representation& other) {
+ ASSERT(kind_ != kExternal);
+ ASSERT(other.kind_ != kExternal);
+ return kind_ > other.kind_;
+ }
+
Kind kind() const { return static_cast<Kind>(kind_); }
bool IsNone() const { return kind_ == kNone; }
bool IsTagged() const { return kind_ == kTagged; }
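The enum reorder above (None < Integer32 < Double < Tagged) is what lets is_more_general_than be a single integer comparison: each kind can represent every value the kinds below it can. The same trick in isolation:

// Encode a generality lattice in declaration order so "more general"
// is plain operator>; kExternal is deliberately kept out of the order.
enum Kind { kNone, kInteger32, kDouble, kTagged };

static bool IsMoreGeneralThan(Kind a, Kind b) { return a > b; }
// IsMoreGeneralThan(kTagged, kInteger32) == true
// IsMoreGeneralThan(kInteger32, kDouble) == false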
@@ -436,7 +452,7 @@ class HType {
return IsHeapNumber() || IsString() || IsNonPrimitive();
}
- static HType TypeFromValue(Handle<Object> value);
+ static HType TypeFromValue(Isolate* isolate, Handle<Object> value);
const char* ToString();
@@ -536,6 +552,127 @@ enum GVNFlag {
#undef COUNT_FLAG
};
+
+class NumericRelation {
+ public:
+ enum Kind { NONE, EQ, GT, GE, LT, LE, NE };
+ static const char* MnemonicFromKind(Kind kind) {
+ switch (kind) {
+ case NONE: return "NONE";
+ case EQ: return "EQ";
+ case GT: return "GT";
+ case GE: return "GE";
+ case LT: return "LT";
+ case LE: return "LE";
+ case NE: return "NE";
+ }
+ UNREACHABLE();
+ return NULL;
+ }
+ const char* Mnemonic() const { return MnemonicFromKind(kind_); }
+
+ static NumericRelation None() { return NumericRelation(NONE); }
+ static NumericRelation Eq() { return NumericRelation(EQ); }
+ static NumericRelation Gt() { return NumericRelation(GT); }
+ static NumericRelation Ge() { return NumericRelation(GE); }
+ static NumericRelation Lt() { return NumericRelation(LT); }
+ static NumericRelation Le() { return NumericRelation(LE); }
+ static NumericRelation Ne() { return NumericRelation(NE); }
+
+ bool IsNone() { return kind_ == NONE; }
+
+ static NumericRelation FromToken(Token::Value token) {
+ switch (token) {
+ case Token::EQ: return Eq();
+ case Token::EQ_STRICT: return Eq();
+ case Token::LT: return Lt();
+ case Token::GT: return Gt();
+ case Token::LTE: return Le();
+ case Token::GTE: return Ge();
+ case Token::NE: return Ne();
+ case Token::NE_STRICT: return Ne();
+ default: return None();
+ }
+ }
+
+ // The semantics of "Reversed" is that if "x rel y" is true then also
+ // "y rel.Reversed() x" is true, and that rel.Reversed().Reversed() == rel.
+ NumericRelation Reversed() {
+ switch (kind_) {
+ case NONE: return None();
+ case EQ: return Eq();
+ case GT: return Lt();
+ case GE: return Le();
+ case LT: return Gt();
+ case LE: return Ge();
+ case NE: return Ne();
+ }
+ UNREACHABLE();
+ return None();
+ }
+
+ // The semantics of "Negated" is that if "x rel y" is true then also
+ // "!(x rel.Negated() y)" is true.
+ NumericRelation Negated() {
+ switch (kind_) {
+ case NONE: return None();
+ case EQ: return Ne();
+ case GT: return Le();
+ case GE: return Lt();
+ case LT: return Ge();
+ case LE: return Gt();
+ case NE: return Eq();
+ }
+ UNREACHABLE();
+ return None();
+ }
+
+ // The semantics of "Implies" is that if "x rel y" is true
+ // then also "x other_relation y" is true.
+ bool Implies(NumericRelation other_relation) {
+ switch (kind_) {
+ case NONE: return false;
+ case EQ: return (other_relation.kind_ == EQ)
+ || (other_relation.kind_ == GE)
+ || (other_relation.kind_ == LE);
+ case GT: return (other_relation.kind_ == GT)
+ || (other_relation.kind_ == GE)
+ || (other_relation.kind_ == NE);
+ case LT: return (other_relation.kind_ == LT)
+ || (other_relation.kind_ == LE)
+ || (other_relation.kind_ == NE);
+ case GE: return (other_relation.kind_ == GE);
+ case LE: return (other_relation.kind_ == LE);
+ case NE: return (other_relation.kind_ == NE);
+ }
+ UNREACHABLE();
+ return false;
+ }
+
+ // The semantics of "IsExtendable" is that if
+ // "rel.IsExtendable(direction)" is true then
+ // "x rel y" implies "(x + direction) rel y" .
+ bool IsExtendable(int direction) {
+ switch (kind_) {
+ case NONE: return false;
+ case EQ: return false;
+ case GT: return (direction >= 0);
+ case GE: return (direction >= 0);
+ case LT: return (direction <= 0);
+ case LE: return (direction <= 0);
+ case NE: return false;
+ }
+ UNREACHABLE();
+ return false;
+ }
+
+ private:
+ explicit NumericRelation(Kind kind) : kind_(kind) {}
+
+ Kind kind_;
+};
+
+
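A short usage sketch for the NumericRelation algebra above (illustrative
only; Token values and ASSERT are the ones already used in this file):

  NumericRelation lt = NumericRelation::FromToken(Token::LT);  // "x < y"
  ASSERT(lt.Reversed().Implies(NumericRelation::Gt()));  // y > x
  ASSERT(lt.Negated().Implies(NumericRelation::Ge()));   // x >= y
  ASSERT(lt.Implies(NumericRelation::Le()));             // x < y => x <= y
  ASSERT(lt.IsExtendable(-1));                           // x < y => x-1 < y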
typedef EnumSet<GVNFlag> GVNFlagSet;
@@ -568,7 +705,13 @@ class HValue: public ZoneObject {
// HGraph::ComputeSafeUint32Operations is responsible for setting this
// flag.
kUint32,
- kLastFlag = kUint32
+  // If a phi is involved in the evaluation of a numeric constraint, the
+  // recursion can cause an endless cycle: we use this flag to exit the loop.
+ kNumericConstraintEvaluationInProgress,
+ // This flag is set to true after the SetupInformativeDefinitions() pass
+ // has processed this instruction.
+ kIDefsProcessingDone,
+ kLastFlag = kIDefsProcessingDone
};
STATIC_ASSERT(kLastFlag < kBitsPerInt);
@@ -621,6 +764,9 @@ class HValue: public ZoneObject {
void SetBlock(HBasicBlock* block);
int LoopWeight() const;
+ // Note: Never call this method for an unlinked value.
+ Isolate* isolate() const;
+
int id() const { return id_; }
void set_id(int id) { id_ = id; }
@@ -629,13 +775,15 @@ class HValue: public ZoneObject {
virtual bool EmitAtUses() { return false; }
Representation representation() const { return representation_; }
void ChangeRepresentation(Representation r) {
- // Representation was already set and is allowed to be changed.
- ASSERT(!r.IsNone());
ASSERT(CheckFlag(kFlexibleRepresentation));
RepresentationChanged(r);
representation_ = r;
+ if (r.IsTagged()) {
+ // Tagged is the bottom of the lattice, don't go any further.
+ ClearFlag(kFlexibleRepresentation);
+ }
}
- void AssumeRepresentation(Representation r);
+ virtual void AssumeRepresentation(Representation r);
virtual bool IsConvertibleToInteger() const { return true; }
@@ -660,6 +808,48 @@ class HValue: public ZoneObject {
return NULL;
}
+  // There are HInstructions that do not really change a value; they
+  // only add pieces of information to it (like bounds checks, map checks,
+  // smi checks...).
+  // We call these instructions "informative definitions", or "iDefs".
+  // One of the iDef operands is special because it is the value that is
+  // "transferred" to the output; we call it the "redefined operand".
+  // If an HValue is an iDef it must override RedefinedOperandIndex() so that
+  // it does not return kNoRedefinedOperand.
+ static const int kNoRedefinedOperand = -1;
+ virtual int RedefinedOperandIndex() { return kNoRedefinedOperand; }
+ bool IsInformativeDefinition() {
+ return RedefinedOperandIndex() != kNoRedefinedOperand;
+ }
+ HValue* RedefinedOperand() {
+ return IsInformativeDefinition() ? OperandAt(RedefinedOperandIndex())
+ : NULL;
+ }
+
+  // A purely informative definition is an iDef that will not emit code and
+  // should therefore be removed from the graph in the RestoreActualValues
+  // phase (so that live ranges will be shorter).
+ virtual bool IsPurelyInformativeDefinition() { return false; }
+
+ // This method must always return the original HValue SSA definition
+ // (regardless of any iDef of this value).
+ HValue* ActualValue() {
+ return IsInformativeDefinition() ? RedefinedOperand()->ActualValue()
+ : this;
+ }
+
+ virtual void AddInformativeDefinitions() {}
+
+ void UpdateRedefinedUsesWhileSettingUpInformativeDefinitions() {
+ UpdateRedefinedUsesInner<TestDominanceUsingProcessedFlag>();
+ }
+ void UpdateRedefinedUses() {
+ UpdateRedefinedUsesInner<Dominates>();
+ }
+
+ bool IsInteger32Constant();
+ int32_t GetInteger32Constant();
+
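The chain walk performed by ActualValue() can be illustrated with a small
standalone sketch (hypothetical stand-in types, not V8 code):

  #include <cassert>

  struct Node {
    Node* redefined;  // the redefined operand, or NULL for a real definition
    explicit Node(Node* r = NULL) : redefined(r) {}
    Node* ActualValue() {
      // Walk past informative definitions back to the original SSA value.
      return redefined != NULL ? redefined->ActualValue() : this;
    }
  };

  int main() {
    Node index;                     // the original index computation
    Node bounds_check(&index);      // iDef: adds a bounds-check fact
    Node smi_check(&bounds_check);  // iDef: adds a smi-check fact
    assert(smi_check.ActualValue() == &index);
    return 0;
  }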
bool IsDefinedAfter(HBasicBlock* other) const;
// Operands.
@@ -733,16 +923,11 @@ class HValue: public ZoneObject {
void ComputeInitialRange(Zone* zone);
// Representation helpers.
- virtual Representation RequiredInputRepresentation(int index) = 0;
-
- virtual Representation InferredRepresentation() {
- return representation();
- }
-
- // Type feedback access.
- virtual Representation ObservedInputRepresentation(int index) {
- return RequiredInputRepresentation(index);
+ virtual Representation observed_input_representation(int index) {
+ return Representation::None();
}
+ virtual Representation RequiredInputRepresentation(int index) = 0;
+ virtual void InferRepresentation(HInferRepresentation* h_infer);
// This gives the instruction an opportunity to replace itself with an
// instruction that does the same in some better way. To replace an
@@ -762,6 +947,14 @@ class HValue: public ZoneObject {
const char* Mnemonic() const;
+ // Type information helpers.
+ bool HasMonomorphicJSObjectType();
+
+ // TODO(mstarzinger): For now instructions can override this function to
+  // specify statically known types; once HType can convey more information,
+  // it should be based on the HType.
+ virtual Handle<Map> GetMonomorphicJSObjectMap() { return Handle<Map>(); }
+
// Updates the inferred type of this instruction and returns true if
// it has changed.
bool UpdateInferredType();
@@ -783,6 +976,24 @@ class HValue: public ZoneObject {
virtual void Verify() = 0;
#endif
+ // This method is recursive but it is guaranteed to terminate because
+ // RedefinedOperand() always dominates "this".
+ bool IsRelationTrue(NumericRelation relation, HValue* other) {
+ if (this == other) {
+ return NumericRelation::Eq().Implies(relation);
+ }
+
+ bool result = IsRelationTrueInternal(relation, other) ||
+ other->IsRelationTrueInternal(relation.Reversed(), this);
+ if (!result) {
+ HValue* redefined = RedefinedOperand();
+ if (redefined != NULL) {
+ result = redefined->IsRelationTrue(relation, other);
+ }
+ }
+ return result;
+ }
+
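A worked example of the recursion above, with hypothetical values: given

  // check = HBoundsCheck(index, length), which redefines index
  check->IsRelationTrue(NumericRelation::Lt(), length)

the call can succeed directly through the bounds check's own
IsRelationTrueInternal(), while any later redefinition of check reaches the
same fact by recursing through RedefinedOperand(). Termination follows
because each step moves to a strictly dominating definition.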
protected:
// This function must be overridden for instructions with flag kUseGVN, to
// compare the non-Operand parts of the instruction.
@@ -790,7 +1001,18 @@ class HValue: public ZoneObject {
UNREACHABLE();
return false;
}
+
+ virtual Representation RepresentationFromInputs() {
+ return representation();
+ }
+ Representation RepresentationFromUses();
+ virtual void UpdateRepresentation(Representation new_rep,
+ HInferRepresentation* h_infer,
+ const char* reason);
+ void AddDependantsToWorklist(HInferRepresentation* h_infer);
+
virtual void RepresentationChanged(Representation to) { }
+
virtual Range* InferRange(Zone* zone);
virtual void DeleteFromGraph() = 0;
virtual void InternalSetOperandAt(int index, HValue* value) = 0;
@@ -800,11 +1022,46 @@ class HValue: public ZoneObject {
}
void set_representation(Representation r) {
- // Representation is set-once.
ASSERT(representation_.IsNone() && !r.IsNone());
representation_ = r;
}
+ // Signature of a function testing if a HValue properly dominates another.
+ typedef bool (*DominanceTest)(HValue*, HValue*);
+
+ // Simple implementation of DominanceTest implemented walking the chain
+ // of Hinstructions (used in UpdateRedefinedUsesInner).
+ static bool Dominates(HValue* dominator, HValue* dominated);
+
+ // A fast implementation of DominanceTest that works only for the
+ // "current" instruction in the SetupInformativeDefinitions() phase.
+ // During that phase we use a flag to mark processed instructions, and by
+ // checking the flag we can quickly test if an instruction comes before or
+ // after the "current" one.
+ static bool TestDominanceUsingProcessedFlag(HValue* dominator,
+ HValue* dominated);
+
+ // If we are redefining an operand, update all its dominated uses (the
+ // function that checks if a use is dominated is the template argument).
+ template<DominanceTest TestDominance>
+ void UpdateRedefinedUsesInner() {
+ HValue* input = RedefinedOperand();
+ if (input != NULL) {
+ for (HUseIterator uses = input->uses(); !uses.Done(); uses.Advance()) {
+ HValue* use = uses.value();
+ if (TestDominance(this, use)) {
+ use->SetOperandAt(uses.index(), this);
+ }
+ }
+ }
+ }
+
+ // Informative definitions can override this method to state any numeric
+ // relation they provide on the redefined value.
+ virtual bool IsRelationTrueInternal(NumericRelation relation, HValue* other) {
+ return false;
+ }
+
static GVNFlagSet AllDependsOnFlagSet() {
GVNFlagSet result;
// Create changes mask.
@@ -1007,6 +1264,73 @@ class HBlockEntry: public HTemplateInstruction<0> {
};
+class HDummyUse: public HTemplateInstruction<1> {
+ public:
+ explicit HDummyUse(HValue* value) {
+ SetOperandAt(0, value);
+ // Pretend to be a Smi so that the HChange instructions inserted
+ // before any use generate as little code as possible.
+ set_representation(Representation::Tagged());
+ set_type(HType::Smi());
+ }
+
+ HValue* value() { return OperandAt(0); }
+
+ virtual Representation RequiredInputRepresentation(int index) {
+ return Representation::None();
+ }
+
+ virtual void PrintDataTo(StringStream* stream);
+
+ DECLARE_CONCRETE_INSTRUCTION(DummyUse);
+};
+
+
+class HNumericConstraint : public HTemplateInstruction<2> {
+ public:
+ static HNumericConstraint* AddToGraph(HValue* constrained_value,
+ NumericRelation relation,
+ HValue* related_value,
+ HInstruction* insertion_point = NULL);
+
+ HValue* constrained_value() { return OperandAt(0); }
+ HValue* related_value() { return OperandAt(1); }
+ NumericRelation relation() { return relation_; }
+
+ virtual int RedefinedOperandIndex() { return 0; }
+ virtual bool IsPurelyInformativeDefinition() { return true; }
+
+ virtual Representation RequiredInputRepresentation(int index) {
+ return representation();
+ }
+
+ virtual void PrintDataTo(StringStream* stream);
+
+ virtual bool IsRelationTrueInternal(NumericRelation other_relation,
+ HValue* other_related_value) {
+ if (related_value() == other_related_value) {
+ return relation().Implies(other_relation);
+ } else {
+ return false;
+ }
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(NumericConstraint)
+
+ private:
+ HNumericConstraint(HValue* constrained_value,
+ NumericRelation relation,
+ HValue* related_value)
+ : relation_(relation) {
+ SetOperandAt(0, constrained_value);
+ SetOperandAt(1, related_value);
+ set_representation(constrained_value->representation());
+ }
+
+ NumericRelation relation_;
+};
+
+
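A usage sketch for HNumericConstraint (hypothetical values):

  HNumericConstraint::AddToGraph(index, NumericRelation::Lt(), length);

After this call, dominated uses of index are rewired to the constraint, so
IsRelationTrue(NumericRelation::Lt(), length) on those uses can be proven
via IsRelationTrueInternal() above, and since the instruction is a purely
informative definition it emits no code.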
// We insert a soft deoptimize when we hit code with unknown type feedback,
// so that we get a chance of re-optimizing with useful type feedback.
// Unlike HDeoptimize, HSoftDeoptimize does not end a basic block.
@@ -1113,6 +1437,7 @@ class HBranch: public HUnaryControlInstruction {
virtual Representation RequiredInputRepresentation(int index) {
return Representation::None();
}
+ virtual Representation observed_input_representation(int index);
ToBooleanStub::Types expected_input_types() const {
return expected_input_types_;
@@ -1153,10 +1478,11 @@ class HCompareMap: public HUnaryControlInstruction {
};
-class HReturn: public HTemplateControlInstruction<0, 1> {
+class HReturn: public HTemplateControlInstruction<0, 2> {
public:
- explicit HReturn(HValue* value) {
+ HReturn(HValue* value, HValue* context) {
SetOperandAt(0, value);
+ SetOperandAt(1, context);
}
virtual Representation RequiredInputRepresentation(int index) {
@@ -1166,6 +1492,7 @@ class HReturn: public HTemplateControlInstruction<0, 1> {
virtual void PrintDataTo(StringStream* stream);
HValue* value() { return OperandAt(0); }
+ HValue* context() { return OperandAt(1); }
DECLARE_CONCRETE_INSTRUCTION(Return)
};
@@ -1242,6 +1569,8 @@ class HForceRepresentation: public HTemplateInstruction<1> {
return representation(); // Same as the output representation.
}
+ virtual void PrintDataTo(StringStream* stream);
+
DECLARE_CONCRETE_INSTRUCTION(ForceRepresentation)
};
@@ -1317,14 +1646,24 @@ class HClampToUint8: public HUnaryOperation {
};
+enum RemovableSimulate {
+ REMOVABLE_SIMULATE,
+ FIXED_SIMULATE
+};
+
+
class HSimulate: public HInstruction {
public:
- HSimulate(BailoutId ast_id, int pop_count, Zone* zone)
+ HSimulate(BailoutId ast_id,
+ int pop_count,
+ Zone* zone,
+ RemovableSimulate removable)
: ast_id_(ast_id),
pop_count_(pop_count),
values_(2, zone),
assigned_indexes_(2, zone),
- zone_(zone) {}
+ zone_(zone),
+ removable_(removable) {}
virtual ~HSimulate() {}
virtual void PrintDataTo(StringStream* stream);
@@ -1358,6 +1697,9 @@ class HSimulate: public HInstruction {
return Representation::None();
}
+ void MergeInto(HSimulate* other);
+ bool is_candidate_for_removal() { return removable_ == REMOVABLE_SIMULATE; }
+
DECLARE_CONCRETE_INSTRUCTION(Simulate)
#ifdef DEBUG
@@ -1384,6 +1726,7 @@ class HSimulate: public HInstruction {
ZoneList<HValue*> values_;
ZoneList<int> assigned_indexes_;
Zone* zone_;
+ RemovableSimulate removable_;
};
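A construction sketch under the new HSimulate signature (ast_id, pop count
and zone are whatever the caller already has in scope):

  HSimulate* sim =
      new(zone) HSimulate(ast_id, 0, zone, REMOVABLE_SIMULATE);
  // A later pass may call sim->MergeInto(other), because
  // sim->is_candidate_for_removal() returns true.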
@@ -1409,7 +1752,7 @@ class HStackCheck: public HTemplateInstruction<1> {
// The stack check eliminator might try to eliminate the same stack
// check instruction multiple times.
if (IsLinked()) {
- DeleteFromGraph();
+ DeleteAndReplaceWith(NULL);
}
}
@@ -1437,18 +1780,18 @@ class HEnterInlined: public HTemplateInstruction<0> {
HEnterInlined(Handle<JSFunction> closure,
int arguments_count,
FunctionLiteral* function,
- CallKind call_kind,
InliningKind inlining_kind,
Variable* arguments_var,
- ZoneList<HValue*>* arguments_values)
+ ZoneList<HValue*>* arguments_values,
+ bool undefined_receiver)
: closure_(closure),
arguments_count_(arguments_count),
arguments_pushed_(false),
function_(function),
- call_kind_(call_kind),
inlining_kind_(inlining_kind),
arguments_var_(arguments_var),
- arguments_values_(arguments_values) {
+ arguments_values_(arguments_values),
+ undefined_receiver_(undefined_receiver) {
}
virtual void PrintDataTo(StringStream* stream);
@@ -1458,8 +1801,8 @@ class HEnterInlined: public HTemplateInstruction<0> {
bool arguments_pushed() const { return arguments_pushed_; }
void set_arguments_pushed() { arguments_pushed_ = true; }
FunctionLiteral* function() const { return function_; }
- CallKind call_kind() const { return call_kind_; }
InliningKind inlining_kind() const { return inlining_kind_; }
+ bool undefined_receiver() const { return undefined_receiver_; }
virtual Representation RequiredInputRepresentation(int index) {
return Representation::None();
@@ -1475,10 +1818,10 @@ class HEnterInlined: public HTemplateInstruction<0> {
int arguments_count_;
bool arguments_pushed_;
FunctionLiteral* function_;
- CallKind call_kind_;
InliningKind inlining_kind_;
Variable* arguments_var_;
ZoneList<HValue*>* arguments_values_;
+ bool undefined_receiver_;
};
@@ -1602,7 +1945,8 @@ class HDeclareGlobals: public HUnaryOperation {
class HGlobalObject: public HUnaryOperation {
public:
- explicit HGlobalObject(HValue* context) : HUnaryOperation(context), qml_global_(false) {
+ explicit HGlobalObject(HValue* context)
+ : HUnaryOperation(context), qml_global_(false) {
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
}
@@ -1881,6 +2225,25 @@ class HCallNew: public HBinaryCall {
};
+class HCallNewArray: public HCallNew {
+ public:
+ HCallNewArray(HValue* context, HValue* constructor, int argument_count,
+ Handle<JSGlobalPropertyCell> type_cell)
+ : HCallNew(context, constructor, argument_count),
+ type_cell_(type_cell) {
+ }
+
+ Handle<JSGlobalPropertyCell> property_cell() const {
+ return type_cell_;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(CallNewArray)
+
+ private:
+ Handle<JSGlobalPropertyCell> type_cell_;
+};
+
+
class HCallRuntime: public HCall<1> {
public:
HCallRuntime(HValue* context,
@@ -1918,7 +2281,7 @@ class HJSArrayLength: public HTemplateInstruction<2> {
// object. It is guaranteed to be 32 bit integer, but it can be
// represented as either a smi or heap number.
SetOperandAt(0, value);
- SetOperandAt(1, typecheck);
+ SetOperandAt(1, typecheck != NULL ? typecheck : value);
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
SetGVNFlag(kDependsOnArrayLengths);
@@ -1932,7 +2295,11 @@ class HJSArrayLength: public HTemplateInstruction<2> {
virtual void PrintDataTo(StringStream* stream);
HValue* value() { return OperandAt(0); }
- HValue* typecheck() { return OperandAt(1); }
+ HValue* typecheck() {
+ ASSERT(HasTypeCheck());
+ return OperandAt(1);
+ }
+ bool HasTypeCheck() const { return OperandAt(0) != OperandAt(1); }
DECLARE_CONCRETE_INSTRUCTION(JSArrayLength)
@@ -2023,6 +2390,9 @@ class HBitNot: public HUnaryOperation {
virtual Representation RequiredInputRepresentation(int index) {
return Representation::Integer32();
}
+ virtual Representation observed_input_representation(int index) {
+ return Representation::Integer32();
+ }
virtual HType CalculateInferredType();
virtual HValue* Canonicalize();
@@ -2039,35 +2409,10 @@ class HBitNot: public HUnaryOperation {
class HUnaryMathOperation: public HTemplateInstruction<2> {
public:
- HUnaryMathOperation(HValue* context, HValue* value, BuiltinFunctionId op)
- : op_(op) {
- SetOperandAt(0, context);
- SetOperandAt(1, value);
- switch (op) {
- case kMathFloor:
- case kMathRound:
- case kMathCeil:
- set_representation(Representation::Integer32());
- break;
- case kMathAbs:
- set_representation(Representation::Tagged());
- SetFlag(kFlexibleRepresentation);
- SetGVNFlag(kChangesNewSpacePromotion);
- break;
- case kMathSqrt:
- case kMathPowHalf:
- case kMathLog:
- case kMathSin:
- case kMathCos:
- case kMathTan:
- set_representation(Representation::Double());
- SetGVNFlag(kChangesNewSpacePromotion);
- break;
- default:
- UNREACHABLE();
- }
- SetFlag(kUseGVN);
- }
+ static HInstruction* New(Zone* zone,
+ HValue* context,
+ HValue* value,
+ BuiltinFunctionId op);
HValue* context() { return OperandAt(0); }
HValue* value() { return OperandAt(1); }
@@ -2085,10 +2430,10 @@ class HUnaryMathOperation: public HTemplateInstruction<2> {
switch (op_) {
case kMathFloor:
case kMathRound:
- case kMathCeil:
case kMathSqrt:
case kMathPowHalf:
case kMathLog:
+ case kMathExp:
case kMathSin:
case kMathCos:
case kMathTan:
@@ -2116,6 +2461,39 @@ class HUnaryMathOperation: public HTemplateInstruction<2> {
}
private:
+ HUnaryMathOperation(HValue* context, HValue* value, BuiltinFunctionId op)
+ : op_(op) {
+ SetOperandAt(0, context);
+ SetOperandAt(1, value);
+ switch (op) {
+ case kMathFloor:
+ case kMathRound:
+ case kMathCeil:
+ set_representation(Representation::Integer32());
+ break;
+ case kMathAbs:
+ // Not setting representation here: it is None intentionally.
+ SetFlag(kFlexibleRepresentation);
+ SetGVNFlag(kChangesNewSpacePromotion);
+ break;
+ case kMathSqrt:
+ case kMathPowHalf:
+ case kMathLog:
+ case kMathSin:
+ case kMathCos:
+ case kMathTan:
+ set_representation(Representation::Double());
+ SetGVNFlag(kChangesNewSpacePromotion);
+ break;
+ case kMathExp:
+ set_representation(Representation::Double());
+ break;
+ default:
+ UNREACHABLE();
+ }
+ SetFlag(kUseGVN);
+ }
+
virtual bool IsDeletable() const { return true; }
BuiltinFunctionId op_;
@@ -2126,14 +2504,18 @@ class HLoadElements: public HTemplateInstruction<2> {
public:
HLoadElements(HValue* value, HValue* typecheck) {
SetOperandAt(0, value);
- SetOperandAt(1, typecheck);
+ SetOperandAt(1, typecheck != NULL ? typecheck : value);
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
SetGVNFlag(kDependsOnElementsPointer);
}
HValue* value() { return OperandAt(0); }
- HValue* typecheck() { return OperandAt(1); }
+ HValue* typecheck() {
+ ASSERT(HasTypeCheck());
+ return OperandAt(1);
+ }
+ bool HasTypeCheck() const { return OperandAt(0) != OperandAt(1); }
virtual void PrintDataTo(StringStream* stream);
@@ -2187,6 +2569,7 @@ class HCheckMaps: public HTemplateInstruction<2> {
SetOperandAt(1, typecheck != NULL ? typecheck : value);
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
+ SetFlag(kTrackSideEffectDominators);
SetGVNFlag(kDependsOnMaps);
SetGVNFlag(kDependsOnElementsKind);
map_set()->Add(map, zone);
@@ -2196,6 +2579,7 @@ class HCheckMaps: public HTemplateInstruction<2> {
SetOperandAt(1, value);
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
+ SetFlag(kTrackSideEffectDominators);
SetGVNFlag(kDependsOnMaps);
SetGVNFlag(kDependsOnElementsKind);
for (int i = 0; i < maps->length(); i++) {
@@ -2230,6 +2614,7 @@ class HCheckMaps: public HTemplateInstruction<2> {
virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
}
+ virtual void SetSideEffectDominator(GVNFlag side_effect, HValue* dominator);
virtual void PrintDataTo(StringStream* stream);
virtual HType CalculateInferredType();
@@ -2260,6 +2645,7 @@ class HCheckFunction: public HUnaryOperation {
: HUnaryOperation(value), target_(function) {
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
+ target_in_new_space_ = Isolate::Current()->heap()->InNewSpace(*function);
}
virtual Representation RequiredInputRepresentation(int index) {
@@ -2273,6 +2659,7 @@ class HCheckFunction: public HUnaryOperation {
#endif
Handle<JSFunction> target() const { return target_; }
+ bool target_in_new_space() const { return target_in_new_space_; }
DECLARE_CONCRETE_INSTRUCTION(CheckFunction)
@@ -2284,6 +2671,7 @@ class HCheckFunction: public HUnaryOperation {
private:
Handle<JSFunction> target_;
+ bool target_in_new_space_;
};
@@ -2298,8 +2686,9 @@ class HCheckInstanceType: public HUnaryOperation {
static HCheckInstanceType* NewIsString(HValue* value, Zone* zone) {
return new(zone) HCheckInstanceType(value, IS_STRING);
}
- static HCheckInstanceType* NewIsSymbol(HValue* value, Zone* zone) {
- return new(zone) HCheckInstanceType(value, IS_SYMBOL);
+ static HCheckInstanceType* NewIsInternalizedString(
+ HValue* value, Zone* zone) {
+ return new(zone) HCheckInstanceType(value, IS_INTERNALIZED_STRING);
}
virtual void PrintDataTo(StringStream* stream);
@@ -2330,7 +2719,7 @@ class HCheckInstanceType: public HUnaryOperation {
IS_SPEC_OBJECT,
IS_JS_ARRAY,
IS_STRING,
- IS_SYMBOL,
+ IS_INTERNALIZED_STRING,
LAST_INTERVAL_CHECK = IS_JS_ARRAY
};
@@ -2384,14 +2773,24 @@ class HCheckNonSmi: public HUnaryOperation {
class HCheckPrototypeMaps: public HTemplateInstruction<0> {
public:
- HCheckPrototypeMaps(Handle<JSObject> prototype, Handle<JSObject> holder)
- : prototype_(prototype), holder_(holder) {
+ HCheckPrototypeMaps(Handle<JSObject> prototype,
+ Handle<JSObject> holder,
+ Zone* zone) : prototypes_(2, zone), maps_(2, zone) {
SetFlag(kUseGVN);
SetGVNFlag(kDependsOnMaps);
+ // Keep a list of all objects on the prototype chain up to the holder
+ // and the expected maps.
+ while (true) {
+ prototypes_.Add(prototype, zone);
+ maps_.Add(Handle<Map>(prototype->map()), zone);
+ if (prototype.is_identical_to(holder)) break;
+ prototype = Handle<JSObject>(JSObject::cast(prototype->GetPrototype()));
+ }
}
- Handle<JSObject> prototype() const { return prototype_; }
- Handle<JSObject> holder() const { return holder_; }
+ ZoneList<Handle<JSObject> >* prototypes() { return &prototypes_; }
+
+ ZoneList<Handle<Map> >* maps() { return &maps_; }
DECLARE_CONCRETE_INSTRUCTION(CheckPrototypeMaps)
@@ -2403,21 +2802,42 @@ class HCheckPrototypeMaps: public HTemplateInstruction<0> {
virtual intptr_t Hashcode() {
ASSERT_ALLOCATION_DISABLED;
- intptr_t hash = reinterpret_cast<intptr_t>(*prototype());
- hash = 17 * hash + reinterpret_cast<intptr_t>(*holder());
+ // Dereferencing to use the object's raw address for hashing is safe.
+ AllowHandleDereference allow_handle_deref(isolate());
+ intptr_t hash = 0;
+ for (int i = 0; i < prototypes_.length(); i++) {
+ hash = 17 * hash + reinterpret_cast<intptr_t>(*prototypes_[i]);
+ hash = 17 * hash + reinterpret_cast<intptr_t>(*maps_[i]);
+ }
return hash;
}
+ bool CanOmitPrototypeChecks() {
+ for (int i = 0; i < maps()->length(); i++) {
+ if (!maps()->at(i)->CanOmitPrototypeChecks()) return false;
+ }
+ return true;
+ }
+
protected:
virtual bool DataEquals(HValue* other) {
HCheckPrototypeMaps* b = HCheckPrototypeMaps::cast(other);
- return prototype_.is_identical_to(b->prototype()) &&
- holder_.is_identical_to(b->holder());
+#ifdef DEBUG
+ if (prototypes_.length() != b->prototypes()->length()) return false;
+ for (int i = 0; i < prototypes_.length(); i++) {
+ if (!prototypes_[i].is_identical_to(b->prototypes()->at(i))) return false;
+ if (!maps_[i].is_identical_to(b->maps()->at(i))) return false;
+ }
+ return true;
+#else
+ return prototypes_.first().is_identical_to(b->prototypes()->first()) &&
+ prototypes_.last().is_identical_to(b->prototypes()->last());
+#endif // DEBUG
}
private:
- Handle<JSObject> prototype_;
- Handle<JSObject> holder_;
+ ZoneList<Handle<JSObject> > prototypes_;
+ ZoneList<Handle<Map> > maps_;
};
@@ -2444,6 +2864,38 @@ class HCheckSmi: public HUnaryOperation {
};
+class HCheckSmiOrInt32: public HUnaryOperation {
+ public:
+ explicit HCheckSmiOrInt32(HValue* value) : HUnaryOperation(value) {
+ SetFlag(kFlexibleRepresentation);
+ SetFlag(kUseGVN);
+ }
+
+ virtual int RedefinedOperandIndex() { return 0; }
+ virtual Representation RequiredInputRepresentation(int index) {
+ return representation();
+ }
+ virtual void InferRepresentation(HInferRepresentation* h_infer);
+
+ virtual Representation observed_input_representation(int index) {
+ return Representation::Integer32();
+ }
+
+ virtual HValue* Canonicalize() {
+ if (representation().IsTagged() && !type().IsSmi()) {
+ return this;
+ } else {
+ return value();
+ }
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(CheckSmiOrInt32)
+
+ protected:
+ virtual bool DataEquals(HValue* other) { return true; }
+};
+
+
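The Canonicalize() logic above in one line: the check survives only while it
can still fail, i.e. while the value is tagged and not yet known to be a
Smi; in every other case the instruction folds away to value(). For example
(hypothetical), once InferRepresentation() settles on Integer32, the check
disappears from the graph.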
class HPhi: public HValue {
public:
HPhi(int merged_index, Zone* zone)
@@ -2457,13 +2909,15 @@ class HPhi: public HValue {
indirect_uses_[i] = 0;
}
ASSERT(merged_index >= 0);
- set_representation(Representation::Tagged());
SetFlag(kFlexibleRepresentation);
}
- virtual Representation InferredRepresentation();
+ virtual Representation RepresentationFromInputs();
virtual Range* InferRange(Zone* zone);
+ virtual void InferRepresentation(HInferRepresentation* h_infer);
+ Representation RepresentationObservedByAllNonPhiUses();
+ Representation RepresentationFromUseRequirements();
virtual Representation RequiredInputRepresentation(int index) {
return representation();
}
@@ -2478,6 +2932,8 @@ class HPhi: public HValue {
int merged_index() const { return merged_index_; }
+ virtual void AddInformativeDefinitions();
+
virtual void PrintTo(StringStream* stream);
#ifdef DEBUG
@@ -2527,20 +2983,25 @@ class HPhi: public HValue {
bool AllOperandsConvertibleToInteger() {
for (int i = 0; i < OperandCount(); ++i) {
if (!OperandAt(i)->IsConvertibleToInteger()) {
+ if (FLAG_trace_representation) {
+ HValue* input = OperandAt(i);
+ PrintF("#%d %s: Input #%d %s at %d is NCTI\n",
+ id(), Mnemonic(), input->id(), input->Mnemonic(), i);
+ }
return false;
}
}
return true;
}
- void ResetInteger32Uses();
-
protected:
virtual void DeleteFromGraph();
virtual void InternalSetOperandAt(int index, HValue* value) {
inputs_[index] = value;
}
+ virtual bool IsRelationTrueInternal(NumericRelation relation, HValue* other);
+
private:
ZoneList<HValue*> inputs_;
int merged_index_;
@@ -2553,6 +3014,52 @@ class HPhi: public HValue {
};
+class HInductionVariableAnnotation : public HUnaryOperation {
+ public:
+ static HInductionVariableAnnotation* AddToGraph(HPhi* phi,
+ NumericRelation relation,
+ int operand_index);
+
+ NumericRelation relation() { return relation_; }
+ HValue* induction_base() { return phi_->OperandAt(operand_index_); }
+
+ virtual int RedefinedOperandIndex() { return 0; }
+ virtual bool IsPurelyInformativeDefinition() { return true; }
+ virtual Representation RequiredInputRepresentation(int index) {
+ return representation();
+ }
+
+ virtual void PrintDataTo(StringStream* stream);
+
+ virtual bool IsRelationTrueInternal(NumericRelation other_relation,
+ HValue* other_related_value) {
+ if (induction_base() == other_related_value) {
+ return relation().Implies(other_relation);
+ } else {
+ return false;
+ }
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(InductionVariableAnnotation)
+
+ private:
+ HInductionVariableAnnotation(HPhi* phi,
+ NumericRelation relation,
+ int operand_index)
+ : HUnaryOperation(phi),
+ phi_(phi), relation_(relation), operand_index_(operand_index) {
+ set_representation(phi->representation());
+ }
+
+  // We need to store the phi both here and in the instruction operand because
+  // the operand can change if a new iDef of the phi is added between the phi
+  // and this instruction (inserting an iDef updates every use).
+ HPhi* phi_;
+ NumericRelation relation_;
+ int operand_index_;
+};
+
+
class HArgumentsObject: public HTemplateInstruction<0> {
public:
HArgumentsObject() {
@@ -2600,18 +3107,19 @@ class HConstant: public HTemplateInstruction<0> {
}
ASSERT(!handle_.is_null());
- Heap* heap = HEAP;
+ Heap* heap = isolate()->heap();
// We should have handled minus_zero_value and nan_value in the
// has_double_value_ clause above.
+    // Dereferencing the handle to compare against heap singletons is safe.
+ AllowHandleDereference allow_handle_deref(isolate());
ASSERT(*handle_ != heap->minus_zero_value());
ASSERT(*handle_ != heap->nan_value());
- if (*handle_ == heap->undefined_value()) return true;
- if (*handle_ == heap->null_value()) return true;
- if (*handle_ == heap->true_value()) return true;
- if (*handle_ == heap->false_value()) return true;
- if (*handle_ == heap->the_hole_value()) return true;
- if (*handle_ == heap->empty_string()) return true;
- return false;
+ return *handle_ == heap->undefined_value() ||
+ *handle_ == heap->null_value() ||
+ *handle_ == heap->true_value() ||
+ *handle_ == heap->false_value() ||
+ *handle_ == heap->the_hole_value() ||
+ *handle_ == heap->empty_string();
}
virtual Representation RequiredInputRepresentation(int index) {
@@ -2633,6 +3141,9 @@ class HConstant: public HTemplateInstruction<0> {
ASSERT(HasInteger32Value());
return int32_value_;
}
+ bool HasSmiValue() const {
+ return HasInteger32Value() && Smi::IsValid(Integer32Value());
+ }
bool HasDoubleValue() const { return has_double_value_; }
double DoubleValue() const {
ASSERT(HasDoubleValue());
@@ -2646,6 +3157,15 @@ class HConstant: public HTemplateInstruction<0> {
// representation of the number in int32_value_.
return int32_value_;
}
+ bool HasStringValue() const {
+ if (has_double_value_ || has_int32_value_) return false;
+ ASSERT(!handle_.is_null());
+ return handle_->IsString();
+ }
+ Handle<String> StringValue() const {
+ ASSERT(HasStringValue());
+ return Handle<String>::cast(handle_);
+ }
bool ToBoolean();
@@ -2663,6 +3183,8 @@ class HConstant: public HTemplateInstruction<0> {
hash = static_cast<intptr_t>(BitCast<int64_t>(double_value_));
} else {
ASSERT(!handle_.is_null());
+ // Dereferencing to use the object's raw address for hashing is safe.
+ AllowHandleDereference allow_handle_deref(isolate());
hash = reinterpret_cast<intptr_t>(*handle_);
}
@@ -2690,11 +3212,13 @@ class HConstant: public HTemplateInstruction<0> {
} else {
ASSERT(!handle_.is_null());
return !other_constant->handle_.is_null() &&
- *handle_ == *other_constant->handle_;
+ handle_.is_identical_to(other_constant->handle_);
}
}
private:
+ void Initialize(Representation r);
+
virtual bool IsDeletable() const { return true; }
// If this is a numerical constant, handle_ either points to to the
@@ -2717,11 +3241,14 @@ class HConstant: public HTemplateInstruction<0> {
class HBinaryOperation: public HTemplateInstruction<3> {
public:
- HBinaryOperation(HValue* context, HValue* left, HValue* right) {
+ HBinaryOperation(HValue* context, HValue* left, HValue* right)
+ : observed_output_representation_(Representation::None()) {
ASSERT(left != NULL && right != NULL);
SetOperandAt(0, context);
SetOperandAt(1, left);
SetOperandAt(2, right);
+ observed_input_representation_[0] = Representation::None();
+ observed_input_representation_[1] = Representation::None();
}
HValue* context() { return OperandAt(0); }
@@ -2740,11 +3267,34 @@ class HBinaryOperation: public HTemplateInstruction<3> {
return right();
}
+ void set_observed_input_representation(Representation left,
+ Representation right) {
+ observed_input_representation_[0] = left;
+ observed_input_representation_[1] = right;
+ }
+
+ virtual void initialize_output_representation(Representation observed) {
+ observed_output_representation_ = observed;
+ }
+
+ virtual Representation observed_input_representation(int index) {
+ if (index == 0) return Representation::Tagged();
+ return observed_input_representation_[index - 1];
+ }
+
+ virtual void InferRepresentation(HInferRepresentation* h_infer);
+ virtual Representation RepresentationFromInputs();
+ virtual void AssumeRepresentation(Representation r);
+
virtual bool IsCommutative() const { return false; }
virtual void PrintDataTo(StringStream* stream);
DECLARE_ABSTRACT_INSTRUCTION(BinaryOperation)
+
+ private:
+ Representation observed_input_representation_[2];
+ Representation observed_output_representation_;
};
@@ -2765,6 +3315,8 @@ class HWrapReceiver: public HTemplateInstruction<2> {
virtual HValue* Canonicalize();
+ virtual void PrintDataTo(StringStream* stream);
+
DECLARE_CONCRETE_INSTRUCTION(WrapReceiver)
};
@@ -2884,47 +3436,59 @@ enum BoundsCheckKeyMode {
class HBoundsCheck: public HTemplateInstruction<2> {
public:
- HBoundsCheck(HValue* index, HValue* length,
- BoundsCheckKeyMode key_mode = DONT_ALLOW_SMI_KEY)
- : key_mode_(key_mode) {
+ // Normally HBoundsCheck should be created using the
+ // HGraphBuilder::AddBoundsCheck() helper, which also guards the index with
+  // an HCheckSmiOrInt32 check.
+  // However, when building stubs, where we know that the arguments are Int32,
+ // it makes sense to invoke this constructor directly.
+ HBoundsCheck(HValue* index,
+ HValue* length,
+ BoundsCheckKeyMode key_mode = DONT_ALLOW_SMI_KEY,
+ Representation r = Representation::None())
+ : key_mode_(key_mode), skip_check_(false) {
SetOperandAt(0, index);
SetOperandAt(1, length);
- set_representation(Representation::Integer32());
+ if (r.IsNone()) {
+ // In the normal compilation pipeline the representation is flexible
+ // (see InferRepresentation).
+ SetFlag(kFlexibleRepresentation);
+ } else {
+ // When compiling stubs we want to set the representation explicitly
+ // so the compilation pipeline can skip the HInferRepresentation phase.
+ set_representation(r);
+ }
SetFlag(kUseGVN);
}
+ bool skip_check() { return skip_check_; }
+ void set_skip_check(bool skip_check) { skip_check_ = skip_check; }
+
virtual Representation RequiredInputRepresentation(int arg_index) {
- if (key_mode_ == DONT_ALLOW_SMI_KEY ||
- !length()->representation().IsTagged()) {
- return Representation::Integer32();
- }
- // If the index is tagged and isn't constant, then allow the length
- // to be tagged, since it is usually already tagged from loading it out of
- // the length field of a JSArray. This allows for direct comparison without
- // untagging.
- if (index()->representation().IsTagged() && !index()->IsConstant()) {
- return Representation::Tagged();
- }
- // Also allow the length to be tagged if the index is constant, because
- // it can be tagged to allow direct comparison.
- if (index()->IsConstant() &&
- index()->representation().IsInteger32() &&
- arg_index == 1) {
- return Representation::Tagged();
- }
+ return representation();
+ }
+ virtual Representation observed_input_representation(int index) {
return Representation::Integer32();
}
+ virtual bool IsRelationTrueInternal(NumericRelation relation,
+ HValue* related_value);
+
virtual void PrintDataTo(StringStream* stream);
+ virtual void InferRepresentation(HInferRepresentation* h_infer);
HValue* index() { return OperandAt(0); }
HValue* length() { return OperandAt(1); }
+ virtual int RedefinedOperandIndex() { return 0; }
+ virtual bool IsPurelyInformativeDefinition() { return skip_check(); }
+ virtual void AddInformativeDefinitions();
+
DECLARE_CONCRETE_INSTRUCTION(BoundsCheck)
protected:
virtual bool DataEquals(HValue* other) { return true; }
BoundsCheckKeyMode key_mode_;
+ bool skip_check_;
};
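A construction sketch matching the comment above (zone, index and length
are whatever the builder has in scope):

  // Normal pipeline: representation stays flexible and is inferred later.
  new(zone) HBoundsCheck(index, length);
  // Stub pipeline: pin the representation so inference can be skipped.
  new(zone) HBoundsCheck(index, length, DONT_ALLOW_SMI_KEY,
                         Representation::Integer32());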
@@ -2932,12 +3496,9 @@ class HBitwiseBinaryOperation: public HBinaryOperation {
public:
HBitwiseBinaryOperation(HValue* context, HValue* left, HValue* right)
: HBinaryOperation(context, left, right) {
- set_representation(Representation::Tagged());
SetFlag(kFlexibleRepresentation);
+ SetFlag(kTruncatingToInt32);
SetAllSideEffects();
- observed_input_representation_[0] = Representation::Tagged();
- observed_input_representation_[1] = Representation::None();
- observed_input_representation_[2] = Representation::None();
}
virtual Representation RequiredInputRepresentation(int index) {
@@ -2950,28 +3511,32 @@ class HBitwiseBinaryOperation: public HBinaryOperation {
if (!to.IsTagged()) {
ASSERT(to.IsInteger32());
ClearAllSideEffects();
- SetFlag(kTruncatingToInt32);
SetFlag(kUseGVN);
+ } else {
+ SetAllSideEffects();
+ ClearFlag(kUseGVN);
}
}
- virtual HType CalculateInferredType();
-
- virtual Representation ObservedInputRepresentation(int index) {
- return observed_input_representation_[index];
+ virtual void UpdateRepresentation(Representation new_rep,
+ HInferRepresentation* h_infer,
+ const char* reason) {
+ // We only generate either int32 or generic tagged bitwise operations.
+ if (new_rep.IsDouble()) new_rep = Representation::Integer32();
+ HValue::UpdateRepresentation(new_rep, h_infer, reason);
}
- void InitializeObservedInputRepresentation(Representation r) {
- observed_input_representation_[1] = r;
- observed_input_representation_[2] = r;
+ virtual void initialize_output_representation(Representation observed) {
+ if (observed.IsDouble()) observed = Representation::Integer32();
+ HBinaryOperation::initialize_output_representation(observed);
}
+ virtual HType CalculateInferredType();
+
DECLARE_ABSTRACT_INSTRUCTION(BitwiseBinaryOperation)
private:
virtual bool IsDeletable() const { return true; }
-
- Representation observed_input_representation_[3];
};
@@ -2982,6 +3547,9 @@ class HMathFloorOfDiv: public HBinaryOperation {
set_representation(Representation::Integer32());
SetFlag(kUseGVN);
SetFlag(kCanOverflow);
+ if (!right->IsConstant()) {
+ SetFlag(kCanBeDivByZero);
+ }
}
virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
@@ -3004,13 +3572,15 @@ class HArithmeticBinaryOperation: public HBinaryOperation {
public:
HArithmeticBinaryOperation(HValue* context, HValue* left, HValue* right)
: HBinaryOperation(context, left, right) {
- set_representation(Representation::Tagged());
- SetFlag(kFlexibleRepresentation);
SetAllSideEffects();
+ SetFlag(kFlexibleRepresentation);
}
virtual void RepresentationChanged(Representation to) {
- if (!to.IsTagged()) {
+ if (to.IsTagged()) {
+ SetAllSideEffects();
+ ClearFlag(kUseGVN);
+ } else {
ClearAllSideEffects();
SetFlag(kUseGVN);
}
@@ -3023,13 +3593,6 @@ class HArithmeticBinaryOperation: public HBinaryOperation {
: representation();
}
- virtual Representation InferredRepresentation() {
- if (left()->representation().Equals(right()->representation())) {
- return left()->representation();
- }
- return HValue::InferredRepresentation();
- }
-
private:
virtual bool IsDeletable() const { return true; }
};
@@ -3048,11 +3611,9 @@ class HCompareGeneric: public HBinaryOperation {
}
virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
-
- Representation GetInputRepresentation() const {
- return Representation::Tagged();
+ return index == 0
+ ? Representation::Tagged()
+ : representation();
}
Token::Value token() const { return token_; }
@@ -3071,6 +3632,7 @@ class HCompareIDAndBranch: public HTemplateControlInstruction<2, 2> {
public:
HCompareIDAndBranch(HValue* left, HValue* right, Token::Value token)
: token_(token) {
+ SetFlag(kFlexibleRepresentation);
ASSERT(Token::IsCompareOp(token));
SetOperandAt(0, left);
SetOperandAt(1, right);
@@ -3080,20 +3642,28 @@ class HCompareIDAndBranch: public HTemplateControlInstruction<2, 2> {
HValue* right() { return OperandAt(1); }
Token::Value token() const { return token_; }
- void SetInputRepresentation(Representation r);
- Representation GetInputRepresentation() const {
- return input_representation_;
+ void set_observed_input_representation(Representation left,
+ Representation right) {
+ observed_input_representation_[0] = left;
+ observed_input_representation_[1] = right;
}
+ virtual void InferRepresentation(HInferRepresentation* h_infer);
+
virtual Representation RequiredInputRepresentation(int index) {
- return input_representation_;
+ return representation();
+ }
+ virtual Representation observed_input_representation(int index) {
+ return observed_input_representation_[index];
}
virtual void PrintDataTo(StringStream* stream);
+ virtual void AddInformativeDefinitions();
+
DECLARE_CONCRETE_INSTRUCTION(CompareIDAndBranch)
private:
- Representation input_representation_;
+ Representation observed_input_representation_[2];
Token::Value token_;
};
@@ -3154,6 +3724,9 @@ class HIsNilAndBranch: public HUnaryControlInstruction {
virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
}
+ virtual Representation observed_input_representation(int index) {
+ return Representation::Tagged();
+ }
DECLARE_CONCRETE_INSTRUCTION(IsNilAndBranch)
@@ -3413,16 +3986,30 @@ class HInstanceOfKnownGlobal: public HTemplateInstruction<2> {
};
-class HPower: public HTemplateInstruction<2> {
+// TODO(mstarzinger): This instruction should be modeled as a load of the map
+// field followed by a load of the instance size field once HLoadNamedField is
+// flexible enough to accommodate byte-field loads.
+class HInstanceSize: public HTemplateInstruction<1> {
public:
- HPower(HValue* left, HValue* right) {
- SetOperandAt(0, left);
- SetOperandAt(1, right);
- set_representation(Representation::Double());
- SetFlag(kUseGVN);
- SetGVNFlag(kChangesNewSpacePromotion);
+ explicit HInstanceSize(HValue* object) {
+ SetOperandAt(0, object);
+ set_representation(Representation::Integer32());
+ }
+
+ HValue* object() { return OperandAt(0); }
+
+ virtual Representation RequiredInputRepresentation(int index) {
+ return Representation::Tagged();
}
+ DECLARE_CONCRETE_INSTRUCTION(InstanceSize)
+};
+
+
+class HPower: public HTemplateInstruction<2> {
+ public:
+ static HInstruction* New(Zone* zone, HValue* left, HValue* right);
+
HValue* left() { return OperandAt(0); }
HValue* right() const { return OperandAt(1); }
@@ -3431,6 +4018,9 @@ class HPower: public HTemplateInstruction<2> {
? Representation::Double()
: Representation::None();
}
+ virtual Representation observed_input_representation(int index) {
+ return RequiredInputRepresentation(index);
+ }
DECLARE_CONCRETE_INSTRUCTION(Power)
@@ -3438,6 +4028,14 @@ class HPower: public HTemplateInstruction<2> {
virtual bool DataEquals(HValue* other) { return true; }
private:
+ HPower(HValue* left, HValue* right) {
+ SetOperandAt(0, left);
+ SetOperandAt(1, right);
+ set_representation(Representation::Double());
+ SetFlag(kUseGVN);
+ SetGVNFlag(kChangesNewSpacePromotion);
+ }
+
virtual bool IsDeletable() const {
return !right()->representation().IsTagged();
}
@@ -3466,10 +4064,10 @@ class HRandom: public HTemplateInstruction<1> {
class HAdd: public HArithmeticBinaryOperation {
public:
- HAdd(HValue* context, HValue* left, HValue* right)
- : HArithmeticBinaryOperation(context, left, right) {
- SetFlag(kCanOverflow);
- }
+ static HInstruction* New(Zone* zone,
+ HValue* context,
+ HValue* left,
+ HValue* right);
// Add is only commutative if two integer values are added and not if two
// tagged values are added (because it might be a String concatenation).
@@ -3479,39 +4077,63 @@ class HAdd: public HArithmeticBinaryOperation {
virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
- static HInstruction* NewHAdd(Zone* zone,
- HValue* context,
- HValue* left,
- HValue* right);
-
virtual HType CalculateInferredType();
virtual HValue* Canonicalize();
+ virtual bool IsRelationTrueInternal(NumericRelation relation, HValue* other) {
+ HValue* base = NULL;
+ int32_t offset = 0;
+ if (left()->IsInteger32Constant()) {
+ base = right();
+ offset = left()->GetInteger32Constant();
+ } else if (right()->IsInteger32Constant()) {
+ base = left();
+ offset = right()->GetInteger32Constant();
+ } else {
+ return false;
+ }
+
+ return relation.IsExtendable(offset)
+ ? base->IsRelationTrue(relation, other) : false;
+ }
+
DECLARE_CONCRETE_INSTRUCTION(Add)
protected:
virtual bool DataEquals(HValue* other) { return true; }
virtual Range* InferRange(Zone* zone);
+
+ private:
+ HAdd(HValue* context, HValue* left, HValue* right)
+ : HArithmeticBinaryOperation(context, left, right) {
+ SetFlag(kCanOverflow);
+ }
};
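A worked example for HAdd::IsRelationTrueInternal() above (hypothetical
values): for x = HAdd(i, 1), x->IsRelationTrue(Ge, zero) reduces to
i->IsRelationTrue(Ge, zero), because Ge().IsExtendable(1) holds: if i >= 0
then i + 1 >= 0. A negative offset would instead preserve only Lt/Le facts.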
class HSub: public HArithmeticBinaryOperation {
public:
- HSub(HValue* context, HValue* left, HValue* right)
- : HArithmeticBinaryOperation(context, left, right) {
- SetFlag(kCanOverflow);
- }
+ static HInstruction* New(Zone* zone,
+ HValue* context,
+ HValue* left,
+ HValue* right);
virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
virtual HValue* Canonicalize();
- static HInstruction* NewHSub(Zone* zone,
- HValue* context,
- HValue* left,
- HValue* right);
+ virtual bool IsRelationTrueInternal(NumericRelation relation, HValue* other) {
+ if (right()->IsInteger32Constant()) {
+ HValue* base = left();
+ int32_t offset = right()->GetInteger32Constant();
+ return relation.IsExtendable(-offset)
+ ? base->IsRelationTrue(relation, other) : false;
+ } else {
+ return false;
+ }
+ }
DECLARE_CONCRETE_INSTRUCTION(Sub)
@@ -3519,15 +4141,21 @@ class HSub: public HArithmeticBinaryOperation {
virtual bool DataEquals(HValue* other) { return true; }
virtual Range* InferRange(Zone* zone);
+
+ private:
+ HSub(HValue* context, HValue* left, HValue* right)
+ : HArithmeticBinaryOperation(context, left, right) {
+ SetFlag(kCanOverflow);
+ }
};
class HMul: public HArithmeticBinaryOperation {
public:
- HMul(HValue* context, HValue* left, HValue* right)
- : HArithmeticBinaryOperation(context, left, right) {
- SetFlag(kCanOverflow);
- }
+ static HInstruction* New(Zone* zone,
+ HValue* context,
+ HValue* left,
+ HValue* right);
virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
@@ -3536,26 +4164,27 @@ class HMul: public HArithmeticBinaryOperation {
return !representation().IsTagged();
}
- static HInstruction* NewHMul(Zone* zone,
- HValue* context,
- HValue* left,
- HValue* right);
-
DECLARE_CONCRETE_INSTRUCTION(Mul)
protected:
virtual bool DataEquals(HValue* other) { return true; }
virtual Range* InferRange(Zone* zone);
+
+ private:
+ HMul(HValue* context, HValue* left, HValue* right)
+ : HArithmeticBinaryOperation(context, left, right) {
+ SetFlag(kCanOverflow);
+ }
};
class HMod: public HArithmeticBinaryOperation {
public:
- HMod(HValue* context, HValue* left, HValue* right)
- : HArithmeticBinaryOperation(context, left, right) {
- SetFlag(kCanBeDivByZero);
- }
+ static HInstruction* New(Zone* zone,
+ HValue* context,
+ HValue* left,
+ HValue* right);
bool HasPowerOf2Divisor() {
if (right()->IsConstant() &&
@@ -3569,41 +4198,53 @@ class HMod: public HArithmeticBinaryOperation {
virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
- static HInstruction* NewHMod(Zone* zone,
- HValue* context,
- HValue* left,
- HValue* right);
-
DECLARE_CONCRETE_INSTRUCTION(Mod)
protected:
virtual bool DataEquals(HValue* other) { return true; }
virtual Range* InferRange(Zone* zone);
+
+ private:
+ HMod(HValue* context, HValue* left, HValue* right)
+ : HArithmeticBinaryOperation(context, left, right) {
+ SetFlag(kCanBeDivByZero);
+ }
};
class HDiv: public HArithmeticBinaryOperation {
public:
- HDiv(HValue* context, HValue* left, HValue* right)
- : HArithmeticBinaryOperation(context, left, right) {
- SetFlag(kCanBeDivByZero);
- SetFlag(kCanOverflow);
+ static HInstruction* New(Zone* zone,
+ HValue* context,
+ HValue* left,
+ HValue* right);
+
+ bool HasPowerOf2Divisor() {
+ if (right()->IsConstant() &&
+ HConstant::cast(right())->HasInteger32Value()) {
+ int32_t value = HConstant::cast(right())->Integer32Value();
+ return value != 0 && (IsPowerOf2(value) || IsPowerOf2(-value));
+ }
+
+ return false;
}
virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
- static HInstruction* NewHDiv(Zone* zone,
- HValue* context,
- HValue* left,
- HValue* right);
-
DECLARE_CONCRETE_INSTRUCTION(Div)
protected:
virtual bool DataEquals(HValue* other) { return true; }
virtual Range* InferRange(Zone* zone);
+
+ private:
+ HDiv(HValue* context, HValue* left, HValue* right)
+ : HArithmeticBinaryOperation(context, left, right) {
+ SetFlag(kCanBeDivByZero);
+ SetFlag(kCanOverflow);
+ }
};
@@ -3611,19 +4252,28 @@ class HMathMinMax: public HArithmeticBinaryOperation {
public:
enum Operation { kMathMin, kMathMax };
- HMathMinMax(HValue* context, HValue* left, HValue* right, Operation op)
- : HArithmeticBinaryOperation(context, left, right),
- operation_(op) { }
+ static HInstruction* New(Zone* zone,
+ HValue* context,
+ HValue* left,
+ HValue* right,
+ Operation op);
virtual Representation RequiredInputRepresentation(int index) {
- return index == 0
- ? Representation::Tagged()
- : representation();
- }
+ return index == 0 ? Representation::Tagged()
+ : representation();
+ }
+
+ virtual Representation observed_input_representation(int index) {
+ return RequiredInputRepresentation(index);
+ }
+
+ virtual void InferRepresentation(HInferRepresentation* h_infer);
- virtual Representation InferredRepresentation() {
- if (left()->representation().IsInteger32() &&
- right()->representation().IsInteger32()) {
+ virtual Representation RepresentationFromInputs() {
+ Representation left_rep = left()->representation();
+ Representation right_rep = right()->representation();
+ if ((left_rep.IsNone() || left_rep.IsInteger32()) &&
+ (right_rep.IsNone() || right_rep.IsInteger32())) {
return Representation::Integer32();
}
return Representation::Double();
@@ -3644,18 +4294,21 @@ class HMathMinMax: public HArithmeticBinaryOperation {
virtual Range* InferRange(Zone* zone);
private:
+ HMathMinMax(HValue* context, HValue* left, HValue* right, Operation op)
+ : HArithmeticBinaryOperation(context, left, right),
+ operation_(op) { }
+
Operation operation_;
};
class HBitwise: public HBitwiseBinaryOperation {
public:
- HBitwise(Token::Value op, HValue* context, HValue* left, HValue* right)
- : HBitwiseBinaryOperation(context, left, right), op_(op) {
- ASSERT(op == Token::BIT_AND ||
- op == Token::BIT_OR ||
- op == Token::BIT_XOR);
- }
+ static HInstruction* New(Zone* zone,
+ Token::Value op,
+ HValue* context,
+ HValue* left,
+ HValue* right);
Token::Value op() const { return op_; }
@@ -3663,12 +4316,6 @@ class HBitwise: public HBitwiseBinaryOperation {
virtual HValue* Canonicalize();
- static HInstruction* NewHBitwise(Zone* zone,
- Token::Value op,
- HValue* context,
- HValue* left,
- HValue* right);
-
virtual void PrintDataTo(StringStream* stream);
DECLARE_CONCRETE_INSTRUCTION(Bitwise)
@@ -3681,79 +4328,82 @@ class HBitwise: public HBitwiseBinaryOperation {
virtual Range* InferRange(Zone* zone);
private:
+ HBitwise(Token::Value op, HValue* context, HValue* left, HValue* right)
+ : HBitwiseBinaryOperation(context, left, right), op_(op) {
+ ASSERT(op == Token::BIT_AND || op == Token::BIT_OR || op == Token::BIT_XOR);
+ }
+
Token::Value op_;
};
class HShl: public HBitwiseBinaryOperation {
public:
- HShl(HValue* context, HValue* left, HValue* right)
- : HBitwiseBinaryOperation(context, left, right) { }
+ static HInstruction* New(Zone* zone,
+ HValue* context,
+ HValue* left,
+ HValue* right);
virtual Range* InferRange(Zone* zone);
- static HInstruction* NewHShl(Zone* zone,
- HValue* context,
- HValue* left,
- HValue* right);
-
DECLARE_CONCRETE_INSTRUCTION(Shl)
protected:
virtual bool DataEquals(HValue* other) { return true; }
+
+ private:
+ HShl(HValue* context, HValue* left, HValue* right)
+ : HBitwiseBinaryOperation(context, left, right) { }
};
class HShr: public HBitwiseBinaryOperation {
public:
- HShr(HValue* context, HValue* left, HValue* right)
- : HBitwiseBinaryOperation(context, left, right) { }
+ static HInstruction* New(Zone* zone,
+ HValue* context,
+ HValue* left,
+ HValue* right);
virtual Range* InferRange(Zone* zone);
- static HInstruction* NewHShr(Zone* zone,
- HValue* context,
- HValue* left,
- HValue* right);
-
DECLARE_CONCRETE_INSTRUCTION(Shr)
protected:
virtual bool DataEquals(HValue* other) { return true; }
+
+ private:
+ HShr(HValue* context, HValue* left, HValue* right)
+ : HBitwiseBinaryOperation(context, left, right) { }
};
class HSar: public HBitwiseBinaryOperation {
public:
- HSar(HValue* context, HValue* left, HValue* right)
- : HBitwiseBinaryOperation(context, left, right) { }
+ static HInstruction* New(Zone* zone,
+ HValue* context,
+ HValue* left,
+ HValue* right);
virtual Range* InferRange(Zone* zone);
- static HInstruction* NewHSar(Zone* zone,
- HValue* context,
- HValue* left,
- HValue* right);
-
DECLARE_CONCRETE_INSTRUCTION(Sar)
protected:
virtual bool DataEquals(HValue* other) { return true; }
+
+ private:
+ HSar(HValue* context, HValue* left, HValue* right)
+ : HBitwiseBinaryOperation(context, left, right) { }
};
class HRor: public HBitwiseBinaryOperation {
public:
HRor(HValue* context, HValue* left, HValue* right)
- : HBitwiseBinaryOperation(context, left, right) {
+ : HBitwiseBinaryOperation(context, left, right) {
ChangeRepresentation(Representation::Integer32());
}
- static HInstruction* NewHRor(Zone* zone,
- HValue* context,
- HValue* left,
- HValue* right);
-
DECLARE_CONCRETE_INSTRUCTION(Ror)
protected:
@@ -3782,11 +4432,20 @@ class HOsrEntry: public HTemplateInstruction<0> {
class HParameter: public HTemplateInstruction<0> {
public:
- explicit HParameter(unsigned index) : index_(index) {
+ enum ParameterKind {
+ STACK_PARAMETER,
+ REGISTER_PARAMETER
+ };
+
+ explicit HParameter(unsigned index,
+ ParameterKind kind = STACK_PARAMETER)
+ : index_(index),
+ kind_(kind) {
set_representation(Representation::Tagged());
}
unsigned index() const { return index_; }
+ ParameterKind kind() const { return kind_; }
virtual void PrintDataTo(StringStream* stream);
@@ -3798,6 +4457,7 @@ class HParameter: public HTemplateInstruction<0> {
private:
unsigned index_;
+ ParameterKind kind_;
};
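A construction sketch for the new parameter kinds (illustrative; stubs take
their parameters in registers, while the normal pipeline keeps stack
parameters):

  HParameter* p0 = new(zone) HParameter(0);  // defaults to STACK_PARAMETER
  HParameter* r0 =
      new(zone) HParameter(0, HParameter::REGISTER_PARAMETER);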
@@ -3869,13 +4529,15 @@ class HLoadGlobalCell: public HTemplateInstruction<0> {
SetGVNFlag(kDependsOnGlobalVars);
}
- Handle<JSGlobalPropertyCell> cell() const { return cell_; }
+ Handle<JSGlobalPropertyCell> cell() const { return cell_; }
bool RequiresHoleCheck() const;
virtual void PrintDataTo(StringStream* stream);
virtual intptr_t Hashcode() {
ASSERT_ALLOCATION_DISABLED;
+ // Dereferencing to use the object's raw address for hashing is safe.
+ AllowHandleDereference allow_handle_deref(isolate());
return reinterpret_cast<intptr_t>(*cell_);
}
@@ -3932,6 +4594,106 @@ class HLoadGlobalGeneric: public HTemplateInstruction<2> {
};
+class HAllocateObject: public HTemplateInstruction<1> {
+ public:
+ HAllocateObject(HValue* context, Handle<JSFunction> constructor)
+ : constructor_(constructor) {
+ SetOperandAt(0, context);
+ set_representation(Representation::Tagged());
+ SetGVNFlag(kChangesNewSpacePromotion);
+ }
+
+ // Maximum instance size for which allocations will be inlined.
+ static const int kMaxSize = 64 * kPointerSize;
+
+ HValue* context() { return OperandAt(0); }
+ Handle<JSFunction> constructor() { return constructor_; }
+
+ virtual Representation RequiredInputRepresentation(int index) {
+ return Representation::Tagged();
+ }
+ virtual Handle<Map> GetMonomorphicJSObjectMap() {
+ ASSERT(constructor()->has_initial_map());
+ return Handle<Map>(constructor()->initial_map());
+ }
+ virtual HType CalculateInferredType();
+
+ DECLARE_CONCRETE_INSTRUCTION(AllocateObject)
+
+ private:
+ // TODO(svenpanne) Might be safe, but leave it out until we know for sure.
+ // virtual bool IsDeletable() const { return true; }
+
+ Handle<JSFunction> constructor_;
+};
+
+
+class HAllocate: public HTemplateInstruction<2> {
+ public:
+ enum Flags {
+ CAN_ALLOCATE_IN_NEW_SPACE = 1 << 0,
+ CAN_ALLOCATE_IN_OLD_DATA_SPACE = 1 << 1,
+ CAN_ALLOCATE_IN_OLD_POINTER_SPACE = 1 << 2,
+ ALLOCATE_DOUBLE_ALIGNED = 1 << 3
+ };
+
+ HAllocate(HValue* context, HValue* size, HType type, Flags flags)
+ : type_(type),
+ flags_(flags) {
+ ASSERT((flags & CAN_ALLOCATE_IN_OLD_DATA_SPACE) == 0); // unimplemented
+ ASSERT((flags & CAN_ALLOCATE_IN_OLD_POINTER_SPACE) == 0); // unimplemented
+ SetOperandAt(0, context);
+ SetOperandAt(1, size);
+ set_representation(Representation::Tagged());
+ SetGVNFlag(kChangesNewSpacePromotion);
+ }
+
+ HValue* context() { return OperandAt(0); }
+ HValue* size() { return OperandAt(1); }
+
+ virtual Representation RequiredInputRepresentation(int index) {
+ if (index == 0) {
+ return Representation::Tagged();
+ } else {
+ return Representation::Integer32();
+ }
+ }
+
+ virtual HType CalculateInferredType();
+
+ bool CanAllocateInNewSpace() const {
+ return (flags_ & CAN_ALLOCATE_IN_NEW_SPACE) != 0;
+ }
+
+ bool CanAllocateInOldDataSpace() const {
+ return (flags_ & CAN_ALLOCATE_IN_OLD_DATA_SPACE) != 0;
+ }
+
+ bool CanAllocateInOldPointerSpace() const {
+ return (flags_ & CAN_ALLOCATE_IN_OLD_POINTER_SPACE) != 0;
+ }
+
+ bool CanAllocateInOldSpace() const {
+ return CanAllocateInOldDataSpace() ||
+ CanAllocateInOldPointerSpace();
+ }
+
+ bool GuaranteedInNewSpace() const {
+ return CanAllocateInNewSpace() && !CanAllocateInOldSpace();
+ }
+
+ bool MustAllocateDoubleAligned() const {
+ return (flags_ & ALLOCATE_DOUBLE_ALIGNED) != 0;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(Allocate)
+
+ private:
+ HType type_;
+ Flags flags_;
+};
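
Since Flags is a plain enum used as a bitmask, combined flags must be cast back to HAllocate::Flags; the BuildAllocateElements change further down in this patch uses exactly this pattern. A minimal sketch, assuming a Zone* zone and HValue* context and size in scope:

    HAllocate::Flags flags = static_cast<HAllocate::Flags>(
        HAllocate::CAN_ALLOCATE_IN_NEW_SPACE |
        HAllocate::ALLOCATE_DOUBLE_ALIGNED);
    // Both predicates hold for this combination:
    // GuaranteedInNewSpace() and MustAllocateDoubleAligned().
    HAllocate* alloc =
        new(zone) HAllocate(context, size, HType::JSArray(), flags);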
+
+
inline bool StoringValueNeedsWriteBarrier(HValue* value) {
return !value->type().IsBoolean()
&& !value->type().IsSmi()
@@ -3941,8 +4703,13 @@ inline bool StoringValueNeedsWriteBarrier(HValue* value) {
inline bool ReceiverObjectNeedsWriteBarrier(HValue* object,
HValue* new_space_dominator) {
- return (!object->IsAllocateObject() && !object->IsFastLiteral()) ||
- (object != new_space_dominator);
+ if (object != new_space_dominator) return true;
+ if (object->IsFastLiteral()) return false;
+ if (object->IsAllocateObject()) return false;
+ if (object->IsAllocate()) {
+ return !HAllocate::cast(object)->GuaranteedInNewSpace();
+ }
+ return true;
}
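
The rewrite makes the elision rule explicit: the barrier can be skipped only when the object stored into is itself the most recent new-space allocation. For an HAllocate a created with only CAN_ALLOCATE_IN_NEW_SPACE (illustrative):

    ReceiverObjectNeedsWriteBarrier(a, a)      // false: a is still guaranteed
                                               // to be in new space
    ReceiverObjectNeedsWriteBarrier(a, other)  // true: an allocation after a
                                               // may have caused a GC that
                                               // promoted a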
@@ -4266,6 +5033,17 @@ class ArrayInstructionInterface {
virtual bool IsDehoisted() = 0;
virtual void SetDehoisted(bool is_dehoisted) = 0;
virtual ~ArrayInstructionInterface() { };
+
+ static Representation KeyedAccessIndexRequirement(Representation r) {
+ return r.IsInteger32() ? Representation::Integer32()
+ : Representation::Tagged();
+ }
+};
+
+
+enum LoadKeyedHoleMode {
+ NEVER_RETURN_HOLE,
+ ALLOW_RETURN_HOLE
};
@@ -4275,13 +5053,15 @@ class HLoadKeyed
HLoadKeyed(HValue* obj,
HValue* key,
HValue* dependency,
- ElementsKind elements_kind)
+ ElementsKind elements_kind,
+ LoadKeyedHoleMode mode = NEVER_RETURN_HOLE)
: bit_field_(0) {
- bit_field_ = ElementsKindField::encode(elements_kind);
+ bit_field_ = ElementsKindField::encode(elements_kind) |
+ HoleModeField::encode(mode);
SetOperandAt(0, obj);
SetOperandAt(1, key);
- SetOperandAt(2, dependency);
+ SetOperandAt(2, dependency != NULL ? dependency : obj);
if (!is_external()) {
// I can detect the case between storing double (holey and fast) and
@@ -4290,8 +5070,7 @@ class HLoadKeyed
IsFastDoubleElementsKind(elements_kind));
if (IsFastSmiOrObjectElementsKind(elements_kind)) {
- if (IsFastSmiElementsKind(elements_kind) &&
- IsFastPackedElementsKind(elements_kind)) {
+ if (IsFastSmiElementsKind(elements_kind)) {
set_type(HType::Smi());
}
@@ -4322,7 +5101,11 @@ class HLoadKeyed
}
HValue* elements() { return OperandAt(0); }
HValue* key() { return OperandAt(1); }
- HValue* dependency() { return OperandAt(2); }
+ HValue* dependency() {
+ ASSERT(HasDependency());
+ return OperandAt(2);
+ }
+ bool HasDependency() const { return OperandAt(0) != OperandAt(2); }
uint32_t index_offset() { return IndexOffsetField::decode(bit_field_); }
void SetIndexOffset(uint32_t index_offset) {
bit_field_ = IndexOffsetField::update(bit_field_, index_offset);
@@ -4336,6 +5119,9 @@ class HLoadKeyed
ElementsKind elements_kind() const {
return ElementsKindField::decode(bit_field_);
}
+ LoadKeyedHoleMode hole_mode() const {
+ return HoleModeField::decode(bit_field_);
+ }
virtual Representation RequiredInputRepresentation(int index) {
// kind_fast: tagged[int32] (none)
@@ -4345,12 +5131,20 @@ class HLoadKeyed
return is_external() ? Representation::External()
: Representation::Tagged();
}
- if (index == 1) return Representation::Integer32();
+ if (index == 1) {
+ return ArrayInstructionInterface::KeyedAccessIndexRequirement(
+ OperandAt(1)->representation());
+ }
return Representation::None();
}
+ virtual Representation observed_input_representation(int index) {
+ return RequiredInputRepresentation(index);
+ }
+
virtual void PrintDataTo(StringStream* stream);
+ bool UsesMustHandleHole() const;
bool RequiresHoleCheck() const;
virtual Range* InferRange(Zone* zone);
@@ -4375,11 +5169,13 @@ class HLoadKeyed
// Establish some checks around our packed fields
enum LoadKeyedBits {
kBitsForElementsKind = 5,
- kBitsForIndexOffset = 26,
+ kBitsForHoleMode = 1,
+ kBitsForIndexOffset = 25,
kBitsForIsDehoisted = 1,
kStartElementsKind = 0,
- kStartIndexOffset = kStartElementsKind + kBitsForElementsKind,
+ kStartHoleMode = kStartElementsKind + kBitsForElementsKind,
+ kStartIndexOffset = kStartHoleMode + kBitsForHoleMode,
kStartIsDehoisted = kStartIndexOffset + kBitsForIndexOffset
};
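
Stealing one bit for the hole mode narrows the index offset from 26 to 25 bits, so the four fields still pack into the 32-bit bit_field_ (5 + 1 + 25 + 1 = 32). A STATIC_ASSERT along these lines would make the budget explicit (hypothetical, not part of the patch):

    STATIC_ASSERT((kBitsForElementsKind + kBitsForHoleMode +
                   kBitsForIndexOffset + kBitsForIsDehoisted) <= 32);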
@@ -4389,6 +5185,9 @@ class HLoadKeyed
class ElementsKindField:
public BitField<ElementsKind, kStartElementsKind, kBitsForElementsKind>
{}; // NOLINT
+ class HoleModeField:
+ public BitField<LoadKeyedHoleMode, kStartHoleMode, kBitsForHoleMode>
+ {}; // NOLINT
class IndexOffsetField:
public BitField<uint32_t, kStartIndexOffset, kBitsForIndexOffset>
{}; // NOLINT
@@ -4527,11 +5326,18 @@ class HStoreKeyed
public:
HStoreKeyed(HValue* obj, HValue* key, HValue* val,
ElementsKind elements_kind)
- : elements_kind_(elements_kind), index_offset_(0), is_dehoisted_(false) {
+ : elements_kind_(elements_kind),
+ index_offset_(0),
+ is_dehoisted_(false),
+ new_space_dominator_(NULL) {
SetOperandAt(0, obj);
SetOperandAt(1, key);
SetOperandAt(2, val);
+ if (IsFastObjectElementsKind(elements_kind)) {
+ SetFlag(kTrackSideEffectDominators);
+ SetGVNFlag(kDependsOnNewSpacePromotion);
+ }
if (is_external()) {
SetGVNFlag(kChangesSpecializedArrayElements);
} else if (IsFastDoubleElementsKind(elements_kind)) {
@@ -4540,6 +5346,12 @@ class HStoreKeyed
} else {
SetGVNFlag(kChangesArrayElements);
}
+
+ // EXTERNAL_{UNSIGNED_,}{BYTE,SHORT,INT}_ELEMENTS are truncating.
+ if (elements_kind >= EXTERNAL_BYTE_ELEMENTS &&
+ elements_kind <= EXTERNAL_UNSIGNED_INT_ELEMENTS) {
+ SetFlag(kTruncatingToInt32);
+ }
}
virtual Representation RequiredInputRepresentation(int index) {
@@ -4550,7 +5362,8 @@ class HStoreKeyed
return is_external() ? Representation::External()
: Representation::Tagged();
} else if (index == 1) {
- return Representation::Integer32();
+ return ArrayInstructionInterface::KeyedAccessIndexRequirement(
+ OperandAt(1)->representation());
}
ASSERT_EQ(index, 2);
@@ -4565,6 +5378,19 @@ class HStoreKeyed
bool is_external() const {
return IsExternalArrayElementsKind(elements_kind());
}
+
+ virtual Representation observed_input_representation(int index) {
+ if (index < 2) return RequiredInputRepresentation(index);
+ if (IsDoubleOrFloatElementsKind(elements_kind())) {
+ return Representation::Double();
+ }
+ if (is_external()) {
+ return Representation::Integer32();
+ }
+ // For fast object elements kinds, don't assume anything.
+ return Representation::None();
+ }
+
HValue* elements() { return OperandAt(0); }
HValue* key() { return OperandAt(1); }
HValue* value() { return OperandAt(2); }
@@ -4579,11 +5405,19 @@ class HStoreKeyed
bool IsDehoisted() { return is_dehoisted_; }
void SetDehoisted(bool is_dehoisted) { is_dehoisted_ = is_dehoisted; }
+ virtual void SetSideEffectDominator(GVNFlag side_effect, HValue* dominator) {
+ ASSERT(side_effect == kChangesNewSpacePromotion);
+ new_space_dominator_ = dominator;
+ }
+
+ HValue* new_space_dominator() const { return new_space_dominator_; }
+
bool NeedsWriteBarrier() {
if (value_is_smi()) {
return false;
} else {
- return StoringValueNeedsWriteBarrier(value());
+ return StoringValueNeedsWriteBarrier(value()) &&
+ ReceiverObjectNeedsWriteBarrier(elements(), new_space_dominator());
}
}
@@ -4597,6 +5431,7 @@ class HStoreKeyed
ElementsKind elements_kind_;
uint32_t index_offset_;
bool is_dehoisted_;
+ HValue* new_space_dominator_;
};
@@ -4635,14 +5470,18 @@ class HStoreKeyedGeneric: public HTemplateInstruction<4> {
};
-class HTransitionElementsKind: public HTemplateInstruction<1> {
+class HTransitionElementsKind: public HTemplateInstruction<2> {
public:
- HTransitionElementsKind(HValue* object,
+ HTransitionElementsKind(HValue* context,
+ HValue* object,
Handle<Map> original_map,
Handle<Map> transitioned_map)
: original_map_(original_map),
- transitioned_map_(transitioned_map) {
+ transitioned_map_(transitioned_map),
+ from_kind_(original_map->elements_kind()),
+ to_kind_(transitioned_map->elements_kind()) {
SetOperandAt(0, object);
+ SetOperandAt(1, context);
SetFlag(kUseGVN);
SetGVNFlag(kChangesElementsKind);
if (original_map->has_fast_double_elements()) {
@@ -4661,8 +5500,11 @@ class HTransitionElementsKind: public HTemplateInstruction<1> {
}
HValue* object() { return OperandAt(0); }
+ HValue* context() { return OperandAt(1); }
Handle<Map> original_map() { return original_map_; }
Handle<Map> transitioned_map() { return transitioned_map_; }
+ ElementsKind from_kind() { return from_kind_; }
+ ElementsKind to_kind() { return to_kind_; }
virtual void PrintDataTo(StringStream* stream);
@@ -4678,18 +5520,17 @@ class HTransitionElementsKind: public HTemplateInstruction<1> {
private:
Handle<Map> original_map_;
Handle<Map> transitioned_map_;
+ ElementsKind from_kind_;
+ ElementsKind to_kind_;
};
class HStringAdd: public HBinaryOperation {
public:
- HStringAdd(HValue* context, HValue* left, HValue* right)
- : HBinaryOperation(context, left, right) {
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- SetGVNFlag(kDependsOnMaps);
- SetGVNFlag(kChangesNewSpacePromotion);
- }
+ static HInstruction* New(Zone* zone,
+ HValue* context,
+ HValue* left,
+ HValue* right);
virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
@@ -4704,8 +5545,17 @@ class HStringAdd: public HBinaryOperation {
protected:
virtual bool DataEquals(HValue* other) { return true; }
+
+ private:
+ HStringAdd(HValue* context, HValue* left, HValue* right)
+ : HBinaryOperation(context, left, right) {
+ set_representation(Representation::Tagged());
+ SetFlag(kUseGVN);
+ SetGVNFlag(kDependsOnMaps);
+ SetGVNFlag(kChangesNewSpacePromotion);
+ }
+
// TODO(svenpanne) Might be safe, but leave it out until we know for sure.
- // private:
// virtual bool IsDeletable() const { return true; }
};
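
Hiding the constructor behind a static New factory lets call sites receive something other than a fresh HStringAdd, e.g. a constant-folded result. The factory body lives in the .cc file and is not part of this hunk; one plausible shape, for illustration only:

    HInstruction* HStringAdd::New(Zone* zone, HValue* context,
                                  HValue* left, HValue* right) {
      if (left->IsConstant() && right->IsConstant()) {
        // ... possibly fold the concatenation to an HConstant here ...
      }
      return new(zone) HStringAdd(context, left, right);
    }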
@@ -4750,13 +5600,9 @@ class HStringCharCodeAt: public HTemplateInstruction<3> {
class HStringCharFromCode: public HTemplateInstruction<2> {
public:
- HStringCharFromCode(HValue* context, HValue* char_code) {
- SetOperandAt(0, context);
- SetOperandAt(1, char_code);
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- SetGVNFlag(kChangesNewSpacePromotion);
- }
+ static HInstruction* New(Zone* zone,
+ HValue* context,
+ HValue* char_code);
virtual Representation RequiredInputRepresentation(int index) {
return index == 0
@@ -4772,19 +5618,23 @@ class HStringCharFromCode: public HTemplateInstruction<2> {
DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode)
+ private:
+ HStringCharFromCode(HValue* context, HValue* char_code) {
+ SetOperandAt(0, context);
+ SetOperandAt(1, char_code);
+ set_representation(Representation::Tagged());
+ SetFlag(kUseGVN);
+ SetGVNFlag(kChangesNewSpacePromotion);
+ }
+
// TODO(svenpanne) Might be safe, but leave it out until we know for sure.
- // private:
- // virtual bool IsDeletable() const { return true; }
+ // virtual bool IsDeletable() const { return true; }
};
class HStringLength: public HUnaryOperation {
public:
- explicit HStringLength(HValue* string) : HUnaryOperation(string) {
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- SetGVNFlag(kDependsOnMaps);
- }
+ static HInstruction* New(Zone* zone, HValue* string);
virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
@@ -4805,56 +5655,42 @@ class HStringLength: public HUnaryOperation {
}
private:
- virtual bool IsDeletable() const { return true; }
-};
-
-
-class HAllocateObject: public HTemplateInstruction<1> {
- public:
- HAllocateObject(HValue* context, Handle<JSFunction> constructor)
- : constructor_(constructor) {
- SetOperandAt(0, context);
+ explicit HStringLength(HValue* string) : HUnaryOperation(string) {
set_representation(Representation::Tagged());
- SetGVNFlag(kChangesNewSpacePromotion);
- }
-
- // Maximum instance size for which allocations will be inlined.
- static const int kMaxSize = 64 * kPointerSize;
-
- HValue* context() { return OperandAt(0); }
- Handle<JSFunction> constructor() { return constructor_; }
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
+ SetFlag(kUseGVN);
+ SetGVNFlag(kDependsOnMaps);
}
- virtual HType CalculateInferredType();
- DECLARE_CONCRETE_INSTRUCTION(AllocateObject)
-
- private:
- // TODO(svenpanne) Might be safe, but leave it out until we know for sure.
- // virtual bool IsDeletable() const { return true; }
-
- Handle<JSFunction> constructor_;
+ virtual bool IsDeletable() const { return true; }
};
template <int V>
class HMaterializedLiteral: public HTemplateInstruction<V> {
public:
+ HMaterializedLiteral<V>(int index, int depth, AllocationSiteMode mode)
+ : literal_index_(index), depth_(depth), allocation_site_mode_(mode) {
+ this->set_representation(Representation::Tagged());
+ }
+
HMaterializedLiteral<V>(int index, int depth)
- : literal_index_(index), depth_(depth) {
+ : literal_index_(index), depth_(depth),
+ allocation_site_mode_(DONT_TRACK_ALLOCATION_SITE) {
this->set_representation(Representation::Tagged());
}
int literal_index() const { return literal_index_; }
int depth() const { return depth_; }
+ AllocationSiteMode allocation_site_mode() const {
+ return allocation_site_mode_;
+ }
private:
virtual bool IsDeletable() const { return true; }
int literal_index_;
int depth_;
+ AllocationSiteMode allocation_site_mode_;
};
@@ -4864,8 +5700,9 @@ class HFastLiteral: public HMaterializedLiteral<1> {
Handle<JSObject> boilerplate,
int total_size,
int literal_index,
- int depth)
- : HMaterializedLiteral<1>(literal_index, depth),
+ int depth,
+ AllocationSiteMode mode)
+ : HMaterializedLiteral<1>(literal_index, depth, mode),
boilerplate_(boilerplate),
total_size_(total_size) {
SetOperandAt(0, context);
@@ -4880,10 +5717,12 @@ class HFastLiteral: public HMaterializedLiteral<1> {
HValue* context() { return OperandAt(0); }
Handle<JSObject> boilerplate() const { return boilerplate_; }
int total_size() const { return total_size_; }
-
virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
}
+ virtual Handle<Map> GetMonomorphicJSObjectMap() {
+ return Handle<Map>(boilerplate()->map());
+ }
virtual HType CalculateInferredType();
DECLARE_CONCRETE_INSTRUCTION(FastLiteral)
@@ -4900,8 +5739,9 @@ class HArrayLiteral: public HMaterializedLiteral<1> {
Handle<HeapObject> boilerplate_object,
int length,
int literal_index,
- int depth)
- : HMaterializedLiteral<1>(literal_index, depth),
+ int depth,
+ AllocationSiteMode mode)
+ : HMaterializedLiteral<1>(literal_index, depth, mode),
length_(length),
boilerplate_object_(boilerplate_object) {
SetOperandAt(0, context);
@@ -4917,7 +5757,6 @@ class HArrayLiteral: public HMaterializedLiteral<1> {
}
Handle<HeapObject> boilerplate_object() const { return boilerplate_object_; }
int length() const { return length_; }
-
bool IsCopyOnWrite() const;
virtual Representation RequiredInputRepresentation(int index) {
@@ -5059,6 +5898,22 @@ class HTypeof: public HTemplateInstruction<2> {
};
+class HTrapAllocationMemento : public HTemplateInstruction<1> {
+ public:
+ explicit HTrapAllocationMemento(HValue* obj) {
+ SetOperandAt(0, obj);
+ }
+
+ virtual Representation RequiredInputRepresentation(int index) {
+ return Representation::Tagged();
+ }
+
+ HValue* object() { return OperandAt(0); }
+
+ DECLARE_CONCRETE_INSTRUCTION(TrapAllocationMemento)
+};
+
+
class HToFastProperties: public HUnaryOperation {
public:
explicit HToFastProperties(HValue* value) : HUnaryOperation(value) {
@@ -5117,6 +5972,33 @@ class HDateField: public HUnaryOperation {
};
+class HSeqStringSetChar: public HTemplateInstruction<3> {
+ public:
+ HSeqStringSetChar(String::Encoding encoding,
+ HValue* string,
+ HValue* index,
+ HValue* value) : encoding_(encoding) {
+ SetOperandAt(0, string);
+ SetOperandAt(1, index);
+ SetOperandAt(2, value);
+ }
+
+ String::Encoding encoding() { return encoding_; }
+ HValue* string() { return OperandAt(0); }
+ HValue* index() { return OperandAt(1); }
+ HValue* value() { return OperandAt(2); }
+
+ virtual Representation RequiredInputRepresentation(int index) {
+ return Representation::Tagged();
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(SeqStringSetChar)
+
+ private:
+ String::Encoding encoding_;
+};
+
+
class HDeleteProperty: public HBinaryOperation {
public:
HDeleteProperty(HValue* context, HValue* obj, HValue* key)
diff --git a/src/3rdparty/v8/src/hydrogen.cc b/src/3rdparty/v8/src/hydrogen.cc
index 043d567..82ffbb2 100644
--- a/src/3rdparty/v8/src/hydrogen.cc
+++ b/src/3rdparty/v8/src/hydrogen.cc
@@ -71,7 +71,8 @@ HBasicBlock::HBasicBlock(HGraph* graph)
parent_loop_header_(NULL),
is_inline_return_target_(false),
is_deoptimizing_(false),
- dominates_loop_successors_(false) { }
+ dominates_loop_successors_(false),
+ is_osr_entry_(false) { }
void HBasicBlock::AttachLoopInformation() {
@@ -133,21 +134,30 @@ HDeoptimize* HBasicBlock::CreateDeoptimize(
}
-HSimulate* HBasicBlock::CreateSimulate(BailoutId ast_id) {
+HSimulate* HBasicBlock::CreateSimulate(BailoutId ast_id,
+ RemovableSimulate removable) {
ASSERT(HasEnvironment());
HEnvironment* environment = last_environment();
ASSERT(ast_id.IsNone() ||
+ ast_id == BailoutId::StubEntry() ||
environment->closure()->shared()->VerifyBailoutId(ast_id));
int push_count = environment->push_count();
int pop_count = environment->pop_count();
- HSimulate* instr = new(zone()) HSimulate(ast_id, pop_count, zone());
- for (int i = push_count - 1; i >= 0; --i) {
+ HSimulate* instr =
+ new(zone()) HSimulate(ast_id, pop_count, zone(), removable);
+ // Order of pushed values: newest (top of stack) first. This allows
+ // HSimulate::MergeInto() to easily append additional pushed values
+ // that are older (from further down the stack).
+ for (int i = 0; i < push_count; ++i) {
instr->AddPushedValue(environment->ExpressionStackAt(i));
}
- for (int i = 0; i < environment->assigned_variables()->length(); ++i) {
- int index = environment->assigned_variables()->at(i);
+ for (GrowableBitVector::Iterator it(environment->assigned_variables(),
+ zone());
+ !it.Done();
+ it.Advance()) {
+ int index = it.Current();
instr->AddAssignedValue(index, environment->Lookup(index));
}
environment->ClearHistory();
@@ -212,8 +222,9 @@ void HBasicBlock::SetJoinId(BailoutId ast_id) {
HSimulate* simulate = HSimulate::cast(predecessor->end()->previous());
// We only need to verify the ID once.
ASSERT(i != 0 ||
- predecessor->last_environment()->closure()->shared()
- ->VerifyBailoutId(ast_id));
+ (predecessor->last_environment()->closure().is_null() ||
+ predecessor->last_environment()->closure()->shared()
+ ->VerifyBailoutId(ast_id)));
simulate->set_ast_id(ast_id);
}
}
@@ -488,6 +499,8 @@ class ReachabilityAnalyzer BASE_EMBEDDED {
void HGraph::Verify(bool do_full_verify) const {
+ // Allow dereferencing for debug mode verification.
+ AllowHandleDereference allow_handle_deref(isolate());
for (int i = 0; i < blocks_.length(); i++) {
HBasicBlock* block = blocks_.at(i);
@@ -591,6 +604,11 @@ HConstant* HGraph::GetConstantInt32(SetOncePointer<HConstant>* pointer,
}
+HConstant* HGraph::GetConstant0() {
+ return GetConstantInt32(&constant_0_, 0);
+}
+
+
HConstant* HGraph::GetConstant1() {
return GetConstantInt32(&constant_1_, 1);
}
@@ -616,33 +634,537 @@ HConstant* HGraph::GetConstantHole() {
}
-HGraphBuilder::HGraphBuilder(CompilationInfo* info,
- TypeFeedbackOracle* oracle)
- : function_state_(NULL),
+HGraphBuilder::CheckBuilder::CheckBuilder(HGraphBuilder* builder, BailoutId id)
+ : builder_(builder),
+ finished_(false),
+ id_(id) {
+ HEnvironment* env = builder->environment();
+ failure_block_ = builder->CreateBasicBlock(env->Copy());
+ merge_block_ = builder->CreateBasicBlock(env->Copy());
+}
+
+
+void HGraphBuilder::CheckBuilder::CheckNotUndefined(HValue* value) {
+ HEnvironment* env = builder_->environment();
+ HIsNilAndBranch* compare =
+ new(zone()) HIsNilAndBranch(value, kStrictEquality, kUndefinedValue);
+ HBasicBlock* success_block = builder_->CreateBasicBlock(env->Copy());
+ HBasicBlock* failure_block = builder_->CreateBasicBlock(env->Copy());
+ compare->SetSuccessorAt(0, failure_block);
+ compare->SetSuccessorAt(1, success_block);
+ failure_block->Goto(failure_block_);
+ builder_->current_block()->Finish(compare);
+ builder_->set_current_block(success_block);
+}
+
+
+void HGraphBuilder::CheckBuilder::CheckIntegerEq(HValue* left, HValue* right) {
+ HEnvironment* env = builder_->environment();
+ HCompareIDAndBranch* compare =
+ new(zone()) HCompareIDAndBranch(left, right, Token::EQ);
+ compare->AssumeRepresentation(Representation::Integer32());
+ HBasicBlock* success_block = builder_->CreateBasicBlock(env->Copy());
+ HBasicBlock* failure_block = builder_->CreateBasicBlock(env->Copy());
+ compare->SetSuccessorAt(0, success_block);
+ compare->SetSuccessorAt(1, failure_block);
+ failure_block->Goto(failure_block_);
+ builder_->current_block()->Finish(compare);
+ builder_->set_current_block(success_block);
+}
+
+
+void HGraphBuilder::CheckBuilder::End() {
+ ASSERT(!finished_);
+ builder_->current_block()->Goto(merge_block_);
+ failure_block_->FinishExitWithDeoptimization(HDeoptimize::kUseAll);
+ failure_block_->SetJoinId(id_);
+ builder_->set_current_block(merge_block_);
+ merge_block_->SetJoinId(id_);
+ finished_ = true;
+}
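
A CheckBuilder accumulates any number of checks that all share one deoptimizing failure block. A hypothetical use, assuming an HGraphBuilder with HValue* values a and b in scope:

    CheckBuilder checks(this, BailoutId::StubEntry());
    checks.CheckNotUndefined(a);
    checks.CheckIntegerEq(a, b);
    checks.End();  // failures deoptimize; execution resumes in the merge block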
+
+
+HGraphBuilder::IfBuilder::IfBuilder(HGraphBuilder* builder, BailoutId id)
+ : builder_(builder),
+ finished_(false),
+ id_(id) {
+ HEnvironment* env = builder->environment();
+ first_true_block_ = builder->CreateBasicBlock(env->Copy());
+ last_true_block_ = NULL;
+ first_false_block_ = builder->CreateBasicBlock(env->Copy());
+}
+
+
+HInstruction* HGraphBuilder::IfBuilder::BeginTrue(
+ HValue* left,
+ HValue* right,
+ Token::Value token,
+ Representation input_representation) {
+ HCompareIDAndBranch* compare =
+ new(zone()) HCompareIDAndBranch(left, right, token);
+ compare->set_observed_input_representation(input_representation,
+ input_representation);
+ compare->ChangeRepresentation(input_representation);
+ compare->SetSuccessorAt(0, first_true_block_);
+ compare->SetSuccessorAt(1, first_false_block_);
+ builder_->current_block()->Finish(compare);
+ builder_->set_current_block(first_true_block_);
+ return compare;
+}
+
+
+void HGraphBuilder::IfBuilder::BeginFalse() {
+ last_true_block_ = builder_->current_block();
+ ASSERT(!last_true_block_->IsFinished());
+ builder_->set_current_block(first_false_block_);
+}
+
+
+void HGraphBuilder::IfBuilder::End() {
+ ASSERT(!finished_);
+ ASSERT(!last_true_block_->IsFinished());
+ HBasicBlock* last_false_block = builder_->current_block();
+ ASSERT(!last_false_block->IsFinished());
+ HEnvironment* merge_env =
+ last_true_block_->last_environment()->Copy();
+ merge_block_ = builder_->CreateBasicBlock(merge_env);
+ last_true_block_->Goto(merge_block_);
+ last_false_block->Goto(merge_block_);
+ merge_block_->SetJoinId(id_);
+ builder_->set_current_block(merge_block_);
+ finished_ = true;
+}
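
IfBuilder follows the same begin/end discipline. A sketch, assuming Integer32 values x and limit in scope:

    IfBuilder cond(this, BailoutId::StubEntry());
    cond.BeginTrue(x, limit, Token::LT, Representation::Integer32());
    // ... emit instructions for the true branch ...
    cond.BeginFalse();
    // ... emit instructions for the false branch ...
    cond.End();  // both branches join; the merge block becomes current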
+
+
+HGraphBuilder::LoopBuilder::LoopBuilder(HGraphBuilder* builder,
+ HValue* context,
+ LoopBuilder::Direction direction,
+ BailoutId id)
+ : builder_(builder),
+ context_(context),
+ direction_(direction),
+ id_(id),
+ finished_(false) {
+ header_block_ = builder->CreateLoopHeaderBlock();
+ body_block_ = NULL;
+ exit_block_ = NULL;
+}
+
+
+HValue* HGraphBuilder::LoopBuilder::BeginBody(
+ HValue* initial,
+ HValue* terminating,
+ Token::Value token,
+ Representation input_representation) {
+ HEnvironment* env = builder_->environment();
+ phi_ = new(zone()) HPhi(env->values()->length(), zone());
+ header_block_->AddPhi(phi_);
+ phi_->AddInput(initial);
+ phi_->ChangeRepresentation(Representation::Integer32());
+ env->Push(initial);
+ builder_->current_block()->Goto(header_block_);
+
+ HEnvironment* body_env = env->Copy();
+ HEnvironment* exit_env = env->Copy();
+ body_block_ = builder_->CreateBasicBlock(body_env);
+ exit_block_ = builder_->CreateBasicBlock(exit_env);
+ // Remove the phi from the expression stack
+ body_env->Pop();
+
+ builder_->set_current_block(header_block_);
+ HCompareIDAndBranch* compare =
+ new(zone()) HCompareIDAndBranch(phi_, terminating, token);
+ compare->set_observed_input_representation(input_representation,
+ input_representation);
+ compare->ChangeRepresentation(input_representation);
+ compare->SetSuccessorAt(0, body_block_);
+ compare->SetSuccessorAt(1, exit_block_);
+ builder_->current_block()->Finish(compare);
+
+ builder_->set_current_block(body_block_);
+ if (direction_ == kPreIncrement || direction_ == kPreDecrement) {
+ HValue* one = builder_->graph()->GetConstant1();
+ if (direction_ == kPreIncrement) {
+ increment_ = HAdd::New(zone(), context_, phi_, one);
+ } else {
+ increment_ = HSub::New(zone(), context_, phi_, one);
+ }
+ increment_->ClearFlag(HValue::kCanOverflow);
+ increment_->ChangeRepresentation(Representation::Integer32());
+ builder_->AddInstruction(increment_);
+ return increment_;
+ } else {
+ return phi_;
+ }
+}
+
+
+void HGraphBuilder::LoopBuilder::EndBody() {
+ ASSERT(!finished_);
+
+ if (direction_ == kPostIncrement || direction_ == kPostDecrement) {
+ HValue* one = builder_->graph()->GetConstant1();
+ if (direction_ == kPostIncrement) {
+ increment_ = HAdd::New(zone(), context_, phi_, one);
+ } else {
+ increment_ = HSub::New(zone(), context_, phi_, one);
+ }
+ increment_->ClearFlag(HValue::kCanOverflow);
+ increment_->ChangeRepresentation(Representation::Integer32());
+ builder_->AddInstruction(increment_);
+ }
+
+ // Push the new increment value on the expression stack to merge into the phi.
+ builder_->environment()->Push(increment_);
+ builder_->current_block()->Goto(header_block_);
+ header_block_->loop_information()->RegisterBackEdge(body_block_);
+ header_block_->SetJoinId(id_);
+
+ builder_->set_current_block(exit_block_);
+ // Pop the phi from the expression stack
+ builder_->environment()->Pop();
+ finished_ = true;
+}
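
BeginBody returns the value to index with (the phi itself for post-increment loops, the already-incremented value for pre-increment ones); BuildCopyElements below is a complete client. A minimal counting loop, assuming HValue* context and an Integer32 HValue* n in scope:

    LoopBuilder loop(this, context, LoopBuilder::kPostIncrement,
                     BailoutId::StubEntry());
    HValue* i = loop.BeginBody(graph()->GetConstant0(), n, Token::LT);
    // ... emit the loop body, indexed by i ...
    loop.EndBody();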
+
+
+HGraph* HGraphBuilder::CreateGraph() {
+ graph_ = new(zone()) HGraph(info_);
+ if (FLAG_hydrogen_stats) HStatistics::Instance()->Initialize(info_);
+ HPhase phase("H_Block building");
+ set_current_block(graph()->entry_block());
+ if (!BuildGraph()) return NULL;
+ return graph_;
+}
+
+
+HInstruction* HGraphBuilder::AddInstruction(HInstruction* instr) {
+ ASSERT(current_block() != NULL);
+ current_block()->AddInstruction(instr);
+ return instr;
+}
+
+
+void HGraphBuilder::AddSimulate(BailoutId id,
+ RemovableSimulate removable) {
+ ASSERT(current_block() != NULL);
+ current_block()->AddSimulate(id, removable);
+}
+
+
+HBoundsCheck* HGraphBuilder::AddBoundsCheck(HValue* index,
+ HValue* length,
+ BoundsCheckKeyMode key_mode,
+ Representation r) {
+ if (!index->type().IsSmi()) {
+ index = new(graph()->zone()) HCheckSmiOrInt32(index);
+ AddInstruction(HCheckSmiOrInt32::cast(index));
+ }
+ if (!length->type().IsSmi()) {
+ length = new(graph()->zone()) HCheckSmiOrInt32(length);
+ AddInstruction(HCheckSmiOrInt32::cast(length));
+ }
+ HBoundsCheck* result = new(graph()->zone()) HBoundsCheck(
+ index, length, key_mode, r);
+ AddInstruction(result);
+ return result;
+}
+
+
+HBasicBlock* HGraphBuilder::CreateBasicBlock(HEnvironment* env) {
+ HBasicBlock* b = graph()->CreateBasicBlock();
+ b->SetInitialEnvironment(env);
+ return b;
+}
+
+
+HBasicBlock* HGraphBuilder::CreateLoopHeaderBlock() {
+ HBasicBlock* header = graph()->CreateBasicBlock();
+ HEnvironment* entry_env = environment()->CopyAsLoopHeader(header);
+ header->SetInitialEnvironment(entry_env);
+ header->AttachLoopInformation();
+ return header;
+}
+
+
+HInstruction* HGraphBuilder::BuildExternalArrayElementAccess(
+ HValue* external_elements,
+ HValue* checked_key,
+ HValue* val,
+ HValue* dependency,
+ ElementsKind elements_kind,
+ bool is_store) {
+ Zone* zone = this->zone();
+ if (is_store) {
+ ASSERT(val != NULL);
+ switch (elements_kind) {
+ case EXTERNAL_PIXEL_ELEMENTS: {
+ val = AddInstruction(new(zone) HClampToUint8(val));
+ break;
+ }
+ case EXTERNAL_BYTE_ELEMENTS:
+ case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ case EXTERNAL_SHORT_ELEMENTS:
+ case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+ case EXTERNAL_INT_ELEMENTS:
+ case EXTERNAL_UNSIGNED_INT_ELEMENTS: {
+ break;
+ }
+ case EXTERNAL_FLOAT_ELEMENTS:
+ case EXTERNAL_DOUBLE_ELEMENTS:
+ break;
+ case FAST_SMI_ELEMENTS:
+ case FAST_ELEMENTS:
+ case FAST_DOUBLE_ELEMENTS:
+ case FAST_HOLEY_SMI_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS:
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
+ case DICTIONARY_ELEMENTS:
+ case NON_STRICT_ARGUMENTS_ELEMENTS:
+ UNREACHABLE();
+ break;
+ }
+ return new(zone) HStoreKeyed(external_elements, checked_key,
+ val, elements_kind);
+ } else {
+ ASSERT(val == NULL);
+ HLoadKeyed* load =
+ new(zone) HLoadKeyed(
+ external_elements, checked_key, dependency, elements_kind);
+ if (FLAG_opt_safe_uint32_operations &&
+ elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) {
+ graph()->RecordUint32Instruction(load);
+ }
+ return load;
+ }
+}
+
+
+HInstruction* HGraphBuilder::BuildFastElementAccess(
+ HValue* elements,
+ HValue* checked_key,
+ HValue* val,
+ HValue* load_dependency,
+ ElementsKind elements_kind,
+ bool is_store) {
+ Zone* zone = this->zone();
+ if (is_store) {
+ ASSERT(val != NULL);
+ switch (elements_kind) {
+ case FAST_SMI_ELEMENTS:
+ case FAST_HOLEY_SMI_ELEMENTS:
+ // Smi-only arrays need a smi check.
+ AddInstruction(new(zone) HCheckSmi(val));
+ // Fall through.
+ case FAST_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS:
+ case FAST_DOUBLE_ELEMENTS:
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
+ return new(zone) HStoreKeyed(elements, checked_key, val, elements_kind);
+ default:
+ UNREACHABLE();
+ return NULL;
+ }
+ }
+ // It's an element load (!is_store).
+ return new(zone) HLoadKeyed(elements,
+ checked_key,
+ load_dependency,
+ elements_kind);
+}
+
+
+HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
+ HValue* object,
+ HValue* key,
+ HValue* val,
+ HCheckMaps* mapcheck,
+ bool is_js_array,
+ ElementsKind elements_kind,
+ bool is_store,
+ Representation checked_index_representation) {
+ Zone* zone = this->zone();
+ // No GVNFlag is necessary for ElementsKind if there is an explicit dependency
+ // on a HElementsTransition instruction. The flag can also be removed if the
+ // map to check has FAST_HOLEY_ELEMENTS, since there can be no further
+  // ElementsKind transitions. Finally, the dependency can be removed for
+  // FAST_ELEMENTS stores, since a transition to HOLEY elements won't change
+  // the generated store code.
+ if ((elements_kind == FAST_HOLEY_ELEMENTS) ||
+ (elements_kind == FAST_ELEMENTS && is_store)) {
+ if (mapcheck != NULL) {
+ mapcheck->ClearGVNFlag(kDependsOnElementsKind);
+ }
+ }
+ bool fast_smi_only_elements = IsFastSmiElementsKind(elements_kind);
+ bool fast_elements = IsFastObjectElementsKind(elements_kind);
+ HInstruction* elements =
+ AddInstruction(new(zone) HLoadElements(object, mapcheck));
+ if (is_store && (fast_elements || fast_smi_only_elements)) {
+ HCheckMaps* check_cow_map = new(zone) HCheckMaps(
+ elements, graph()->isolate()->factory()->fixed_array_map(), zone);
+ check_cow_map->ClearGVNFlag(kDependsOnElementsKind);
+ AddInstruction(check_cow_map);
+ }
+ HInstruction* length = NULL;
+ HInstruction* checked_key = NULL;
+ if (IsExternalArrayElementsKind(elements_kind)) {
+ length = AddInstruction(new(zone) HFixedArrayBaseLength(elements));
+ checked_key = AddBoundsCheck(
+ key, length, ALLOW_SMI_KEY, checked_index_representation);
+ HLoadExternalArrayPointer* external_elements =
+ new(zone) HLoadExternalArrayPointer(elements);
+ AddInstruction(external_elements);
+ return BuildExternalArrayElementAccess(
+ external_elements, checked_key, val, mapcheck,
+ elements_kind, is_store);
+ }
+ ASSERT(fast_smi_only_elements ||
+ fast_elements ||
+ IsFastDoubleElementsKind(elements_kind));
+ if (is_js_array) {
+ length = AddInstruction(new(zone) HJSArrayLength(object, mapcheck,
+ HType::Smi()));
+ } else {
+ length = AddInstruction(new(zone) HFixedArrayBaseLength(elements));
+ }
+ checked_key = AddBoundsCheck(
+ key, length, ALLOW_SMI_KEY, checked_index_representation);
+ return BuildFastElementAccess(elements, checked_key, val, mapcheck,
+ elements_kind, is_store);
+}
+
+
+HValue* HGraphBuilder::BuildAllocateElements(HContext* context,
+ ElementsKind kind,
+ HValue* capacity) {
+ Zone* zone = this->zone();
+
+ int elements_size = IsFastDoubleElementsKind(kind)
+ ? kDoubleSize : kPointerSize;
+ HConstant* elements_size_value =
+ new(zone) HConstant(elements_size, Representation::Integer32());
+ AddInstruction(elements_size_value);
+ HValue* mul = AddInstruction(
+ HMul::New(zone, context, capacity, elements_size_value));
+ mul->ChangeRepresentation(Representation::Integer32());
+ mul->ClearFlag(HValue::kCanOverflow);
+
+ HConstant* header_size =
+ new(zone) HConstant(FixedArray::kHeaderSize, Representation::Integer32());
+ AddInstruction(header_size);
+ HValue* total_size = AddInstruction(
+ HAdd::New(zone, context, mul, header_size));
+ total_size->ChangeRepresentation(Representation::Integer32());
+ total_size->ClearFlag(HValue::kCanOverflow);
+
+ HAllocate::Flags flags = HAllocate::CAN_ALLOCATE_IN_NEW_SPACE;
+ if (IsFastDoubleElementsKind(kind)) {
+ flags = static_cast<HAllocate::Flags>(
+ flags | HAllocate::ALLOCATE_DOUBLE_ALIGNED);
+ }
+
+ HValue* elements =
+ AddInstruction(new(zone) HAllocate(context, total_size,
+ HType::JSArray(), flags));
+ Isolate* isolate = graph()->isolate();
+
+ Factory* factory = isolate->factory();
+ Handle<Map> map = IsFastDoubleElementsKind(kind)
+ ? factory->fixed_double_array_map()
+ : factory->fixed_array_map();
+ BuildStoreMap(elements, map, BailoutId::StubEntry());
+
+ Handle<String> fixed_array_length_field_name =
+ isolate->factory()->length_field_string();
+ HInstruction* store_length =
+ new(zone) HStoreNamedField(elements, fixed_array_length_field_name,
+ capacity, true, FixedArray::kLengthOffset);
+ AddInstruction(store_length);
+ AddSimulate(BailoutId::StubEntry(), FIXED_SIMULATE);
+
+ return elements;
+}
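
The size computation is straightforward arithmetic; a worked case (not from the patch):

    // kind = FAST_DOUBLE_ELEMENTS, capacity = 4, 64-bit (kPointerSize == 8):
    //   total_size = 4 * kDoubleSize + FixedArray::kHeaderSize
    //              = 4 * 8 + 16 = 48 bytes, double-aligned in new space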
+
+
+HInstruction* HGraphBuilder::BuildStoreMap(HValue* object,
+ HValue* map,
+ BailoutId id) {
+ Zone* zone = this->zone();
+ Isolate* isolate = graph()->isolate();
+ Factory* factory = isolate->factory();
+ Handle<String> map_field_name = factory->map_field_string();
+ HInstruction* store_map =
+ new(zone) HStoreNamedField(object, map_field_name, map,
+ true, JSObject::kMapOffset);
+ store_map->SetGVNFlag(kChangesMaps);
+ AddInstruction(store_map);
+ AddSimulate(id, FIXED_SIMULATE);
+ return store_map;
+}
+
+
+HInstruction* HGraphBuilder::BuildStoreMap(HValue* object,
+ Handle<Map> map,
+ BailoutId id) {
+ Zone* zone = this->zone();
+ HValue* map_constant =
+ AddInstruction(new(zone) HConstant(map, Representation::Tagged()));
+ return BuildStoreMap(object, map_constant, id);
+}
+
+
+void HGraphBuilder::BuildCopyElements(HContext* context,
+ HValue* from_elements,
+ ElementsKind from_elements_kind,
+ HValue* to_elements,
+ ElementsKind to_elements_kind,
+ HValue* length) {
+ LoopBuilder builder(this, context, LoopBuilder::kPostIncrement,
+ BailoutId::StubEntry());
+
+ HValue* key = builder.BeginBody(graph()->GetConstant0(),
+ length, Token::LT);
+
+ HValue* element =
+ AddInstruction(new(zone()) HLoadKeyed(from_elements, key, NULL,
+ from_elements_kind,
+ ALLOW_RETURN_HOLE));
+
+ AddInstruction(new(zone()) HStoreKeyed(to_elements, key, element,
+ to_elements_kind));
+ AddSimulate(BailoutId::StubEntry(), REMOVABLE_SIMULATE);
+
+ builder.EndBody();
+}
+
+
+HOptimizedGraphBuilder::HOptimizedGraphBuilder(CompilationInfo* info,
+ TypeFeedbackOracle* oracle)
+ : HGraphBuilder(info),
+ function_state_(NULL),
initial_function_state_(this, info, oracle, NORMAL_RETURN),
ast_context_(NULL),
break_scope_(NULL),
- graph_(NULL),
- current_block_(NULL),
inlined_count_(0),
globals_(10, info->zone()),
- zone_(info->zone()),
inline_bailout_(false) {
// This is not initialized in the initializer list because the
// constructor for the initial state relies on function_state_ == NULL
// to know it's the initial state.
  function_state_ = &initial_function_state_;
+ InitializeAstVisitor();
}
-HBasicBlock* HGraphBuilder::CreateJoin(HBasicBlock* first,
- HBasicBlock* second,
- BailoutId join_id) {
+
+HBasicBlock* HOptimizedGraphBuilder::CreateJoin(HBasicBlock* first,
+ HBasicBlock* second,
+ BailoutId join_id) {
if (first == NULL) {
return second;
} else if (second == NULL) {
return first;
} else {
- HBasicBlock* join_block = graph_->CreateBasicBlock();
+ HBasicBlock* join_block = graph()->CreateBasicBlock();
first->Goto(join_block);
second->Goto(join_block);
join_block->SetJoinId(join_id);
@@ -651,9 +1173,9 @@ HBasicBlock* HGraphBuilder::CreateJoin(HBasicBlock* first,
}
-HBasicBlock* HGraphBuilder::JoinContinue(IterationStatement* statement,
- HBasicBlock* exit_block,
- HBasicBlock* continue_block) {
+HBasicBlock* HOptimizedGraphBuilder::JoinContinue(IterationStatement* statement,
+ HBasicBlock* exit_block,
+ HBasicBlock* continue_block) {
if (continue_block != NULL) {
if (exit_block != NULL) exit_block->Goto(continue_block);
continue_block->SetJoinId(statement->ContinueId());
@@ -663,11 +1185,11 @@ HBasicBlock* HGraphBuilder::JoinContinue(IterationStatement* statement,
}
-HBasicBlock* HGraphBuilder::CreateLoop(IterationStatement* statement,
- HBasicBlock* loop_entry,
- HBasicBlock* body_exit,
- HBasicBlock* loop_successor,
- HBasicBlock* break_block) {
+HBasicBlock* HOptimizedGraphBuilder::CreateLoop(IterationStatement* statement,
+ HBasicBlock* loop_entry,
+ HBasicBlock* body_exit,
+ HBasicBlock* loop_successor,
+ HBasicBlock* break_block) {
if (body_exit != NULL) body_exit->Goto(loop_entry);
loop_entry->PostProcessLoopHeader(statement);
if (break_block != NULL) {
@@ -697,9 +1219,18 @@ HGraph::HGraph(CompilationInfo* info)
zone_(info->zone()),
is_recursive_(false),
use_optimistic_licm_(false),
+ has_soft_deoptimize_(false),
type_change_checksum_(0) {
- start_environment_ =
- new(zone_) HEnvironment(NULL, info->scope(), info->closure(), zone_);
+ if (info->IsStub()) {
+ HydrogenCodeStub* stub = info->code_stub();
+ int param_count =
+ stub->GetInterfaceDescriptor(isolate_)->register_param_count_;
+ start_environment_ =
+ new(zone_) HEnvironment(zone_, param_count);
+ } else {
+ start_environment_ =
+ new(zone_) HEnvironment(NULL, info->scope(), info->closure(), zone_);
+ }
start_environment_->set_ast_id(BailoutId::FunctionEntry());
entry_block_ = CreateBasicBlock();
entry_block_->SetInitialEnvironment(start_environment_);
@@ -1059,13 +1590,18 @@ void HGraph::AssignDominators() {
}
}
+
// Mark all blocks that are dominated by an unconditional soft deoptimize to
// prevent code motion across those blocks.
void HGraph::PropagateDeoptimizingMark() {
HPhase phase("H_Propagate deoptimizing mark", this);
+ // Skip this phase if there is nothing to be done anyway.
+ if (!has_soft_deoptimize()) return;
MarkAsDeoptimizingRecursively(entry_block());
+ NullifyUnreachableInstructions();
}
+
void HGraph::MarkAsDeoptimizingRecursively(HBasicBlock* block) {
for (int i = 0; i < block->dominated_blocks()->length(); ++i) {
HBasicBlock* dominated = block->dominated_blocks()->at(i);
@@ -1074,37 +1610,112 @@ void HGraph::MarkAsDeoptimizingRecursively(HBasicBlock* block) {
}
}
-void HGraph::EliminateRedundantPhis() {
- HPhase phase("H_Redundant phi elimination", this);
- // Worklist of phis that can potentially be eliminated. Initialized with
- // all phi nodes. When elimination of a phi node modifies another phi node
- // the modified phi node is added to the worklist.
- ZoneList<HPhi*> worklist(blocks_.length(), zone());
- for (int i = 0; i < blocks_.length(); ++i) {
- worklist.AddAll(*blocks_[i]->phis(), zone());
+void HGraph::NullifyUnreachableInstructions() {
+ if (!FLAG_unreachable_code_elimination) return;
+ int block_count = blocks_.length();
+ for (int i = 0; i < block_count; ++i) {
+ HBasicBlock* block = blocks_.at(i);
+ bool nullify = false;
+ const ZoneList<HBasicBlock*>* predecessors = block->predecessors();
+ int predecessors_length = predecessors->length();
+ bool all_predecessors_deoptimizing = (predecessors_length > 0);
+ for (int j = 0; j < predecessors_length; ++j) {
+ if (!predecessors->at(j)->IsDeoptimizing()) {
+ all_predecessors_deoptimizing = false;
+ break;
+ }
+ }
+ if (all_predecessors_deoptimizing) nullify = true;
+ for (HInstruction* instr = block->first(); instr != NULL;
+ instr = instr->next()) {
+ // Leave the basic structure of the graph intact.
+ if (instr->IsBlockEntry()) continue;
+ if (instr->IsControlInstruction()) continue;
+ if (instr->IsSimulate()) continue;
+ if (instr->IsEnterInlined()) continue;
+ if (instr->IsLeaveInlined()) continue;
+ if (nullify) {
+ HInstruction* last_dummy = NULL;
+ for (int j = 0; j < instr->OperandCount(); ++j) {
+ HValue* operand = instr->OperandAt(j);
+          // Insert an HDummyUse for each operand, unless the operand
+          // is an HDummyUse itself. If such a dummy use comes from the
+          // same block, remember it as a potential replacement for the
+          // instruction.
+ if (operand->IsDummyUse()) {
+ if (operand->block() == instr->block() &&
+ last_dummy == NULL) {
+ last_dummy = HInstruction::cast(operand);
+ }
+ continue;
+ }
+ if (operand->IsControlInstruction()) {
+ // Inserting a dummy use for a value that's not defined anywhere
+ // will fail. Some instructions define fake inputs on such
+ // values as control flow dependencies.
+ continue;
+ }
+ HDummyUse* dummy = new(zone()) HDummyUse(operand);
+ dummy->InsertBefore(instr);
+ last_dummy = dummy;
+ }
+ if (last_dummy == NULL) last_dummy = GetConstant1();
+ instr->DeleteAndReplaceWith(last_dummy);
+ continue;
+ }
+ if (instr->IsSoftDeoptimize()) {
+ ASSERT(block->IsDeoptimizing());
+ nullify = true;
+ }
+ }
}
+}
- while (!worklist.is_empty()) {
- HPhi* phi = worklist.RemoveLast();
- HBasicBlock* block = phi->block();
-
- // Skip phi node if it was already replaced.
- if (block == NULL) continue;
- // Get replacement value if phi is redundant.
- HValue* replacement = phi->GetRedundantReplacement();
+// Replace all phis consisting of a single non-loop operand plus any number of
+// loop operands by that single non-loop operand.
+void HGraph::EliminateRedundantPhis() {
+ HPhase phase("H_Redundant phi elimination", this);
- if (replacement != NULL) {
- // Iterate through the uses and replace them all.
- for (HUseIterator it(phi->uses()); !it.Done(); it.Advance()) {
- HValue* value = it.value();
- value->SetOperandAt(it.index(), replacement);
- if (value->IsPhi()) worklist.Add(HPhi::cast(value), zone());
+  // We do a simple fixed point iteration without any work list, because a
+  // work list on machine-generated JavaScript (which can lead to a very
+  // dense Hydrogen graph) can grow enormous and run the compiler out of
+  // memory. Experiments showed that this simple algorithm is good enough,
+  // and that refinements such as tracking the set or range of blocks to
+  // consider bring no real improvement.
+ bool need_another_iteration;
+ ZoneList<HPhi*> redundant_phis(blocks_.length(), zone());
+ do {
+ need_another_iteration = false;
+ for (int i = 0; i < blocks_.length(); ++i) {
+ HBasicBlock* block = blocks_[i];
+ for (int j = 0; j < block->phis()->length(); j++) {
+ HPhi* phi = block->phis()->at(j);
+ HValue* replacement = phi->GetRedundantReplacement();
+ if (replacement != NULL) {
+ // Remember phi to avoid concurrent modification of the block's phis.
+ redundant_phis.Add(phi, zone());
+ for (HUseIterator it(phi->uses()); !it.Done(); it.Advance()) {
+ HValue* value = it.value();
+ value->SetOperandAt(it.index(), replacement);
+ need_another_iteration |= value->IsPhi();
+ }
+ }
}
- block->RemovePhi(phi);
+ for (int i = 0; i < redundant_phis.length(); i++) {
+ block->RemovePhi(redundant_phis[i]);
+ }
+ redundant_phis.Clear();
+ }
+ } while (need_another_iteration);
+
+#ifdef DEBUG
+ // Make sure that we *really* removed all redundant phis.
+ for (int i = 0; i < blocks_.length(); ++i) {
+ for (int j = 0; j < blocks_[i]->phis()->length(); j++) {
+ ASSERT(blocks_[i]->phis()->at(j)->GetRedundantReplacement() == NULL);
}
}
+#endif
}
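
For intuition (illustrative pseudo-IR): a phi whose single non-loop operand is x collapses to x, and the substitution can cascade, which is why the loop runs to a fixed point:

    p = Phi(x, p)   // back edge carries p itself  =>  all uses of p become x
    q = Phi(p, p)   // becomes Phi(x, x) after the rewrite  =>  redundant on
                    // the next iteration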
@@ -1291,12 +1902,12 @@ void HRangeAnalysis::Analyze(HBasicBlock* block) {
void HRangeAnalysis::InferControlFlowRange(HCompareIDAndBranch* test,
HBasicBlock* dest) {
ASSERT((test->FirstSuccessor() == dest) == (test->SecondSuccessor() != dest));
- if (test->GetInputRepresentation().IsInteger32()) {
+ if (test->representation().IsInteger32()) {
Token::Value op = test->token();
if (test->SecondSuccessor() == dest) {
op = Token::NegateCompareOp(op);
}
- Token::Value inverted_op = Token::InvertCompareOp(op);
+ Token::Value inverted_op = Token::ReverseCompareOp(op);
UpdateControlFlowRange(op, test->left(), test->right());
UpdateControlFlowRange(inverted_op, test->right(), test->left());
}
@@ -1989,7 +2600,7 @@ void HGlobalValueNumberer::ProcessLoopBlock(
bool HGlobalValueNumberer::AllowCodeMotion() {
- return info()->shared_info()->opt_count() + 1 < FLAG_max_opt_count;
+ return info()->IsStub() || info()->opt_count() + 1 < FLAG_max_opt_count;
}
@@ -2190,7 +2801,8 @@ void HGlobalValueNumberer::AnalyzeGraph() {
map->Add(instr, zone());
}
}
- if (instr->CheckFlag(HValue::kTrackSideEffectDominators)) {
+ if (instr->IsLinked() &&
+ instr->CheckFlag(HValue::kTrackSideEffectDominators)) {
for (int i = 0; i < kNumberOfTrackedSideEffects; i++) {
HValue* other = dominators->at(i);
GVNFlag changes_flag = HValue::ChangesFlagFromInt(i);
@@ -2239,32 +2851,8 @@ void HGlobalValueNumberer::AnalyzeGraph() {
}
-class HInferRepresentation BASE_EMBEDDED {
- public:
- explicit HInferRepresentation(HGraph* graph)
- : graph_(graph),
- worklist_(8, graph->zone()),
- in_worklist_(graph->GetMaximumValueID(), graph->zone()) { }
-
- void Analyze();
-
- private:
- Representation TryChange(HValue* current);
- void AddToWorklist(HValue* current);
- void InferBasedOnInputs(HValue* current);
- void AddDependantsToWorklist(HValue* current);
- void InferBasedOnUses(HValue* current);
-
- Zone* zone() const { return graph_->zone(); }
-
- HGraph* graph_;
- ZoneList<HValue*> worklist_;
- BitVector in_worklist_;
-};
-
-
void HInferRepresentation::AddToWorklist(HValue* current) {
- if (current->representation().IsSpecialization()) return;
+ if (current->representation().IsTagged()) return;
if (!current->CheckFlag(HValue::kFlexibleRepresentation)) return;
if (in_worklist_.Contains(current->id())) return;
worklist_.Add(current, zone());
@@ -2272,105 +2860,6 @@ void HInferRepresentation::AddToWorklist(HValue* current) {
}
-// This method tries to specialize the representation type of the value
-// given as a parameter. The value is asked to infer its representation type
-// based on its inputs. If the inferred type is more specialized, then this
-// becomes the new representation type of the node.
-void HInferRepresentation::InferBasedOnInputs(HValue* current) {
- Representation r = current->representation();
- if (r.IsSpecialization()) return;
- ASSERT(current->CheckFlag(HValue::kFlexibleRepresentation));
- Representation inferred = current->InferredRepresentation();
- if (inferred.IsSpecialization()) {
- if (FLAG_trace_representation) {
- PrintF("Changing #%d representation %s -> %s based on inputs\n",
- current->id(),
- r.Mnemonic(),
- inferred.Mnemonic());
- }
- current->ChangeRepresentation(inferred);
- AddDependantsToWorklist(current);
- }
-}
-
-
-void HInferRepresentation::AddDependantsToWorklist(HValue* value) {
- for (HUseIterator it(value->uses()); !it.Done(); it.Advance()) {
- AddToWorklist(it.value());
- }
- for (int i = 0; i < value->OperandCount(); ++i) {
- AddToWorklist(value->OperandAt(i));
- }
-}
-
-
-// This method calculates whether specializing the representation of the value
-// given as the parameter has a benefit in terms of less necessary type
-// conversions. If there is a benefit, then the representation of the value is
-// specialized.
-void HInferRepresentation::InferBasedOnUses(HValue* value) {
- Representation r = value->representation();
- if (r.IsSpecialization() || value->HasNoUses()) return;
- ASSERT(value->CheckFlag(HValue::kFlexibleRepresentation));
- Representation new_rep = TryChange(value);
- if (!new_rep.IsNone()) {
- if (!value->representation().Equals(new_rep)) {
- if (FLAG_trace_representation) {
- PrintF("Changing #%d representation %s -> %s based on uses\n",
- value->id(),
- r.Mnemonic(),
- new_rep.Mnemonic());
- }
- value->ChangeRepresentation(new_rep);
- AddDependantsToWorklist(value);
- }
- }
-}
-
-
-Representation HInferRepresentation::TryChange(HValue* value) {
- // Array of use counts for each representation.
- int use_count[Representation::kNumRepresentations] = { 0 };
-
- for (HUseIterator it(value->uses()); !it.Done(); it.Advance()) {
- HValue* use = it.value();
- Representation rep = use->ObservedInputRepresentation(it.index());
- if (rep.IsNone()) continue;
- if (FLAG_trace_representation) {
- PrintF("%d %s is used by %d %s as %s\n",
- value->id(),
- value->Mnemonic(),
- use->id(),
- use->Mnemonic(),
- rep.Mnemonic());
- }
- if (use->IsPhi()) HPhi::cast(use)->AddIndirectUsesTo(&use_count[0]);
- use_count[rep.kind()] += use->LoopWeight();
- }
- int tagged_count = use_count[Representation::kTagged];
- int double_count = use_count[Representation::kDouble];
- int int32_count = use_count[Representation::kInteger32];
- int non_tagged_count = double_count + int32_count;
-
- // If a non-loop phi has tagged uses, don't convert it to untagged.
- if (value->IsPhi() && !value->block()->IsLoopHeader() && tagged_count > 0) {
- return Representation::None();
- }
-
- // Prefer unboxing over boxing, the latter is more expensive.
- if (tagged_count > non_tagged_count) return Representation::None();
-
- // Prefer Integer32 over Double, if possible.
- if (int32_count > 0 && value->IsConvertibleToInteger()) {
- return Representation::Integer32();
- }
-
- if (double_count > 0) return Representation::Double();
-
- return Representation::None();
-}
-
-
void HInferRepresentation::Analyze() {
HPhase phase("H_Infer representations", graph_);
@@ -2421,7 +2910,6 @@ void HInferRepresentation::Analyze() {
it.Advance()) {
HPhi* phi = phi_list->at(it.Current());
phi->set_is_convertible_to_integer(false);
- phi->ResetInteger32Uses();
}
}
@@ -2457,8 +2945,74 @@ void HInferRepresentation::Analyze() {
while (!worklist_.is_empty()) {
HValue* current = worklist_.RemoveLast();
in_worklist_.Remove(current->id());
- InferBasedOnInputs(current);
- InferBasedOnUses(current);
+ current->InferRepresentation(this);
+ }
+
+  // Lastly, any instruction for which we have no representation information
+  // defaults to Tagged.
+ for (int i = 0; i < graph_->blocks()->length(); ++i) {
+ HBasicBlock* block = graph_->blocks()->at(i);
+ const ZoneList<HPhi*>* phis = block->phis();
+ for (int j = 0; j < phis->length(); ++j) {
+ HPhi* phi = phis->at(j);
+ if (phi->representation().IsNone()) {
+ phi->ChangeRepresentation(Representation::Tagged());
+ }
+ }
+ for (HInstruction* current = block->first();
+ current != NULL; current = current->next()) {
+ if (current->representation().IsNone() &&
+ current->CheckFlag(HInstruction::kFlexibleRepresentation)) {
+ current->ChangeRepresentation(Representation::Tagged());
+ }
+ }
+ }
+}
+
+
+void HGraph::MergeRemovableSimulates() {
+ for (int i = 0; i < blocks()->length(); ++i) {
+ HBasicBlock* block = blocks()->at(i);
+ // Always reset the folding candidate at the start of a block.
+ HSimulate* folding_candidate = NULL;
+ // Nasty heuristic: Never remove the first simulate in a block. This
+ // just so happens to have a beneficial effect on register allocation.
+ bool first = true;
+ for (HInstruction* current = block->first();
+ current != NULL; current = current->next()) {
+ if (current->IsLeaveInlined()) {
+ // Never fold simulates from inlined environments into simulates
+ // in the outer environment.
+ // (Before each HEnterInlined, there is a non-foldable HSimulate
+ // anyway, so we get the barrier in the other direction for free.)
+ if (folding_candidate != NULL) {
+ folding_candidate->DeleteAndReplaceWith(NULL);
+ }
+ folding_candidate = NULL;
+ continue;
+ }
+ // If we have an HSimulate and a candidate, perform the folding.
+ if (!current->IsSimulate()) continue;
+ if (first) {
+ first = false;
+ continue;
+ }
+ HSimulate* current_simulate = HSimulate::cast(current);
+ if (folding_candidate != NULL) {
+ folding_candidate->MergeInto(current_simulate);
+ folding_candidate->DeleteAndReplaceWith(NULL);
+ folding_candidate = NULL;
+ }
+ // Check if the current simulate is a candidate for folding.
+ if (current_simulate->previous()->HasObservableSideEffects() &&
+ !current_simulate->next()->IsSimulate()) {
+ continue;
+ }
+ if (!current_simulate->is_candidate_for_removal()) {
+ continue;
+ }
+ folding_candidate = current_simulate;
+ }
}
}
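
Sketch of the fold (ids and ops illustrative): a removable simulate is merged into the next simulate encountered in its block and then deleted:

    // before:                      // after:
    v = op1                         v = op1
    Simulate(id1)                   w = op2
    w = op2                         Simulate(id2)  // now carries id1's
    Simulate(id2)                                  // merged pushed values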
@@ -2553,7 +3107,6 @@ void HGraph::InsertRepresentationChangeForUse(HValue* value,
} else {
next = HInstruction::cast(use_value);
}
-
// For constants we try to make the representation change at compile
// time. When a representation change is not possible without loss of
// information we treat constants like normal instructions and insert the
@@ -2565,7 +3118,7 @@ void HGraph::InsertRepresentationChangeForUse(HValue* value,
if (value->IsConstant()) {
HConstant* constant = HConstant::cast(value);
// Try to create a new copy of the constant with the new representation.
- new_value = is_truncating
+ new_value = (is_truncating && to.IsInteger32())
? constant->CopyToTruncatedInt32(zone())
: constant->CopyToRepresentation(to, zone());
}
@@ -2625,9 +3178,23 @@ void HGraph::InsertRepresentationChanges() {
for (int i = 0; i < phi_list()->length(); i++) {
HPhi* phi = phi_list()->at(i);
if (!phi->CheckFlag(HValue::kTruncatingToInt32)) continue;
- if (!phi->CheckUsesForFlag(HValue::kTruncatingToInt32)) {
- phi->ClearFlag(HValue::kTruncatingToInt32);
- change = true;
+ for (HUseIterator it(phi->uses()); !it.Done(); it.Advance()) {
+ // If a Phi is used as a non-truncating int32 or as a double,
+ // clear its "truncating" flag.
+ HValue* use = it.value();
+ Representation input_representation =
+ use->RequiredInputRepresentation(it.index());
+ if ((input_representation.IsInteger32() &&
+ !use->CheckFlag(HValue::kTruncatingToInt32)) ||
+ input_representation.IsDouble()) {
+ if (FLAG_trace_representation) {
+ PrintF("#%d Phi is not truncating because of #%d %s\n",
+ phi->id(), it.value()->id(), it.value()->Mnemonic());
+ }
+ phi->ClearFlag(HValue::kTruncatingToInt32);
+ change = true;
+ break;
+ }
}
}
}
@@ -2642,8 +3209,9 @@ void HGraph::InsertRepresentationChanges() {
// Process normal instructions.
HInstruction* current = blocks_[i]->first();
while (current != NULL) {
+ HInstruction* next = current->next();
InsertRepresentationChangesForValue(current);
- current = current->next();
+ current = next;
}
}
}
@@ -2932,7 +3500,7 @@ void HGraph::ComputeMinusZeroChecks() {
// Implementation of utility class to encapsulate the translation state for
// a (possibly inlined) function.
-FunctionState::FunctionState(HGraphBuilder* owner,
+FunctionState::FunctionState(HOptimizedGraphBuilder* owner,
CompilationInfo* info,
TypeFeedbackOracle* oracle,
InliningKind inlining_kind)
@@ -2981,7 +3549,7 @@ FunctionState::~FunctionState() {
// Implementation of utility classes to represent an expression's context in
// the AST.
-AstContext::AstContext(HGraphBuilder* owner, Expression::Context kind)
+AstContext::AstContext(HOptimizedGraphBuilder* owner, Expression::Context kind)
: owner_(owner),
kind_(kind),
outer_(owner->ast_context()),
@@ -3038,7 +3606,9 @@ void TestContext::ReturnValue(HValue* value) {
void EffectContext::ReturnInstruction(HInstruction* instr, BailoutId ast_id) {
ASSERT(!instr->IsControlInstruction());
owner()->AddInstruction(instr);
- if (instr->HasObservableSideEffects()) owner()->AddSimulate(ast_id);
+ if (instr->HasObservableSideEffects()) {
+ owner()->AddSimulate(ast_id, REMOVABLE_SIMULATE);
+ }
}
@@ -3062,7 +3632,9 @@ void ValueContext::ReturnInstruction(HInstruction* instr, BailoutId ast_id) {
}
owner()->AddInstruction(instr);
owner()->Push(instr);
- if (instr->HasObservableSideEffects()) owner()->AddSimulate(ast_id);
+ if (instr->HasObservableSideEffects()) {
+ owner()->AddSimulate(ast_id, REMOVABLE_SIMULATE);
+ }
}
@@ -3088,13 +3660,13 @@ void ValueContext::ReturnControl(HControlInstruction* instr, BailoutId ast_id) {
void TestContext::ReturnInstruction(HInstruction* instr, BailoutId ast_id) {
ASSERT(!instr->IsControlInstruction());
- HGraphBuilder* builder = owner();
+ HOptimizedGraphBuilder* builder = owner();
builder->AddInstruction(instr);
// We expect a simulate after every expression with side effects, though
// this one isn't actually needed (and wouldn't work if it were targeted).
if (instr->HasObservableSideEffects()) {
builder->Push(instr);
- builder->AddSimulate(ast_id);
+ builder->AddSimulate(ast_id, REMOVABLE_SIMULATE);
builder->Pop();
}
BuildBranch(instr);
@@ -3119,10 +3691,20 @@ void TestContext::BuildBranch(HValue* value) {
// connects a branch node to a join node. We conservatively ensure that
// property by always adding an empty block on the outgoing edges of this
// branch.
- HGraphBuilder* builder = owner();
+ HOptimizedGraphBuilder* builder = owner();
if (value != NULL && value->CheckFlag(HValue::kIsArguments)) {
builder->Bailout("arguments object value in a test context");
}
+ if (value->IsConstant()) {
+ HConstant* constant_value = HConstant::cast(value);
+ if (constant_value->ToBoolean()) {
+ builder->current_block()->Goto(if_true(), builder->function_state());
+ } else {
+ builder->current_block()->Goto(if_false(), builder->function_state());
+ }
+ builder->set_current_block(NULL);
+ return;
+ }
HBasicBlock* empty_true = builder->graph()->CreateBasicBlock();
HBasicBlock* empty_false = builder->graph()->CreateBasicBlock();
TypeFeedbackId test_id = condition()->test_id();
@@ -3130,13 +3712,13 @@ void TestContext::BuildBranch(HValue* value) {
HBranch* test = new(zone()) HBranch(value, empty_true, empty_false, expected);
builder->current_block()->Finish(test);
- empty_true->Goto(if_true(), owner()->function_state());
- empty_false->Goto(if_false(), owner()->function_state());
+ empty_true->Goto(if_true(), builder->function_state());
+ empty_false->Goto(if_false(), builder->function_state());
builder->set_current_block(NULL);
}
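
With the shortcut above, a test whose value folded to a constant no longer materializes an HBranch plus two empty forwarding blocks; the builder emits one unconditional edge and marks the block finished. The decision, reduced to a sketch with stub types rather than the real builder API:

struct Block {
  Block* successor = nullptr;
  void Goto(Block* target) { successor = target; }
};

struct Builder {
  Block* current = nullptr;

  // 'folded' is set when the tested value was a compile-time constant.
  void BuildBranch(bool folded, bool constant_value,
                   Block* if_true, Block* if_false) {
    if (folded) {
      current->Goto(constant_value ? if_true : if_false);
      current = nullptr;  // block finished without a branch
      return;
    }
    // Otherwise: finish with a real branch through two empty blocks.
  }
};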
-// HGraphBuilder infrastructure for bailing out and checking bailouts.
+// HOptimizedGraphBuilder infrastructure for bailing out and checking bailouts.
#define CHECK_BAILOUT(call) \
do { \
call; \
@@ -3151,25 +3733,26 @@ void TestContext::BuildBranch(HValue* value) {
} while (false)
-void HGraphBuilder::Bailout(const char* reason) {
+void HOptimizedGraphBuilder::Bailout(const char* reason) {
info()->set_bailout_reason(reason);
SetStackOverflow();
}
-void HGraphBuilder::VisitForEffect(Expression* expr) {
+void HOptimizedGraphBuilder::VisitForEffect(Expression* expr) {
EffectContext for_effect(this);
Visit(expr);
}
-void HGraphBuilder::VisitForValue(Expression* expr, ArgumentsAllowedFlag flag) {
+void HOptimizedGraphBuilder::VisitForValue(Expression* expr,
+ ArgumentsAllowedFlag flag) {
ValueContext for_value(this, flag);
Visit(expr);
}
-void HGraphBuilder::VisitForTypeOf(Expression* expr) {
+void HOptimizedGraphBuilder::VisitForTypeOf(Expression* expr) {
ValueContext for_value(this, ARGUMENTS_NOT_ALLOWED);
for_value.set_for_typeof(true);
Visit(expr);
@@ -3177,119 +3760,137 @@ void HGraphBuilder::VisitForTypeOf(Expression* expr) {
-void HGraphBuilder::VisitForControl(Expression* expr,
- HBasicBlock* true_block,
- HBasicBlock* false_block) {
+void HOptimizedGraphBuilder::VisitForControl(Expression* expr,
+ HBasicBlock* true_block,
+ HBasicBlock* false_block) {
TestContext for_test(this, expr, oracle(), true_block, false_block);
Visit(expr);
}
-void HGraphBuilder::VisitArgument(Expression* expr) {
+void HOptimizedGraphBuilder::VisitArgument(Expression* expr) {
CHECK_ALIVE(VisitForValue(expr));
Push(AddInstruction(new(zone()) HPushArgument(Pop())));
}
-void HGraphBuilder::VisitArgumentList(ZoneList<Expression*>* arguments) {
+void HOptimizedGraphBuilder::VisitArgumentList(
+ ZoneList<Expression*>* arguments) {
for (int i = 0; i < arguments->length(); i++) {
CHECK_ALIVE(VisitArgument(arguments->at(i)));
}
}
-void HGraphBuilder::VisitExpressions(ZoneList<Expression*>* exprs) {
+void HOptimizedGraphBuilder::VisitExpressions(
+ ZoneList<Expression*>* exprs) {
for (int i = 0; i < exprs->length(); ++i) {
CHECK_ALIVE(VisitForValue(exprs->at(i)));
}
}
-HGraph* HGraphBuilder::CreateGraph() {
- graph_ = new(zone()) HGraph(info());
- if (FLAG_hydrogen_stats) HStatistics::Instance()->Initialize(info());
+bool HOptimizedGraphBuilder::BuildGraph() {
+ Scope* scope = info()->scope();
+ if (scope->HasIllegalRedeclaration()) {
+ Bailout("function with illegal redeclaration");
+ return false;
+ }
+ if (scope->calls_eval()) {
+ Bailout("function calls eval");
+ return false;
+ }
+ SetUpScope(scope);
+
+ // Add an edge to the body entry. This is warty: the graph's start
+ // environment will be used by the Lithium translation as the initial
+ // environment on graph entry, but it has now been mutated by the
+ // Hydrogen translation of the instructions in the start block. This
+ // environment uses values which have not been defined yet. These
+ // Hydrogen instructions will then be replayed by the Lithium
+ // translation, so they cannot have an environment effect. The edge to
+ // the body's entry block (along with some special logic for the start
+ // block in HInstruction::InsertAfter) seals the start block from
+ // getting unwanted instructions inserted.
+ //
+ // TODO(kmillikin): Fix this. Stop mutating the initial environment.
+ // Make the Hydrogen instructions in the initial block into Hydrogen
+ // values (but not instructions), present in the initial environment and
+ // not replayed by the Lithium translation.
+ HEnvironment* initial_env = environment()->CopyWithoutHistory();
+ HBasicBlock* body_entry = CreateBasicBlock(initial_env);
+ current_block()->Goto(body_entry);
+ body_entry->SetJoinId(BailoutId::FunctionEntry());
+ set_current_block(body_entry);
+
+ // Handle implicit declaration of the function name in named function
+ // expressions before other declarations.
+ if (scope->is_function_scope() && scope->function() != NULL) {
+ VisitVariableDeclaration(scope->function());
+ }
+ VisitDeclarations(scope->declarations());
+ AddSimulate(BailoutId::Declarations());
- {
- HPhase phase("H_Block building");
- current_block_ = graph()->entry_block();
+ HValue* context = environment()->LookupContext();
+ AddInstruction(
+ new(zone()) HStackCheck(context, HStackCheck::kFunctionEntry));
- Scope* scope = info()->scope();
- if (scope->HasIllegalRedeclaration()) {
- Bailout("function with illegal redeclaration");
- return NULL;
- }
- if (scope->calls_eval()) {
- Bailout("function calls eval");
- return NULL;
- }
- SetUpScope(scope);
-
- // Add an edge to the body entry. This is warty: the graph's start
- // environment will be used by the Lithium translation as the initial
- // environment on graph entry, but it has now been mutated by the
- // Hydrogen translation of the instructions in the start block. This
- // environment uses values which have not been defined yet. These
- // Hydrogen instructions will then be replayed by the Lithium
- // translation, so they cannot have an environment effect. The edge to
- // the body's entry block (along with some special logic for the start
- // block in HInstruction::InsertAfter) seals the start block from
- // getting unwanted instructions inserted.
- //
- // TODO(kmillikin): Fix this. Stop mutating the initial environment.
- // Make the Hydrogen instructions in the initial block into Hydrogen
- // values (but not instructions), present in the initial environment and
- // not replayed by the Lithium translation.
- HEnvironment* initial_env = environment()->CopyWithoutHistory();
- HBasicBlock* body_entry = CreateBasicBlock(initial_env);
- current_block()->Goto(body_entry);
- body_entry->SetJoinId(BailoutId::FunctionEntry());
- set_current_block(body_entry);
-
- // Handle implicit declaration of the function name in named function
- // expressions before other declarations.
- if (scope->is_function_scope() && scope->function() != NULL) {
- VisitVariableDeclaration(scope->function());
- }
- VisitDeclarations(scope->declarations());
- AddSimulate(BailoutId::Declarations());
+ VisitStatements(info()->function()->body());
+ if (HasStackOverflow()) return false;
- HValue* context = environment()->LookupContext();
- AddInstruction(
- new(zone()) HStackCheck(context, HStackCheck::kFunctionEntry));
+ if (current_block() != NULL) {
+ HReturn* instr = new(zone()) HReturn(graph()->GetConstantUndefined(),
+ context);
+ current_block()->FinishExit(instr);
+ set_current_block(NULL);
+ }
- VisitStatements(info()->function()->body());
- if (HasStackOverflow()) return NULL;
+ // If the checksum of the number of type info changes is the same as the
+ // last time this function was compiled, then this recompile is likely not
+ // due to missing/inadequate type feedback, but rather too aggressive
+ // optimization. Disable optimistic LICM in that case.
+ Handle<Code> unoptimized_code(info()->shared_info()->code());
+ ASSERT(unoptimized_code->kind() == Code::FUNCTION);
+ Handle<TypeFeedbackInfo> type_info(
+ TypeFeedbackInfo::cast(unoptimized_code->type_feedback_info()));
+ int checksum = type_info->own_type_change_checksum();
+ int composite_checksum = graph()->update_type_change_checksum(checksum);
+ graph()->set_use_optimistic_licm(
+ !type_info->matches_inlined_type_change_checksum(composite_checksum));
+ type_info->set_inlined_type_change_checksum(composite_checksum);
- if (current_block() != NULL) {
- HReturn* instr = new(zone()) HReturn(graph()->GetConstantUndefined());
- current_block()->FinishExit(instr);
- set_current_block(NULL);
- }
+ return true;
+}
- // If the checksum of the number of type info changes is the same as the
- // last time this function was compiled, then this recompile is likely not
- // due to missing/inadequate type feedback, but rather too aggressive
- // optimization. Disable optimistic LICM in that case.
- Handle<Code> unoptimized_code(info()->shared_info()->code());
- ASSERT(unoptimized_code->kind() == Code::FUNCTION);
- Handle<Object> maybe_type_info(unoptimized_code->type_feedback_info());
- Handle<TypeFeedbackInfo> type_info(
- Handle<TypeFeedbackInfo>::cast(maybe_type_info));
- int checksum = type_info->own_type_change_checksum();
- int composite_checksum = graph()->update_type_change_checksum(checksum);
- graph()->set_use_optimistic_licm(
- !type_info->matches_inlined_type_change_checksum(composite_checksum));
- type_info->set_inlined_type_change_checksum(composite_checksum);
- }
- return graph();
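
The checksum block that moved into BuildGraph encodes a heuristic: if combining the function's own type-change counter with the counters of everything inlined produces the same composite as at the previous optimized compile, type feedback has not changed, so this recompile was probably triggered by over-aggressive optimization rather than fresh feedback, and optimistic LICM gets switched off. The decision in miniature, with illustrative fields (any mixing function would do):

struct TypeFeedback {
  int own_type_change_checksum = 0;
  int inlined_type_change_checksum = -1;  // recorded at the last compile
};

bool UseOptimisticLicm(TypeFeedback* info, int inlined_checksums) {
  int composite = info->own_type_change_checksum ^ inlined_checksums;
  bool unchanged = (composite == info->inlined_type_change_checksum);
  info->inlined_type_change_checksum = composite;  // remember for next time
  return !unchanged;  // stay optimistic only when feedback actually moved
}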
+void HGraph::GlobalValueNumbering() {
+ // Perform common subexpression elimination and loop-invariant code motion.
+ if (FLAG_use_gvn) {
+ HPhase phase("H_Global value numbering", this);
+ HGlobalValueNumberer gvn(this, info());
+ bool removed_side_effects = gvn.Analyze();
+ // Trigger a second analysis pass to further eliminate duplicate values that
+ // could only be discovered by removing side-effect-generating instructions
+ // during the first pass.
+ if (FLAG_smi_only_arrays && removed_side_effects) {
+ removed_side_effects = gvn.Analyze();
+ ASSERT(!removed_side_effects);
+ }
+ }
}
+
bool HGraph::Optimize(SmartArrayPointer<char>* bailout_reason) {
*bailout_reason = SmartArrayPointer<char>();
OrderBlocks();
AssignDominators();
+ // We need to create a HConstant "zero" now so that GVN will fold every
+ // zero-valued constant in the graph together.
+ // The constant is needed to make the idef-based bounds check pass work:
+ // that pass evaluates relations against "zero", and the zero cannot be
+ // created after GVN has run.
+ GetConstant0();
+
#ifdef DEBUG
// Do a full verify after building the graph and computing dominators.
Verify(true);
@@ -3321,6 +3922,11 @@ bool HGraph::Optimize(SmartArrayPointer<char>* bailout_reason) {
HInferRepresentation rep(this);
rep.Analyze();
+ // Remove HSimulate instructions that have turned out not to be needed
+ // after all by folding them into the following HSimulate.
+ // This must happen after inferring representations.
+ MergeRemovableSimulates();
+
MarkDeoptimizeOnUndefined();
InsertRepresentationChanges();
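
MergeRemovableSimulates is what makes the REMOVABLE_SIMULATE annotations sprinkled through this patch pay off: a removable simulate is a deopt checkpoint carrying nothing the next checkpoint does not, so runs of them collapse forward into the following HSimulate. A sketch of the folding over one block, with a simplified instruction type:

#include <vector>

struct Instr {
  bool is_simulate = false;
  bool removable = false;
  bool deleted = false;
};

void MergeRemovableSimulates(std::vector<Instr*>& block) {
  Instr* pending = nullptr;  // last removable simulate still unmerged
  for (Instr* i : block) {
    if (i->deleted || !i->is_simulate) continue;
    if (pending != nullptr) {
      // In V8 the pending simulate's deopt data is merged into 'i'
      // before the pending instruction is unlinked; here we just mark it.
      pending->deleted = true;
    }
    pending = i->removable ? i : nullptr;
  }
}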
@@ -3333,19 +3939,7 @@ bool HGraph::Optimize(SmartArrayPointer<char>* bailout_reason) {
Canonicalize();
- // Perform common subexpression elimination and loop-invariant code motion.
- if (FLAG_use_gvn) {
- HPhase phase("H_Global value numbering", this);
- HGlobalValueNumberer gvn(this, info());
- bool removed_side_effects = gvn.Analyze();
- // Trigger a second analysis pass to further eliminate duplicate values that
- // could only be discovered by removing side-effect-generating instructions
- // during the first pass.
- if (FLAG_smi_only_arrays && removed_side_effects) {
- removed_side_effects = gvn.Analyze();
- ASSERT(!removed_side_effects);
- }
- }
+ GlobalValueNumbering();
if (FLAG_use_range) {
HRangeAnalysis rangeAnalysis(this);
@@ -3357,14 +3951,54 @@ bool HGraph::Optimize(SmartArrayPointer<char>* bailout_reason) {
HStackCheckEliminator sce(this);
sce.Process();
- EliminateRedundantBoundsChecks();
- DehoistSimpleArrayIndexComputations();
+ if (FLAG_idefs) SetupInformativeDefinitions();
+ if (FLAG_array_bounds_checks_elimination && !FLAG_idefs) {
+ EliminateRedundantBoundsChecks();
+ }
+ if (FLAG_array_index_dehoisting) DehoistSimpleArrayIndexComputations();
if (FLAG_dead_code_elimination) DeadCodeElimination();
+ RestoreActualValues();
+
return true;
}
+void HGraph::SetupInformativeDefinitionsInBlock(HBasicBlock* block) {
+ for (int phi_index = 0; phi_index < block->phis()->length(); phi_index++) {
+ HPhi* phi = block->phis()->at(phi_index);
+ phi->AddInformativeDefinitions();
+ phi->SetFlag(HValue::kIDefsProcessingDone);
+ // We do not support phis that "redefine just one operand".
+ ASSERT(!phi->IsInformativeDefinition());
+ }
+
+ for (HInstruction* i = block->first(); i != NULL; i = i->next()) {
+ i->AddInformativeDefinitions();
+ i->SetFlag(HValue::kIDefsProcessingDone);
+ i->UpdateRedefinedUsesWhileSettingUpInformativeDefinitions();
+ }
+}
+
+
+// This method is recursive, so if its stack frames were large the
+// recursion could cause a stack overflow.
+// To keep the individual stack frames small, we do the actual work inside
+// SetupInformativeDefinitionsInBlock().
+void HGraph::SetupInformativeDefinitionsRecursively(HBasicBlock* block) {
+ SetupInformativeDefinitionsInBlock(block);
+ for (int i = 0; i < block->dominated_blocks()->length(); ++i) {
+ SetupInformativeDefinitionsRecursively(block->dominated_blocks()->at(i));
+ }
+}
+
+
+void HGraph::SetupInformativeDefinitions() {
+ HPhase phase("H_Setup informative definitions", this);
+ SetupInformativeDefinitionsRecursively(entry_block());
+}
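
The comment above is candid about the trade-off: recursion depth still equals dominator-tree depth, and only the frame size is kept small. For comparison, the same pre-order walk can be done with an explicit worklist and no native-stack exposure at all (illustrative types):

#include <vector>

struct Block {
  std::vector<Block*> dominated;
};

void ProcessBlock(Block* b) { (void)b; /* per-block work goes here */ }

void WalkDominatorTree(Block* root) {
  std::vector<Block*> worklist{root};
  while (!worklist.empty()) {
    Block* b = worklist.back();
    worklist.pop_back();
    ProcessBlock(b);
    for (Block* d : b->dominated) worklist.push_back(d);
  }
}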
+
+
// We try to "factor up" HBoundsCheck instructions towards the root of the
// dominator tree.
// For now we handle checks where the index is like "exp + int32value".
@@ -3477,7 +4111,10 @@ class BoundsCheckBbData: public ZoneObject {
// (either upper or lower; note that HasSingleCheck() becomes false).
// Otherwise one of the current checks is modified so that it also covers
// new_offset, and new_check is removed.
- void CoverCheck(HBoundsCheck* new_check,
+ //
+ // If the check cannot be modified because the context is unknown, this
+ // method returns false; otherwise it returns true.
+ bool CoverCheck(HBoundsCheck* new_check,
int32_t new_offset) {
ASSERT(new_check->index()->representation().IsInteger32());
bool keep_new_check = false;
@@ -3488,12 +4125,14 @@ class BoundsCheckBbData: public ZoneObject {
keep_new_check = true;
upper_check_ = new_check;
} else {
- BuildOffsetAdd(upper_check_,
- &added_upper_index_,
- &added_upper_offset_,
- Key()->IndexBase(),
- new_check->index()->representation(),
- new_offset);
+ bool result = BuildOffsetAdd(upper_check_,
+ &added_upper_index_,
+ &added_upper_offset_,
+ Key()->IndexBase(),
+ new_check->index()->representation(),
+ new_offset);
+ if (!result) return false;
+ upper_check_->ReplaceAllUsesWith(upper_check_->index());
upper_check_->SetOperandAt(0, added_upper_index_);
}
} else if (new_offset < lower_offset_) {
@@ -3502,12 +4141,14 @@ class BoundsCheckBbData: public ZoneObject {
keep_new_check = true;
lower_check_ = new_check;
} else {
- BuildOffsetAdd(lower_check_,
- &added_lower_index_,
- &added_lower_offset_,
- Key()->IndexBase(),
- new_check->index()->representation(),
- new_offset);
+ bool result = BuildOffsetAdd(lower_check_,
+ &added_lower_index_,
+ &added_lower_offset_,
+ Key()->IndexBase(),
+ new_check->index()->representation(),
+ new_offset);
+ if (!result) return false;
+ lower_check_->ReplaceAllUsesWith(lower_check_->index());
lower_check_->SetOperandAt(0, added_lower_index_);
}
} else {
@@ -3515,8 +4156,10 @@ class BoundsCheckBbData: public ZoneObject {
}
if (!keep_new_check) {
- new_check->DeleteAndReplaceWith(NULL);
+ new_check->DeleteAndReplaceWith(new_check->ActualValue());
}
+
+ return true;
}
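
The new bool return threads a failure mode through the pass: widening a boundary check means synthesizing an index+offset add, which needs a context that IndexContext() may fail to find. Setting that failure path aside, the interval bookkeeping CoverCheck performs is simply:

#include <cstdint>

// Offsets in [lower, upper] are already proven in bounds for one key.
struct CheckedRange {
  int32_t lower, upper;

  void Cover(int32_t new_offset) {
    if (new_offset >= lower && new_offset <= upper) return;  // subsumed
    if (new_offset > upper) upper = new_offset;  // widen the upper check
    else lower = new_offset;                     // or widen the lower one
  }
};

The real method additionally keeps the new check alive when it can itself serve as the widened boundary, and reports failure so the caller can fall back to a fresh record.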
void RemoveZeroOperations() {
@@ -3552,29 +4195,42 @@ class BoundsCheckBbData: public ZoneObject {
HBasicBlock* basic_block_;
HBoundsCheck* lower_check_;
HBoundsCheck* upper_check_;
- HAdd* added_lower_index_;
+ HInstruction* added_lower_index_;
HConstant* added_lower_offset_;
- HAdd* added_upper_index_;
+ HInstruction* added_upper_index_;
HConstant* added_upper_offset_;
BoundsCheckBbData* next_in_bb_;
BoundsCheckBbData* father_in_dt_;
- void BuildOffsetAdd(HBoundsCheck* check,
- HAdd** add,
+ // Given an existing add instruction and a bounds check, tries to find
+ // the current context (either that of the add or of the check index).
+ HValue* IndexContext(HInstruction* add, HBoundsCheck* check) {
+ if (add != NULL && add->IsAdd()) {
+ return HAdd::cast(add)->context();
+ }
+ if (check->index()->IsBinaryOperation()) {
+ return HBinaryOperation::cast(check->index())->context();
+ }
+ return NULL;
+ }
+
+ // This function returns false if it cannot build the add because the
+ // current context cannot be determined.
+ bool BuildOffsetAdd(HBoundsCheck* check,
+ HInstruction** add,
HConstant** constant,
HValue* original_value,
Representation representation,
int32_t new_offset) {
+ HValue* index_context = IndexContext(*add, check);
+ if (index_context == NULL) return false;
+
HConstant* new_constant = new(BasicBlock()->zone())
HConstant(new_offset, Representation::Integer32());
if (*add == NULL) {
new_constant->InsertBefore(check);
- // Because of the bounds checks elimination algorithm, the index is always
- // an HAdd or an HSub here, so we can safely cast to an HBinaryOperation.
- HValue* context = HBinaryOperation::cast(check->index())->context();
- *add = new(BasicBlock()->zone()) HAdd(context,
- original_value,
- new_constant);
+ (*add) = HAdd::New(
+ BasicBlock()->zone(), index_context, original_value, new_constant);
(*add)->AssumeRepresentation(representation);
(*add)->InsertBefore(check);
} else {
@@ -3582,11 +4238,12 @@ class BoundsCheckBbData: public ZoneObject {
(*constant)->DeleteAndReplaceWith(new_constant);
}
*constant = new_constant;
+ return true;
}
- void RemoveZeroAdd(HAdd** add, HConstant** constant) {
- if (*add != NULL && (*constant)->Integer32Value() == 0) {
- (*add)->DeleteAndReplaceWith((*add)->left());
+ void RemoveZeroAdd(HInstruction** add, HConstant** constant) {
+ if (*add != NULL && (*add)->IsAdd() && (*constant)->Integer32Value() == 0) {
+ (*add)->DeleteAndReplaceWith(HAdd::cast(*add)->left());
(*constant)->DeleteAndReplaceWith(NULL);
}
}
@@ -3634,10 +4291,6 @@ void HGraph::EliminateRedundantBoundsChecks(HBasicBlock* bb,
if (!i->IsBoundsCheck()) continue;
HBoundsCheck* check = HBoundsCheck::cast(i);
- check->ReplaceAllUsesWith(check->index());
-
- if (!FLAG_array_bounds_checks_elimination) continue;
-
int32_t offset;
BoundsCheckKey* key =
BoundsCheckKey::Create(zone(), check, &offset);
@@ -3655,10 +4308,12 @@ void HGraph::EliminateRedundantBoundsChecks(HBasicBlock* bb,
NULL);
*data_p = bb_data_list;
} else if (data->OffsetIsCovered(offset)) {
- check->DeleteAndReplaceWith(NULL);
- } else if (data->BasicBlock() == bb) {
- data->CoverCheck(check, offset);
- } else {
+ check->DeleteAndReplaceWith(check->ActualValue());
+ } else if (data->BasicBlock() != bb ||
+ !data->CoverCheck(check, offset)) {
+ // If the check is in the current BB we try to modify it by calling
+ // "CoverCheck", but if that also fails we record the current offsets
+ // in a new data instance, because from now on they are covered.
int32_t new_lower_offset = offset < data->LowerOffset()
? offset
: data->LowerOffset();
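
The rewritten else-chain makes the three outcomes explicit: a covered check is deleted outright; a check in the same block is folded into the existing range via CoverCheck; anything else, including a CoverCheck that gave up, opens a fresh record whose range spans both the old offsets and the new one. The dictionary-driven skeleton, with illustrative key and ID types:

#include <algorithm>
#include <cstdint>
#include <map>
#include <utility>

struct Range { int32_t lower, upper; };
using Key = std::pair<int, int>;  // (index-base id, length id)

// Returns true when the incoming check may be deleted.
bool ProcessCheck(std::map<Key, Range>& table, Key key, int32_t offset,
                  bool same_block, bool cover_succeeded) {
  auto it = table.find(key);
  if (it == table.end()) {
    table.emplace(key, Range{offset, offset});  // first check: keep it
    return false;
  }
  Range& r = it->second;
  if (offset >= r.lower && offset <= r.upper) return true;  // covered
  if (same_block && cover_succeeded) return true;  // folded by CoverCheck
  // Fresh record covering both the old range and the new offset.
  r.lower = std::min(r.lower, offset);
  r.upper = std::max(r.upper, offset);
  return false;
}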
@@ -3702,7 +4357,7 @@ void HGraph::EliminateRedundantBoundsChecks() {
static void DehoistArrayIndex(ArrayInstructionInterface* array_operation) {
- HValue* index = array_operation->GetKey();
+ HValue* index = array_operation->GetKey()->ActualValue();
if (!index->representation().IsInteger32()) return;
HConstant* constant;
@@ -3750,8 +4405,6 @@ static void DehoistArrayIndex(ArrayInstructionInterface* array_operation) {
void HGraph::DehoistSimpleArrayIndexComputations() {
- if (!FLAG_array_index_dehoisting) return;
-
HPhase phase("H_Dehoist index computations", this);
for (int i = 0; i < blocks()->length(); ++i) {
for (HInstruction* instr = blocks()->at(i)->first();
@@ -3803,33 +4456,58 @@ void HGraph::DeadCodeElimination() {
}
-HInstruction* HGraphBuilder::AddInstruction(HInstruction* instr) {
- ASSERT(current_block() != NULL);
- current_block()->AddInstruction(instr);
- return instr;
-}
+void HGraph::RestoreActualValues() {
+ HPhase phase("H_Restore actual values", this);
+ for (int block_index = 0; block_index < blocks()->length(); block_index++) {
+ HBasicBlock* block = blocks()->at(block_index);
-void HGraphBuilder::AddSimulate(BailoutId ast_id) {
- ASSERT(current_block() != NULL);
- current_block()->AddSimulate(ast_id);
+#ifdef DEBUG
+ for (int i = 0; i < block->phis()->length(); i++) {
+ HPhi* phi = block->phis()->at(i);
+ ASSERT(phi->ActualValue() == phi);
+ }
+#endif
+
+ for (HInstruction* instruction = block->first();
+ instruction != NULL;
+ instruction = instruction->next()) {
+ if (instruction->ActualValue() != instruction) {
+ ASSERT(instruction->IsInformativeDefinition());
+ if (instruction->IsPurelyInformativeDefinition()) {
+ instruction->DeleteAndReplaceWith(instruction->RedefinedOperand());
+ } else {
+ instruction->ReplaceAllUsesWith(instruction->ActualValue());
+ }
+ }
+ }
+ }
}
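
RestoreActualValues is the mirror image of the informative-definitions setup: once bounds-check analysis is done, every instruction that merely redefines another value is stripped away, so later phases and the Lithium translation only ever see actual values. Reduced to the per-instruction decision, with stub types:

struct Instr {
  Instr* actual;            // ActualValue(): the value this one stands for
  bool purely_informative;
  void DeleteAndReplaceWith(Instr* v) { (void)v; /* unlink + redirect */ }
  void ReplaceAllUsesWith(Instr* v)   { (void)v; /* redirect uses only */ }
};

void Restore(Instr* instruction) {
  if (instruction->actual == instruction) return;  // a real computation
  if (instruction->purely_informative) {
    // Adds no check of its own, so it disappears entirely.
    instruction->DeleteAndReplaceWith(instruction->actual);
  } else {
    // The instruction stays (it may still deopt), but every use now
    // reads the underlying value.
    instruction->ReplaceAllUsesWith(instruction->actual);
  }
}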
-void HGraphBuilder::AddPhi(HPhi* instr) {
+void HOptimizedGraphBuilder::AddPhi(HPhi* instr) {
ASSERT(current_block() != NULL);
current_block()->AddPhi(instr);
}
-void HGraphBuilder::PushAndAdd(HInstruction* instr) {
+void HOptimizedGraphBuilder::PushAndAdd(HInstruction* instr) {
Push(instr);
AddInstruction(instr);
}
+void HOptimizedGraphBuilder::AddSoftDeoptimize() {
+ if (FLAG_always_opt) return;
+ if (current_block()->IsDeoptimizing()) return;
+ AddInstruction(new(zone()) HSoftDeoptimize());
+ current_block()->MarkAsDeoptimizing();
+ graph()->set_has_soft_deoptimize(true);
+}
+
+
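
AddSoftDeoptimize centralizes what BuildLoadNamedGeneric used to open-code further down in this patch: emit at most one soft deopt per block, and none at all under --always-opt. Its guard logic as a standalone sketch, with illustrative flags:

struct Block { bool deoptimizing = false; };
struct Graph { bool has_soft_deoptimize = false; };

void AddSoftDeoptimize(Block* block, Graph* graph, bool always_opt) {
  if (always_opt) return;           // never pessimize always-optimized code
  if (block->deoptimizing) return;  // one soft deopt per block is enough
  // The real builder emits an HSoftDeoptimize instruction here.
  block->deoptimizing = true;
  graph->has_soft_deoptimize = true;
}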
template <class Instruction>
-HInstruction* HGraphBuilder::PreProcessCall(Instruction* call) {
+HInstruction* HOptimizedGraphBuilder::PreProcessCall(Instruction* call) {
int count = call->argument_count();
ZoneList<HValue*> arguments(count, zone());
for (int i = 0; i < count; ++i) {
@@ -3843,11 +4521,11 @@ HInstruction* HGraphBuilder::PreProcessCall(Instruction* call) {
}
-void HGraphBuilder::SetUpScope(Scope* scope) {
+void HOptimizedGraphBuilder::SetUpScope(Scope* scope) {
HConstant* undefined_constant = new(zone()) HConstant(
isolate()->factory()->undefined_value(), Representation::Tagged());
AddInstruction(undefined_constant);
- graph_->set_undefined_constant(undefined_constant);
+ graph()->set_undefined_constant(undefined_constant);
HArgumentsObject* object = new(zone()) HArgumentsObject;
AddInstruction(object);
@@ -3886,30 +4564,14 @@ void HGraphBuilder::SetUpScope(Scope* scope) {
}
-void HGraphBuilder::VisitStatements(ZoneList<Statement*>* statements) {
+void HOptimizedGraphBuilder::VisitStatements(ZoneList<Statement*>* statements) {
for (int i = 0; i < statements->length(); i++) {
CHECK_ALIVE(Visit(statements->at(i)));
}
}
-HBasicBlock* HGraphBuilder::CreateBasicBlock(HEnvironment* env) {
- HBasicBlock* b = graph()->CreateBasicBlock();
- b->SetInitialEnvironment(env);
- return b;
-}
-
-
-HBasicBlock* HGraphBuilder::CreateLoopHeaderBlock() {
- HBasicBlock* header = graph()->CreateBasicBlock();
- HEnvironment* entry_env = environment()->CopyAsLoopHeader(header);
- header->SetInitialEnvironment(entry_env);
- header->AttachLoopInformation();
- return header;
-}
-
-
-void HGraphBuilder::VisitBlock(Block* stmt) {
+void HOptimizedGraphBuilder::VisitBlock(Block* stmt) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
@@ -3929,7 +4591,8 @@ void HGraphBuilder::VisitBlock(Block* stmt) {
}
-void HGraphBuilder::VisitExpressionStatement(ExpressionStatement* stmt) {
+void HOptimizedGraphBuilder::VisitExpressionStatement(
+ ExpressionStatement* stmt) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
@@ -3937,14 +4600,14 @@ void HGraphBuilder::VisitExpressionStatement(ExpressionStatement* stmt) {
}
-void HGraphBuilder::VisitEmptyStatement(EmptyStatement* stmt) {
+void HOptimizedGraphBuilder::VisitEmptyStatement(EmptyStatement* stmt) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
}
-void HGraphBuilder::VisitIfStatement(IfStatement* stmt) {
+void HOptimizedGraphBuilder::VisitIfStatement(IfStatement* stmt) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
@@ -3983,7 +4646,7 @@ void HGraphBuilder::VisitIfStatement(IfStatement* stmt) {
}
-HBasicBlock* HGraphBuilder::BreakAndContinueScope::Get(
+HBasicBlock* HOptimizedGraphBuilder::BreakAndContinueScope::Get(
BreakableStatement* stmt,
BreakType type,
int* drop_extra) {
@@ -4022,7 +4685,8 @@ HBasicBlock* HGraphBuilder::BreakAndContinueScope::Get(
}
-void HGraphBuilder::VisitContinueStatement(ContinueStatement* stmt) {
+void HOptimizedGraphBuilder::VisitContinueStatement(
+ ContinueStatement* stmt) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
@@ -4036,7 +4700,7 @@ void HGraphBuilder::VisitContinueStatement(ContinueStatement* stmt) {
}
-void HGraphBuilder::VisitBreakStatement(BreakStatement* stmt) {
+void HOptimizedGraphBuilder::VisitBreakStatement(BreakStatement* stmt) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
@@ -4050,7 +4714,7 @@ void HGraphBuilder::VisitBreakStatement(BreakStatement* stmt) {
}
-void HGraphBuilder::VisitReturnStatement(ReturnStatement* stmt) {
+void HOptimizedGraphBuilder::VisitReturnStatement(ReturnStatement* stmt) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
@@ -4060,7 +4724,9 @@ void HGraphBuilder::VisitReturnStatement(ReturnStatement* stmt) {
// Not an inlined return, so an actual one.
CHECK_ALIVE(VisitForValue(stmt->expression()));
HValue* result = environment()->Pop();
- current_block()->FinishExit(new(zone()) HReturn(result));
+ current_block()->FinishExit(new(zone()) HReturn(
+ result,
+ environment()->LookupContext()));
} else if (state->inlining_kind() == CONSTRUCT_CALL_RETURN) {
// Return from an inlined construct call. In a test context the return value
// will always evaluate to true, in a value context the return value needs
@@ -4087,7 +4753,19 @@ void HGraphBuilder::VisitReturnStatement(ReturnStatement* stmt) {
typecheck->SetSuccessorAt(1, not_spec_object);
current_block()->Finish(typecheck);
if_spec_object->AddLeaveInlined(return_value, state);
- not_spec_object->AddLeaveInlined(receiver, state);
+ if (!FLAG_harmony_symbols) {
+ not_spec_object->AddLeaveInlined(receiver, state);
+ } else {
+ HHasInstanceTypeAndBranch* symbolcheck =
+ new(zone()) HHasInstanceTypeAndBranch(return_value, SYMBOL_TYPE);
+ HBasicBlock* is_symbol = graph()->CreateBasicBlock();
+ HBasicBlock* not_symbol = graph()->CreateBasicBlock();
+ symbolcheck->SetSuccessorAt(0, is_symbol);
+ symbolcheck->SetSuccessorAt(1, not_symbol);
+ not_spec_object->Finish(symbolcheck);
+ is_symbol->AddLeaveInlined(return_value, state);
+ not_symbol->AddLeaveInlined(receiver, state);
+ }
}
} else if (state->inlining_kind() == SETTER_CALL_RETURN) {
// Return from an inlined setter call. The returned value is never used, the
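
The harmony-symbols branch above grafts one extra type test onto the inlined-constructor return rule. The JS semantics being encoded: a constructor's explicit return value replaces the receiver only when it is a spec object and, under the symbols experiment, also when it is a symbol; any other value is discarded in favor of the receiver. As a plain decision function with an illustrative enum:

enum class Kind { kSpecObject, kSymbol, kOther };

// True when the constructor's explicit return value replaces 'this'.
bool UseReturnValue(Kind returned, bool harmony_symbols) {
  return returned == Kind::kSpecObject ||
         (harmony_symbols && returned == Kind::kSymbol);
}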
@@ -4122,7 +4800,7 @@ void HGraphBuilder::VisitReturnStatement(ReturnStatement* stmt) {
}
-void HGraphBuilder::VisitWithStatement(WithStatement* stmt) {
+void HOptimizedGraphBuilder::VisitWithStatement(WithStatement* stmt) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
@@ -4130,7 +4808,7 @@ void HGraphBuilder::VisitWithStatement(WithStatement* stmt) {
}
-void HGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
+void HOptimizedGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
@@ -4169,7 +4847,7 @@ void HGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
!clause->label()->IsStringLiteral()) ||
(switch_type == SMI_SWITCH &&
!clause->label()->IsSmiLiteral())) {
- return Bailout("SwitchStatemnt: mixed label types are not supported");
+ return Bailout("SwitchStatement: mixed label types are not supported");
}
}
@@ -4223,12 +4901,13 @@ void HGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
new(zone()) HCompareIDAndBranch(tag_value,
label_value,
Token::EQ_STRICT);
- compare_->SetInputRepresentation(Representation::Integer32());
+ compare_->set_observed_input_representation(
+ Representation::Integer32(), Representation::Integer32());
compare = compare_;
} else {
compare = new(zone()) HStringCompareAndBranch(context, tag_value,
- label_value,
- Token::EQ_STRICT);
+ label_value,
+ Token::EQ_STRICT);
}
compare->SetSuccessorAt(0, body_block);
@@ -4315,12 +4994,12 @@ void HGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
}
-bool HGraphBuilder::HasOsrEntryAt(IterationStatement* statement) {
+bool HOptimizedGraphBuilder::HasOsrEntryAt(IterationStatement* statement) {
return statement->OsrEntryId() == info()->osr_ast_id();
}
-bool HGraphBuilder::PreProcessOsrEntry(IterationStatement* statement) {
+bool HOptimizedGraphBuilder::PreProcessOsrEntry(IterationStatement* statement) {
if (!HasOsrEntryAt(statement)) return false;
HBasicBlock* non_osr_entry = graph()->CreateBasicBlock();
@@ -4333,6 +5012,7 @@ bool HGraphBuilder::PreProcessOsrEntry(IterationStatement* statement) {
non_osr_entry->Goto(loop_predecessor);
set_current_block(osr_entry);
+ osr_entry->set_osr_entry();
BailoutId osr_entry_id = statement->OsrEntryId();
int first_expression_index = environment()->first_expression_index();
int length = environment()->length();
@@ -4370,9 +5050,9 @@ bool HGraphBuilder::PreProcessOsrEntry(IterationStatement* statement) {
}
-void HGraphBuilder::VisitLoopBody(IterationStatement* stmt,
- HBasicBlock* loop_entry,
- BreakAndContinueInfo* break_info) {
+void HOptimizedGraphBuilder::VisitLoopBody(IterationStatement* stmt,
+ HBasicBlock* loop_entry,
+ BreakAndContinueInfo* break_info) {
BreakAndContinueScope push(break_info, this);
AddSimulate(stmt->StackCheckId());
HValue* context = environment()->LookupContext();
@@ -4385,7 +5065,7 @@ void HGraphBuilder::VisitLoopBody(IterationStatement* stmt,
}
-void HGraphBuilder::VisitDoWhileStatement(DoWhileStatement* stmt) {
+void HOptimizedGraphBuilder::VisitDoWhileStatement(DoWhileStatement* stmt) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
@@ -4428,7 +5108,7 @@ void HGraphBuilder::VisitDoWhileStatement(DoWhileStatement* stmt) {
}
-void HGraphBuilder::VisitWhileStatement(WhileStatement* stmt) {
+void HOptimizedGraphBuilder::VisitWhileStatement(WhileStatement* stmt) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
@@ -4472,7 +5152,7 @@ void HGraphBuilder::VisitWhileStatement(WhileStatement* stmt) {
}
-void HGraphBuilder::VisitForStatement(ForStatement* stmt) {
+void HOptimizedGraphBuilder::VisitForStatement(ForStatement* stmt) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
@@ -4524,7 +5204,7 @@ void HGraphBuilder::VisitForStatement(ForStatement* stmt) {
}
-void HGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
+void HOptimizedGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
@@ -4560,7 +5240,7 @@ void HGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
HInstruction* enum_length = AddInstruction(new(zone()) HMapEnumLength(map));
HInstruction* start_index = AddInstruction(new(zone()) HConstant(
- Handle<Object>(Smi::FromInt(0)), Representation::Integer32()));
+ Handle<Object>(Smi::FromInt(0), isolate()), Representation::Integer32()));
Push(map);
Push(array);
@@ -4587,7 +5267,8 @@ void HGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
// Check that we still have more keys.
HCompareIDAndBranch* compare_index =
new(zone()) HCompareIDAndBranch(index, limit, Token::LT);
- compare_index->SetInputRepresentation(Representation::Integer32());
+ compare_index->set_observed_input_representation(
+ Representation::Integer32(), Representation::Integer32());
HBasicBlock* loop_body = graph()->CreateBasicBlock();
HBasicBlock* loop_successor = graph()->CreateBasicBlock();
@@ -4626,9 +5307,10 @@ void HGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
set_current_block(body_exit);
HValue* current_index = Pop();
- HInstruction* new_index = new(zone()) HAdd(environment()->LookupContext(),
- current_index,
- graph()->GetConstant1());
+ HInstruction* new_index = HAdd::New(zone(),
+ environment()->LookupContext(),
+ current_index,
+ graph()->GetConstant1());
new_index->AssumeRepresentation(Representation::Integer32());
PushAndAdd(new_index);
body_exit = current_block();
@@ -4644,7 +5326,7 @@ void HGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
}
-void HGraphBuilder::VisitTryCatchStatement(TryCatchStatement* stmt) {
+void HOptimizedGraphBuilder::VisitTryCatchStatement(TryCatchStatement* stmt) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
@@ -4652,7 +5334,8 @@ void HGraphBuilder::VisitTryCatchStatement(TryCatchStatement* stmt) {
}
-void HGraphBuilder::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
+void HOptimizedGraphBuilder::VisitTryFinallyStatement(
+ TryFinallyStatement* stmt) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
@@ -4660,7 +5343,7 @@ void HGraphBuilder::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
}
-void HGraphBuilder::VisitDebuggerStatement(DebuggerStatement* stmt) {
+void HOptimizedGraphBuilder::VisitDebuggerStatement(DebuggerStatement* stmt) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
@@ -4688,7 +5371,7 @@ static Handle<SharedFunctionInfo> SearchSharedFunctionInfo(
}
-void HGraphBuilder::VisitFunctionLiteral(FunctionLiteral* expr) {
+void HOptimizedGraphBuilder::VisitFunctionLiteral(FunctionLiteral* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
@@ -4707,7 +5390,7 @@ void HGraphBuilder::VisitFunctionLiteral(FunctionLiteral* expr) {
}
-void HGraphBuilder::VisitSharedFunctionInfoLiteral(
+void HOptimizedGraphBuilder::VisitSharedFunctionInfoLiteral(
SharedFunctionInfoLiteral* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
@@ -4716,7 +5399,7 @@ void HGraphBuilder::VisitSharedFunctionInfoLiteral(
}
-void HGraphBuilder::VisitConditional(Conditional* expr) {
+void HOptimizedGraphBuilder::VisitConditional(Conditional* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
@@ -4754,8 +5437,9 @@ void HGraphBuilder::VisitConditional(Conditional* expr) {
}
-HGraphBuilder::GlobalPropertyAccess HGraphBuilder::LookupGlobalProperty(
- Variable* var, LookupResult* lookup, bool is_store) {
+HOptimizedGraphBuilder::GlobalPropertyAccess
+ HOptimizedGraphBuilder::LookupGlobalProperty(
+ Variable* var, LookupResult* lookup, bool is_store) {
if (var->is_this() || !info()->has_global_object()) {
return kUseGeneric;
}
@@ -4771,7 +5455,7 @@ HGraphBuilder::GlobalPropertyAccess HGraphBuilder::LookupGlobalProperty(
}
-HValue* HGraphBuilder::BuildContextChainWalk(Variable* var) {
+HValue* HOptimizedGraphBuilder::BuildContextChainWalk(Variable* var) {
ASSERT(var->IsContextSlot());
HValue* context = environment()->LookupContext();
int length = info()->scope()->ContextChainLength(var->scope());
@@ -4784,7 +5468,7 @@ HValue* HGraphBuilder::BuildContextChainWalk(Variable* var) {
}
-void HGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
+void HOptimizedGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
@@ -4858,17 +5542,17 @@ void HGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
}
-void HGraphBuilder::VisitLiteral(Literal* expr) {
+void HOptimizedGraphBuilder::VisitLiteral(Literal* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
HConstant* instr =
- new(zone()) HConstant(expr->handle(), Representation::Tagged());
+ new(zone()) HConstant(expr->handle(), Representation::None());
return ast_context()->ReturnInstruction(instr, expr->id());
}
-void HGraphBuilder::VisitRegExpLiteral(RegExpLiteral* expr) {
+void HOptimizedGraphBuilder::VisitRegExpLiteral(RegExpLiteral* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
@@ -4907,12 +5591,13 @@ static bool LookupAccessorPair(Handle<Map> map,
Handle<String> name,
Handle<AccessorPair>* accessors,
Handle<JSObject>* holder) {
- LookupResult lookup(map->GetIsolate());
+ Isolate* isolate = map->GetIsolate();
+ LookupResult lookup(isolate);
// Check for a JavaScript accessor directly in the map.
map->LookupDescriptor(NULL, *name, &lookup);
if (lookup.IsPropertyCallbacks()) {
- Handle<Object> callback(lookup.GetValueFromMap(*map));
+ Handle<Object> callback(lookup.GetValueFromMap(*map), isolate);
if (!callback->IsAccessorPair()) return false;
*accessors = Handle<AccessorPair>::cast(callback);
*holder = Handle<JSObject>();
@@ -4925,7 +5610,7 @@ static bool LookupAccessorPair(Handle<Map> map,
// Check for a JavaScript accessor somewhere in the proto chain.
LookupInPrototypes(map, name, &lookup);
if (lookup.IsPropertyCallbacks()) {
- Handle<Object> callback(lookup.GetValue());
+ Handle<Object> callback(lookup.GetValue(), isolate);
if (!callback->IsAccessorPair()) return false;
*accessors = Handle<AccessorPair>::cast(callback);
*holder = Handle<JSObject>(lookup.holder());
@@ -4975,9 +5660,10 @@ static bool IsFastLiteral(Handle<JSObject> boilerplate,
ASSERT(max_depth >= 0 && *max_properties >= 0);
if (max_depth == 0) return false;
+ Isolate* isolate = boilerplate->GetIsolate();
Handle<FixedArrayBase> elements(boilerplate->elements());
if (elements->length() > 0 &&
- elements->map() != boilerplate->GetHeap()->fixed_cow_array_map()) {
+ elements->map() != isolate->heap()->fixed_cow_array_map()) {
if (boilerplate->HasFastDoubleElements()) {
*total_size += FixedDoubleArray::SizeFor(elements->length());
} else if (boilerplate->HasFastObjectElements()) {
@@ -4985,7 +5671,7 @@ static bool IsFastLiteral(Handle<JSObject> boilerplate,
int length = elements->length();
for (int i = 0; i < length; i++) {
if ((*max_properties)-- == 0) return false;
- Handle<Object> value(fast_elements->get(i));
+ Handle<Object> value(fast_elements->get(i), isolate);
if (value->IsJSObject()) {
Handle<JSObject> value_object = Handle<JSObject>::cast(value);
if (!IsFastLiteral(value_object,
@@ -5009,7 +5695,7 @@ static bool IsFastLiteral(Handle<JSObject> boilerplate,
int nof = boilerplate->map()->inobject_properties();
for (int i = 0; i < nof; i++) {
if ((*max_properties)-- == 0) return false;
- Handle<Object> value(boilerplate->InObjectPropertyAt(i));
+ Handle<Object> value(boilerplate->InObjectPropertyAt(i), isolate);
if (value->IsJSObject()) {
Handle<JSObject> value_object = Handle<JSObject>::cast(value);
if (!IsFastLiteral(value_object,
@@ -5027,7 +5713,7 @@ static bool IsFastLiteral(Handle<JSObject> boilerplate,
}
-void HGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
+void HOptimizedGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
@@ -5038,7 +5724,8 @@ void HGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
// Check whether to use fast or slow deep-copying for boilerplate.
int total_size = 0;
int max_properties = HFastLiteral::kMaxLiteralProperties;
- Handle<Object> boilerplate(closure->literals()->get(expr->literal_index()));
+ Handle<Object> boilerplate(closure->literals()->get(expr->literal_index()),
+ isolate());
if (boilerplate->IsJSObject() &&
IsFastLiteral(Handle<JSObject>::cast(boilerplate),
HFastLiteral::kMaxLiteralDepth,
@@ -5049,7 +5736,8 @@ void HGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
boilerplate_object,
total_size,
expr->literal_index(),
- expr->depth());
+ expr->depth(),
+ DONT_TRACK_ALLOCATION_SITE);
} else {
literal = new(zone()) HObjectLiteral(context,
expr->constant_properties(),
@@ -5077,7 +5765,7 @@ void HGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
ASSERT(!CompileTimeValue::IsCompileTimeValue(value));
// Fall through.
case ObjectLiteral::Property::COMPUTED:
- if (key->handle()->IsSymbol()) {
+ if (key->handle()->IsInternalizedString()) {
if (property->emit_store()) {
property->RecordTypeFeedback(oracle());
CHECK_ALIVE(VisitForValue(value));
@@ -5100,7 +5788,9 @@ void HGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
map));
}
AddInstruction(store);
- if (store->HasObservableSideEffects()) AddSimulate(key->id());
+ if (store->HasObservableSideEffects()) {
+ AddSimulate(key->id(), REMOVABLE_SIMULATE);
+ }
} else {
CHECK_ALIVE(VisitForEffect(value));
}
@@ -5130,7 +5820,7 @@ void HGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
}
-void HGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
+void HOptimizedGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
@@ -5140,7 +5830,8 @@ void HGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
HInstruction* literal;
Handle<FixedArray> literals(environment()->closure()->literals());
- Handle<Object> raw_boilerplate(literals->get(expr->literal_index()));
+ Handle<Object> raw_boilerplate(literals->get(expr->literal_index()),
+ isolate());
if (raw_boilerplate->IsUndefined()) {
raw_boilerplate = Runtime::CreateArrayLiteralBoilerplate(
@@ -5157,7 +5848,13 @@ void HGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
Handle<JSObject> boilerplate = Handle<JSObject>::cast(raw_boilerplate);
ElementsKind boilerplate_elements_kind =
- Handle<JSObject>::cast(boilerplate)->GetElementsKind();
+ Handle<JSObject>::cast(boilerplate)->GetElementsKind();
+
+ // TODO(mvstanton): This heuristic is only a temporary solution. In the
+ // end, we want to quit creating allocation site info after a certain number
+ // of GCs for a call site.
+ AllocationSiteMode mode = AllocationSiteInfo::GetMode(
+ boilerplate_elements_kind);
// Check whether to use fast or slow deep-copying for boilerplate.
int total_size = 0;
@@ -5166,17 +5863,22 @@ void HGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
HFastLiteral::kMaxLiteralDepth,
&max_properties,
&total_size)) {
+ if (mode == TRACK_ALLOCATION_SITE) {
+ total_size += AllocationSiteInfo::kSize;
+ }
literal = new(zone()) HFastLiteral(context,
boilerplate,
total_size,
expr->literal_index(),
- expr->depth());
+ expr->depth(),
+ mode);
} else {
literal = new(zone()) HArrayLiteral(context,
boilerplate,
length,
expr->literal_index(),
- expr->depth());
+ expr->depth(),
+ mode);
}
// The array is expected in the bailout environment during computation
@@ -5201,7 +5903,7 @@ void HGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
AddInstruction(elements);
HValue* key = AddInstruction(
- new(zone()) HConstant(Handle<Object>(Smi::FromInt(i)),
+ new(zone()) HConstant(Handle<Object>(Smi::FromInt(i), isolate()),
Representation::Integer32()));
switch (boilerplate_elements_kind) {
@@ -5236,6 +5938,10 @@ static bool ComputeLoadStoreField(Handle<Map> type,
Handle<String> name,
LookupResult* lookup,
bool is_store) {
+ if (type->has_named_interceptor()) {
+ lookup->InterceptorResult(NULL);
+ return false;
+ }
// If we directly find a field, the access can be inlined.
type->LookupDescriptor(NULL, *name, lookup);
if (lookup->IsField()) return true;
@@ -5264,18 +5970,19 @@ static int ComputeLoadStoreFieldIndex(Handle<Map> type,
}
-void HGraphBuilder::AddCheckMapsWithTransitions(HValue* object,
- Handle<Map> map) {
+void HOptimizedGraphBuilder::AddCheckMapsWithTransitions(HValue* object,
+ Handle<Map> map) {
AddInstruction(new(zone()) HCheckNonSmi(object));
AddInstruction(HCheckMaps::NewWithTransitions(object, map, zone()));
}
-HInstruction* HGraphBuilder::BuildStoreNamedField(HValue* object,
- Handle<String> name,
- HValue* value,
- Handle<Map> map,
- LookupResult* lookup) {
+HInstruction* HOptimizedGraphBuilder::BuildStoreNamedField(
+ HValue* object,
+ Handle<String> name,
+ HValue* value,
+ Handle<Map> map,
+ LookupResult* lookup) {
ASSERT(lookup->IsFound());
// If the property does not exist yet, we have to check that it wasn't made
// readonly or turned into a setter by some meanwhile modifications on the
@@ -5295,13 +6002,16 @@ HInstruction* HGraphBuilder::BuildStoreNamedField(HValue* object,
proto = proto_result.holder();
} else {
// Otherwise, find the top prototype.
- while (proto->GetPrototype()->IsJSObject()) proto = proto->GetPrototype();
- ASSERT(proto->GetPrototype()->IsNull());
+ while (proto->GetPrototype(isolate())->IsJSObject()) {
+ proto = proto->GetPrototype(isolate());
+ }
+ ASSERT(proto->GetPrototype(isolate())->IsNull());
}
ASSERT(proto->IsJSObject());
AddInstruction(new(zone()) HCheckPrototypeMaps(
Handle<JSObject>(JSObject::cast(map->prototype())),
- Handle<JSObject>(JSObject::cast(proto))));
+ Handle<JSObject>(JSObject::cast(proto)),
+ zone()));
}
int index = ComputeLoadStoreFieldIndex(map, name, lookup);
@@ -5327,9 +6037,10 @@ HInstruction* HGraphBuilder::BuildStoreNamedField(HValue* object,
}
-HInstruction* HGraphBuilder::BuildStoreNamedGeneric(HValue* object,
- Handle<String> name,
- HValue* value) {
+HInstruction* HOptimizedGraphBuilder::BuildStoreNamedGeneric(
+ HValue* object,
+ Handle<String> name,
+ HValue* value) {
HValue* context = environment()->LookupContext();
return new(zone()) HStoreNamedGeneric(
context,
@@ -5340,11 +6051,12 @@ HInstruction* HGraphBuilder::BuildStoreNamedGeneric(HValue* object,
}
-HInstruction* HGraphBuilder::BuildCallSetter(HValue* object,
- HValue* value,
- Handle<Map> map,
- Handle<JSFunction> setter,
- Handle<JSObject> holder) {
+HInstruction* HOptimizedGraphBuilder::BuildCallSetter(
+ HValue* object,
+ HValue* value,
+ Handle<Map> map,
+ Handle<JSFunction> setter,
+ Handle<JSObject> holder) {
AddCheckConstantFunction(holder, object, map);
AddInstruction(new(zone()) HPushArgument(object));
AddInstruction(new(zone()) HPushArgument(value));
@@ -5352,10 +6064,11 @@ HInstruction* HGraphBuilder::BuildCallSetter(HValue* object,
}
-HInstruction* HGraphBuilder::BuildStoreNamedMonomorphic(HValue* object,
- Handle<String> name,
- HValue* value,
- Handle<Map> map) {
+HInstruction* HOptimizedGraphBuilder::BuildStoreNamedMonomorphic(
+ HValue* object,
+ Handle<String> name,
+ HValue* value,
+ Handle<Map> map) {
// Handle a store to a known field.
LookupResult lookup(isolate());
if (ComputeLoadStoreField(map, name, &lookup, true)) {
@@ -5368,10 +6081,11 @@ HInstruction* HGraphBuilder::BuildStoreNamedMonomorphic(HValue* object,
}
-void HGraphBuilder::HandlePolymorphicLoadNamedField(Property* expr,
- HValue* object,
- SmallMapList* types,
- Handle<String> name) {
+void HOptimizedGraphBuilder::HandlePolymorphicLoadNamedField(
+ Property* expr,
+ HValue* object,
+ SmallMapList* types,
+ Handle<String> name) {
int count = 0;
int previous_field_offset = 0;
bool previous_field_is_in_object = false;
@@ -5423,11 +6137,12 @@ void HGraphBuilder::HandlePolymorphicLoadNamedField(Property* expr,
}
-void HGraphBuilder::HandlePolymorphicStoreNamedField(Assignment* expr,
- HValue* object,
- HValue* value,
- SmallMapList* types,
- Handle<String> name) {
+void HOptimizedGraphBuilder::HandlePolymorphicStoreNamedField(
+ Assignment* expr,
+ HValue* object,
+ HValue* value,
+ SmallMapList* types,
+ Handle<String> name) {
// TODO(ager): We should recognize when the prototype chains for different
// maps are identical. In that case we can avoid repeatedly generating the
// same prototype map checks.
@@ -5481,10 +6196,10 @@ void HGraphBuilder::HandlePolymorphicStoreNamedField(Assignment* expr,
// unoptimized code).
if (instr->HasObservableSideEffects()) {
if (ast_context()->IsEffect()) {
- AddSimulate(expr->id());
+ AddSimulate(expr->id(), REMOVABLE_SIMULATE);
} else {
Push(value);
- AddSimulate(expr->id());
+ AddSimulate(expr->id(), REMOVABLE_SIMULATE);
Drop(1);
}
}
@@ -5499,7 +6214,7 @@ void HGraphBuilder::HandlePolymorphicStoreNamedField(Assignment* expr,
}
-void HGraphBuilder::HandlePropertyAssignment(Assignment* expr) {
+void HOptimizedGraphBuilder::HandlePropertyAssignment(Assignment* expr) {
Property* prop = expr->target()->AsProperty();
ASSERT(prop != NULL);
expr->RecordTypeFeedback(oracle(), zone());
@@ -5554,7 +6269,9 @@ void HGraphBuilder::HandlePropertyAssignment(Assignment* expr) {
Push(value);
instr->set_position(expr->position());
AddInstruction(instr);
- if (instr->HasObservableSideEffects()) AddSimulate(expr->AssignmentId());
+ if (instr->HasObservableSideEffects()) {
+ AddSimulate(expr->AssignmentId(), REMOVABLE_SIMULATE);
+ }
return ast_context()->ReturnValue(Pop());
} else {
@@ -5571,7 +6288,7 @@ void HGraphBuilder::HandlePropertyAssignment(Assignment* expr) {
&has_side_effects);
Push(value);
ASSERT(has_side_effects); // Stores always have side effects.
- AddSimulate(expr->AssignmentId());
+ AddSimulate(expr->AssignmentId(), REMOVABLE_SIMULATE);
return ast_context()->ReturnValue(Pop());
}
}
@@ -5580,10 +6297,11 @@ void HGraphBuilder::HandlePropertyAssignment(Assignment* expr) {
// Because not every expression has a position and there is not common
// superclass of Assignment and CountOperation, we cannot just pass the
// owning expression instead of position and ast_id separately.
-void HGraphBuilder::HandleGlobalVariableAssignment(Variable* var,
- HValue* value,
- int position,
- BailoutId ast_id) {
+void HOptimizedGraphBuilder::HandleGlobalVariableAssignment(
+ Variable* var,
+ HValue* value,
+ int position,
+ BailoutId ast_id) {
LookupResult lookup(isolate());
GlobalPropertyAccess type = LookupGlobalProperty(var, &lookup, true);
if (type == kUseCell) {
@@ -5593,7 +6311,9 @@ void HGraphBuilder::HandleGlobalVariableAssignment(Variable* var,
new(zone()) HStoreGlobalCell(value, cell, lookup.GetPropertyDetails());
instr->set_position(position);
AddInstruction(instr);
- if (instr->HasObservableSideEffects()) AddSimulate(ast_id);
+ if (instr->HasObservableSideEffects()) {
+ AddSimulate(ast_id, REMOVABLE_SIMULATE);
+ }
} else {
HValue* context = environment()->LookupContext();
HGlobalObject* global_object = new(zone()) HGlobalObject(context);
@@ -5608,12 +6328,12 @@ void HGraphBuilder::HandleGlobalVariableAssignment(Variable* var,
instr->set_position(position);
AddInstruction(instr);
ASSERT(instr->HasObservableSideEffects());
- if (instr->HasObservableSideEffects()) AddSimulate(ast_id);
+ AddSimulate(ast_id, REMOVABLE_SIMULATE);
}
}
-void HGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
+void HOptimizedGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
Expression* target = expr->target();
VariableProxy* proxy = target->AsVariableProxy();
Property* prop = target->AsProperty();
@@ -5685,7 +6405,7 @@ void HGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
new(zone()) HStoreContextSlot(context, var->index(), mode, Top());
AddInstruction(instr);
if (instr->HasObservableSideEffects()) {
- AddSimulate(expr->AssignmentId());
+ AddSimulate(expr->AssignmentId(), REMOVABLE_SIMULATE);
}
break;
}
@@ -5725,7 +6445,9 @@ void HGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
load = BuildLoadNamedGeneric(object, name, prop);
}
PushAndAdd(load);
- if (load->HasObservableSideEffects()) AddSimulate(prop->LoadId());
+ if (load->HasObservableSideEffects()) {
+ AddSimulate(prop->LoadId(), REMOVABLE_SIMULATE);
+ }
CHECK_ALIVE(VisitForValue(expr->value()));
HValue* right = Pop();
@@ -5733,10 +6455,12 @@ void HGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
HInstruction* instr = BuildBinaryOperation(operation, left, right);
PushAndAdd(instr);
- if (instr->HasObservableSideEffects()) AddSimulate(operation->id());
+ if (instr->HasObservableSideEffects()) {
+ AddSimulate(operation->id(), REMOVABLE_SIMULATE);
+ }
HInstruction* store;
- if (!monomorphic) {
+ if (!monomorphic || map->is_observed()) {
// If we don't know the monomorphic type, do a generic store.
CHECK_ALIVE(store = BuildStoreNamedGeneric(object, name, instr));
} else {
@@ -5755,7 +6479,9 @@ void HGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
// Drop the simulated receiver and value. Return the value.
Drop(2);
Push(instr);
- if (store->HasObservableSideEffects()) AddSimulate(expr->AssignmentId());
+ if (store->HasObservableSideEffects()) {
+ AddSimulate(expr->AssignmentId(), REMOVABLE_SIMULATE);
+ }
return ast_context()->ReturnValue(Pop());
} else {
@@ -5771,7 +6497,7 @@ void HGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
false, // is_store
&has_side_effects);
Push(load);
- if (has_side_effects) AddSimulate(prop->LoadId());
+ if (has_side_effects) AddSimulate(prop->LoadId(), REMOVABLE_SIMULATE);
CHECK_ALIVE(VisitForValue(expr->value()));
@@ -5780,7 +6506,9 @@ void HGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
HInstruction* instr = BuildBinaryOperation(operation, left, right);
PushAndAdd(instr);
- if (instr->HasObservableSideEffects()) AddSimulate(operation->id());
+ if (instr->HasObservableSideEffects()) {
+ AddSimulate(operation->id(), REMOVABLE_SIMULATE);
+ }
expr->RecordTypeFeedback(oracle(), zone());
HandleKeyedElementAccess(obj, key, instr, expr, expr->AssignmentId(),
@@ -5792,7 +6520,7 @@ void HGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
Drop(3);
Push(instr);
ASSERT(has_side_effects); // Stores always have side effects.
- AddSimulate(expr->AssignmentId());
+ AddSimulate(expr->AssignmentId(), REMOVABLE_SIMULATE);
return ast_context()->ReturnValue(Pop());
}
@@ -5802,7 +6530,7 @@ void HGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
}
-void HGraphBuilder::VisitAssignment(Assignment* expr) {
+void HOptimizedGraphBuilder::VisitAssignment(Assignment* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
@@ -5915,7 +6643,7 @@ void HGraphBuilder::VisitAssignment(Assignment* expr) {
context, var->index(), mode, Top());
AddInstruction(instr);
if (instr->HasObservableSideEffects()) {
- AddSimulate(expr->AssignmentId());
+ AddSimulate(expr->AssignmentId(), REMOVABLE_SIMULATE);
}
return ast_context()->ReturnValue(Pop());
}
@@ -5929,7 +6657,7 @@ void HGraphBuilder::VisitAssignment(Assignment* expr) {
}
-void HGraphBuilder::VisitThrow(Throw* expr) {
+void HOptimizedGraphBuilder::VisitThrow(Throw* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
@@ -5950,9 +6678,10 @@ void HGraphBuilder::VisitThrow(Throw* expr) {
}
-HLoadNamedField* HGraphBuilder::BuildLoadNamedField(HValue* object,
- Handle<Map> map,
- LookupResult* lookup) {
+HLoadNamedField* HOptimizedGraphBuilder::BuildLoadNamedField(
+ HValue* object,
+ Handle<Map> map,
+ LookupResult* lookup) {
int index = lookup->GetLocalFieldIndexFromMap(*map);
if (index < 0) {
// Negative property indices are in-object properties, indexed
@@ -5967,32 +6696,34 @@ HLoadNamedField* HGraphBuilder::BuildLoadNamedField(HValue* object,
}
-HInstruction* HGraphBuilder::BuildLoadNamedGeneric(HValue* object,
- Handle<String> name,
- Property* expr) {
- if (expr->IsUninitialized() && !FLAG_always_opt) {
- AddInstruction(new(zone()) HSoftDeoptimize);
- current_block()->MarkAsDeoptimizing();
+HInstruction* HOptimizedGraphBuilder::BuildLoadNamedGeneric(
+ HValue* object,
+ Handle<String> name,
+ Property* expr) {
+ if (expr->IsUninitialized()) {
+ AddSoftDeoptimize();
}
HValue* context = environment()->LookupContext();
return new(zone()) HLoadNamedGeneric(context, object, name);
}
-HInstruction* HGraphBuilder::BuildCallGetter(HValue* object,
- Handle<Map> map,
- Handle<JSFunction> getter,
- Handle<JSObject> holder) {
+HInstruction* HOptimizedGraphBuilder::BuildCallGetter(
+ HValue* object,
+ Handle<Map> map,
+ Handle<JSFunction> getter,
+ Handle<JSObject> holder) {
AddCheckConstantFunction(holder, object, map);
AddInstruction(new(zone()) HPushArgument(object));
return new(zone()) HCallConstantFunction(getter, 1);
}
-HInstruction* HGraphBuilder::BuildLoadNamedMonomorphic(HValue* object,
- Handle<String> name,
- Property* expr,
- Handle<Map> map) {
+HInstruction* HOptimizedGraphBuilder::BuildLoadNamedMonomorphic(
+ HValue* object,
+ Handle<String> name,
+ Property* expr,
+ Handle<Map> map) {
// Handle a load from a known field.
ASSERT(!map->is_dictionary_map());
LookupResult lookup(isolate());
@@ -6009,198 +6740,62 @@ HInstruction* HGraphBuilder::BuildLoadNamedMonomorphic(HValue* object,
return new(zone()) HConstant(function, Representation::Tagged());
}
- // Handle a load from a known field somewhere in the protoype chain.
+ // Handle a load from a known field somewhere in the prototype chain.
LookupInPrototypes(map, name, &lookup);
if (lookup.IsField()) {
Handle<JSObject> prototype(JSObject::cast(map->prototype()));
Handle<JSObject> holder(lookup.holder());
Handle<Map> holder_map(holder->map());
AddCheckMapsWithTransitions(object, map);
- HInstruction* holder_value =
- AddInstruction(new(zone()) HCheckPrototypeMaps(prototype, holder));
+ HInstruction* holder_value = AddInstruction(
+ new(zone()) HCheckPrototypeMaps(prototype, holder, zone()));
return BuildLoadNamedField(holder_value, holder_map, &lookup);
}
+ // Handle a load of a constant function somewhere in the prototype chain.
+ if (lookup.IsConstantFunction()) {
+ Handle<JSObject> prototype(JSObject::cast(map->prototype()));
+ Handle<JSObject> holder(lookup.holder());
+ Handle<Map> holder_map(holder->map());
+ AddCheckMapsWithTransitions(object, map);
+ AddInstruction(new(zone()) HCheckPrototypeMaps(prototype, holder, zone()));
+ Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*holder_map));
+ return new(zone()) HConstant(function, Representation::Tagged());
+ }
+
// No luck, do a generic load.
return BuildLoadNamedGeneric(object, name, expr);
}
-HInstruction* HGraphBuilder::BuildLoadKeyedGeneric(HValue* object,
- HValue* key) {
+HInstruction* HOptimizedGraphBuilder::BuildLoadKeyedGeneric(HValue* object,
+ HValue* key) {
HValue* context = environment()->LookupContext();
return new(zone()) HLoadKeyedGeneric(context, object, key);
}
-HInstruction* HGraphBuilder::BuildExternalArrayElementAccess(
- HValue* external_elements,
- HValue* checked_key,
+HInstruction* HOptimizedGraphBuilder::BuildMonomorphicElementAccess(
+ HValue* object,
+ HValue* key,
HValue* val,
HValue* dependency,
- ElementsKind elements_kind,
+ Handle<Map> map,
bool is_store) {
- if (is_store) {
- ASSERT(val != NULL);
- switch (elements_kind) {
- case EXTERNAL_PIXEL_ELEMENTS: {
- val = AddInstruction(new(zone()) HClampToUint8(val));
- break;
- }
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS: {
- if (!val->representation().IsInteger32()) {
- val = AddInstruction(new(zone()) HChange(
- val,
- Representation::Integer32(),
- true, // Truncate to int32.
- false)); // Don't deoptimize undefined (irrelevant here).
- }
- break;
- }
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
- break;
- case FAST_SMI_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNREACHABLE();
- break;
- }
- return new(zone()) HStoreKeyed(external_elements,
- checked_key,
- val,
- elements_kind);
- } else {
- ASSERT(val == NULL);
- HLoadKeyed* load =
- new(zone()) HLoadKeyed(
- external_elements, checked_key, dependency, elements_kind);
- if (FLAG_opt_safe_uint32_operations &&
- elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) {
- graph()->RecordUint32Instruction(load);
- }
- return load;
- }
-}
-
-
-HInstruction* HGraphBuilder::BuildFastElementAccess(HValue* elements,
- HValue* checked_key,
- HValue* val,
- HValue* load_dependency,
- ElementsKind elements_kind,
- bool is_store) {
- if (is_store) {
- ASSERT(val != NULL);
- switch (elements_kind) {
- case FAST_SMI_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- // Smi-only arrays need a smi check.
- AddInstruction(new(zone()) HCheckSmi(val));
- // Fall through.
- case FAST_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- return new(zone()) HStoreKeyed(
- elements, checked_key, val, elements_kind);
- default:
- UNREACHABLE();
- return NULL;
- }
- }
- // It's an element load (!is_store).
- return new(zone()) HLoadKeyed(elements,
- checked_key,
- load_dependency,
- elements_kind);
-}
-
-
-HInstruction* HGraphBuilder::BuildMonomorphicElementAccess(HValue* object,
- HValue* key,
- HValue* val,
- HValue* dependency,
- Handle<Map> map,
- bool is_store) {
HCheckMaps* mapcheck = new(zone()) HCheckMaps(object, map,
zone(), dependency);
AddInstruction(mapcheck);
if (dependency) {
mapcheck->ClearGVNFlag(kDependsOnElementsKind);
}
- return BuildUncheckedMonomorphicElementAccess(object, key, val,
- mapcheck, map, is_store);
+ return BuildUncheckedMonomorphicElementAccess(
+ object, key, val,
+ mapcheck, map->instance_type() == JS_ARRAY_TYPE,
+ map->elements_kind(), is_store);
}
-HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
- HValue* object,
- HValue* key,
- HValue* val,
- HCheckMaps* mapcheck,
- Handle<Map> map,
- bool is_store) {
- // No GVNFlag is necessary for ElementsKind if there is an explicit dependency
- // on a HElementsTransition instruction. The flag can also be removed if the
- // map to check has FAST_HOLEY_ELEMENTS, since there can be no further
- // ElementsKind transitions. Finally, the dependency can be removed for stores
- // for FAST_ELEMENTS, since a transition to HOLEY elements won't change the
- // generated store code.
- if ((map->elements_kind() == FAST_HOLEY_ELEMENTS) ||
- (map->elements_kind() == FAST_ELEMENTS && is_store)) {
- mapcheck->ClearGVNFlag(kDependsOnElementsKind);
- }
- bool fast_smi_only_elements = map->has_fast_smi_elements();
- bool fast_elements = map->has_fast_object_elements();
- HInstruction* elements =
- AddInstruction(new(zone()) HLoadElements(object, mapcheck));
- if (is_store && (fast_elements || fast_smi_only_elements)) {
- HCheckMaps* check_cow_map = new(zone()) HCheckMaps(
- elements, isolate()->factory()->fixed_array_map(), zone());
- check_cow_map->ClearGVNFlag(kDependsOnElementsKind);
- AddInstruction(check_cow_map);
- }
- HInstruction* length = NULL;
- HInstruction* checked_key = NULL;
- if (map->has_external_array_elements()) {
- length = AddInstruction(new(zone()) HFixedArrayBaseLength(elements));
- checked_key = AddInstruction(new(zone()) HBoundsCheck(key, length,
- ALLOW_SMI_KEY));
- HLoadExternalArrayPointer* external_elements =
- new(zone()) HLoadExternalArrayPointer(elements);
- AddInstruction(external_elements);
- return BuildExternalArrayElementAccess(
- external_elements, checked_key, val, mapcheck,
- map->elements_kind(), is_store);
- }
- ASSERT(fast_smi_only_elements ||
- fast_elements ||
- map->has_fast_double_elements());
- if (map->instance_type() == JS_ARRAY_TYPE) {
- length = AddInstruction(new(zone()) HJSArrayLength(object, mapcheck,
- HType::Smi()));
- } else {
- length = AddInstruction(new(zone()) HFixedArrayBaseLength(elements));
- }
- checked_key = AddInstruction(new(zone()) HBoundsCheck(key, length,
- ALLOW_SMI_KEY));
- return BuildFastElementAccess(elements, checked_key, val, mapcheck,
- map->elements_kind(), is_store);
-}
-
-
-HInstruction* HGraphBuilder::TryBuildConsolidatedElementLoad(
+HInstruction* HOptimizedGraphBuilder::TryBuildConsolidatedElementLoad(
HValue* object,
HValue* key,
HValue* val,
@@ -6248,19 +6843,23 @@ HInstruction* HGraphBuilder::TryBuildConsolidatedElementLoad(
HCheckMaps* check_maps = new(zone()) HCheckMaps(object, maps, zone());
AddInstruction(check_maps);
HInstruction* instr = BuildUncheckedMonomorphicElementAccess(
- object, key, val, check_maps, most_general_consolidated_map, false);
+ object, key, val, check_maps,
+ most_general_consolidated_map->instance_type() == JS_ARRAY_TYPE,
+ most_general_consolidated_map->elements_kind(),
+ false);
return instr;
}
-HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object,
- HValue* key,
- HValue* val,
- Expression* prop,
- BailoutId ast_id,
- int position,
- bool is_store,
- bool* has_side_effects) {
+HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
+ HValue* object,
+ HValue* key,
+ HValue* val,
+ Expression* prop,
+ BailoutId ast_id,
+ int position,
+ bool is_store,
+ bool* has_side_effects) {
*has_side_effects = false;
AddInstruction(new(zone()) HCheckNonSmi(object));
SmallMapList* maps = prop->GetReceiverTypes();
@@ -6315,8 +6914,9 @@ HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object,
ASSERT(Map::IsValidElementsTransition(
map->elements_kind(),
transition_target.at(i)->elements_kind()));
+ HValue* context = environment()->LookupContext();
transition = new(zone()) HTransitionElementsKind(
- object, map, transition_target.at(i));
+ context, object, map, transition_target.at(i));
AddInstruction(transition);
} else {
type_todo[map->elements_kind()] = true;
@@ -6373,7 +6973,7 @@ HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object,
&& todo_external_array) {
HInstruction* length =
AddInstruction(new(zone()) HFixedArrayBaseLength(elements));
- checked_key = AddInstruction(new(zone()) HBoundsCheck(key, length));
+ checked_key = AddBoundsCheck(key, length);
external_elements = new(zone()) HLoadExternalArrayPointer(elements);
AddInstruction(external_elements);
}
@@ -6415,8 +7015,7 @@ HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object,
HInstruction* length;
length = AddInstruction(new(zone()) HJSArrayLength(object, typecheck,
HType::Smi()));
- checked_key = AddInstruction(new(zone()) HBoundsCheck(key, length,
- ALLOW_SMI_KEY));
+ checked_key = AddBoundsCheck(key, length, ALLOW_SMI_KEY);
access = AddInstruction(BuildFastElementAccess(
elements, checked_key, val, elements_kind_branch,
elements_kind, is_store));
@@ -6432,8 +7031,7 @@ HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object,
set_current_block(if_fastobject);
length = AddInstruction(new(zone()) HFixedArrayBaseLength(elements));
- checked_key = AddInstruction(new(zone()) HBoundsCheck(key, length,
- ALLOW_SMI_KEY));
+ checked_key = AddBoundsCheck(key, length, ALLOW_SMI_KEY);
access = AddInstruction(BuildFastElementAccess(
elements, checked_key, val, elements_kind_branch,
elements_kind, is_store));
@@ -6445,8 +7043,8 @@ HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object,
}
} else { // External array elements.
access = AddInstruction(BuildExternalArrayElementAccess(
- external_elements, checked_key, val, elements_kind_branch,
- elements_kind, is_store));
+ external_elements, checked_key, val,
+ elements_kind_branch, elements_kind, is_store));
}
*has_side_effects |= access->HasObservableSideEffects();
if (position != RelocInfo::kNoPosition) access->set_position(position);
@@ -6466,14 +7064,15 @@ HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object,
}
-HValue* HGraphBuilder::HandleKeyedElementAccess(HValue* obj,
- HValue* key,
- HValue* val,
- Expression* expr,
- BailoutId ast_id,
- int position,
- bool is_store,
- bool* has_side_effects) {
+HValue* HOptimizedGraphBuilder::HandleKeyedElementAccess(
+ HValue* obj,
+ HValue* key,
+ HValue* val,
+ Expression* expr,
+ BailoutId ast_id,
+ int position,
+ bool is_store,
+ bool* has_side_effects) {
ASSERT(!expr->IsPropertyName());
HInstruction* instr = NULL;
if (expr->IsMonomorphic()) {
@@ -6503,9 +7102,10 @@ HValue* HGraphBuilder::HandleKeyedElementAccess(HValue* obj,
}
-HInstruction* HGraphBuilder::BuildStoreKeyedGeneric(HValue* object,
- HValue* key,
- HValue* value) {
+HInstruction* HOptimizedGraphBuilder::BuildStoreKeyedGeneric(
+ HValue* object,
+ HValue* key,
+ HValue* value) {
HValue* context = environment()->LookupContext();
return new(zone()) HStoreKeyedGeneric(
context,
@@ -6516,7 +7116,7 @@ HInstruction* HGraphBuilder::BuildStoreKeyedGeneric(HValue* object,
}
-void HGraphBuilder::EnsureArgumentsArePushedForAccess() {
+void HOptimizedGraphBuilder::EnsureArgumentsArePushedForAccess() {
// Outermost function already has arguments on the stack.
if (function_state()->outer() == NULL) return;
@@ -6544,7 +7144,7 @@ void HGraphBuilder::EnsureArgumentsArePushedForAccess() {
}
-bool HGraphBuilder::TryArgumentsAccess(Property* expr) {
+bool HOptimizedGraphBuilder::TryArgumentsAccess(Property* expr) {
VariableProxy* proxy = expr->obj()->AsVariableProxy();
if (proxy == NULL) return false;
if (!proxy->var()->IsStackAllocated()) return false;
@@ -6555,7 +7155,7 @@ bool HGraphBuilder::TryArgumentsAccess(Property* expr) {
HInstruction* result = NULL;
if (expr->key()->IsPropertyName()) {
Handle<String> name = expr->key()->AsLiteral()->AsPropertyName();
- if (!name->IsEqualTo(CStrVector("length"))) return false;
+ if (!name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("length"))) return false;
if (function_state()->outer() == NULL) {
HInstruction* elements = AddInstruction(
@@ -6566,8 +7166,8 @@ bool HGraphBuilder::TryArgumentsAccess(Property* expr) {
int argument_count = environment()->
arguments_environment()->parameter_count() - 1;
result = new(zone()) HConstant(
- Handle<Object>(Smi::FromInt(argument_count)),
- Representation::Integer32());
+ Handle<Object>(Smi::FromInt(argument_count), isolate()),
+ Representation::Integer32());
}
} else {
Push(graph()->GetArgumentsObject());
@@ -6580,8 +7180,7 @@ bool HGraphBuilder::TryArgumentsAccess(Property* expr) {
new(zone()) HArgumentsElements(false));
HInstruction* length = AddInstruction(
new(zone()) HArgumentsLength(elements));
- HInstruction* checked_key =
- AddInstruction(new(zone()) HBoundsCheck(key, length));
+ HInstruction* checked_key = AddBoundsCheck(key, length);
result = new(zone()) HAccessArgumentsAt(elements, length, checked_key);
} else {
EnsureArgumentsArePushedForAccess();
@@ -6591,10 +7190,9 @@ bool HGraphBuilder::TryArgumentsAccess(Property* expr) {
int argument_count = environment()->
arguments_environment()->parameter_count() - 1;
HInstruction* length = AddInstruction(new(zone()) HConstant(
- Handle<Object>(Smi::FromInt(argument_count)),
- Representation::Integer32()));
- HInstruction* checked_key =
- AddInstruction(new(zone()) HBoundsCheck(key, length));
+ Handle<Object>(Smi::FromInt(argument_count), isolate()),
+ Representation::Integer32()));
+ HInstruction* checked_key = AddBoundsCheck(key, length);
result = new(zone()) HAccessArgumentsAt(elements, length, checked_key);
}
}
@@ -6603,7 +7201,7 @@ bool HGraphBuilder::TryArgumentsAccess(Property* expr) {
}
-void HGraphBuilder::VisitProperty(Property* expr) {
+void HOptimizedGraphBuilder::VisitProperty(Property* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
@@ -6624,16 +7222,16 @@ void HGraphBuilder::VisitProperty(Property* expr) {
HValue* string = Pop();
AddInstruction(new(zone()) HCheckNonSmi(string));
AddInstruction(HCheckInstanceType::NewIsString(string, zone()));
- instr = new(zone()) HStringLength(string);
+ instr = HStringLength::New(zone(), string);
} else if (expr->IsStringAccess()) {
CHECK_ALIVE(VisitForValue(expr->key()));
HValue* index = Pop();
HValue* string = Pop();
HValue* context = environment()->LookupContext();
- HStringCharCodeAt* char_code =
+ HInstruction* char_code =
BuildStringCharCodeAt(context, string, index);
AddInstruction(char_code);
- instr = new(zone()) HStringCharFromCode(context, char_code);
+ instr = HStringCharFromCode::New(zone(), context, char_code);
} else if (expr->IsFunctionPrototype()) {
HValue* function = Pop();
@@ -6643,12 +7241,16 @@ void HGraphBuilder::VisitProperty(Property* expr) {
} else if (expr->key()->IsPropertyName()) {
Handle<String> name = expr->key()->AsLiteral()->AsPropertyName();
SmallMapList* types = expr->GetReceiverTypes();
+ HValue* object = Top();
- bool monomorphic = expr->IsMonomorphic();
Handle<Map> map;
+ bool monomorphic = false;
if (expr->IsMonomorphic()) {
map = types->first();
- if (map->is_dictionary_map()) monomorphic = false;
+ monomorphic = !map->is_dictionary_map();
+ } else if (object->HasMonomorphicJSObjectType()) {
+ map = object->GetMonomorphicJSObjectMap();
+ monomorphic = !map->is_dictionary_map();
}
if (monomorphic) {
Handle<JSFunction> getter;
@@ -6680,10 +7282,10 @@ void HGraphBuilder::VisitProperty(Property* expr) {
&has_side_effects);
if (has_side_effects) {
if (ast_context()->IsEffect()) {
- AddSimulate(expr->id());
+ AddSimulate(expr->id(), REMOVABLE_SIMULATE);
} else {
Push(load);
- AddSimulate(expr->id());
+ AddSimulate(expr->id(), REMOVABLE_SIMULATE);
Drop(1);
}
}
@@ -6694,18 +7296,20 @@ void HGraphBuilder::VisitProperty(Property* expr) {
}
-void HGraphBuilder::AddCheckPrototypeMaps(Handle<JSObject> holder,
- Handle<Map> receiver_map) {
+void HOptimizedGraphBuilder::AddCheckPrototypeMaps(Handle<JSObject> holder,
+ Handle<Map> receiver_map) {
if (!holder.is_null()) {
- AddInstruction(new(zone()) HCheckPrototypeMaps(
- Handle<JSObject>(JSObject::cast(receiver_map->prototype())), holder));
+ Handle<JSObject> prototype(JSObject::cast(receiver_map->prototype()));
+ AddInstruction(
+ new(zone()) HCheckPrototypeMaps(prototype, holder, zone()));
}
}
-void HGraphBuilder::AddCheckConstantFunction(Handle<JSObject> holder,
- HValue* receiver,
- Handle<Map> receiver_map) {
+void HOptimizedGraphBuilder::AddCheckConstantFunction(
+ Handle<JSObject> holder,
+ HValue* receiver,
+ Handle<Map> receiver_map) {
// Constant functions have the nice property that the map will change if they
// are overwritten. Therefore it is enough to check the map of the holder and
// its prototypes.
@@ -6747,10 +7351,11 @@ static int CompareHotness(void const* a, void const* b) {
}
-void HGraphBuilder::HandlePolymorphicCallNamed(Call* expr,
- HValue* receiver,
- SmallMapList* types,
- Handle<String> name) {
+void HOptimizedGraphBuilder::HandlePolymorphicCallNamed(
+ Call* expr,
+ HValue* receiver,
+ SmallMapList* types,
+ Handle<String> name) {
// TODO(ager): We should recognize when the prototype chains for different
// maps are identical. In that case we can avoid repeatedly generating the
// same prototype map checks.
@@ -6852,9 +7457,9 @@ void HGraphBuilder::HandlePolymorphicCallNamed(Call* expr,
}
-void HGraphBuilder::TraceInline(Handle<JSFunction> target,
- Handle<JSFunction> caller,
- const char* reason) {
+void HOptimizedGraphBuilder::TraceInline(Handle<JSFunction> target,
+ Handle<JSFunction> caller,
+ const char* reason) {
if (FLAG_trace_inlining) {
SmartArrayPointer<char> target_name =
target->shared()->DebugName()->ToCString();
@@ -6873,7 +7478,7 @@ void HGraphBuilder::TraceInline(Handle<JSFunction> target,
static const int kNotInlinable = 1000000000;
-int HGraphBuilder::InliningAstSize(Handle<JSFunction> target) {
+int HOptimizedGraphBuilder::InliningAstSize(Handle<JSFunction> target) {
if (!FLAG_use_inlining) return kNotInlinable;
// Precondition: call is monomorphic and we have found a target with the
@@ -6904,13 +7509,13 @@ int HGraphBuilder::InliningAstSize(Handle<JSFunction> target) {
}
-bool HGraphBuilder::TryInline(CallKind call_kind,
- Handle<JSFunction> target,
- int arguments_count,
- HValue* implicit_return_value,
- BailoutId ast_id,
- BailoutId return_id,
- InliningKind inlining_kind) {
+bool HOptimizedGraphBuilder::TryInline(CallKind call_kind,
+ Handle<JSFunction> target,
+ int arguments_count,
+ HValue* implicit_return_value,
+ BailoutId ast_id,
+ BailoutId return_id,
+ InliningKind inlining_kind) {
int nodes_added = InliningAstSize(target);
if (nodes_added == kNotInlinable) return false;
@@ -6921,8 +7526,6 @@ bool HGraphBuilder::TryInline(CallKind call_kind,
return false;
}
- Handle<SharedFunctionInfo> target_shared(target->shared());
-
#if !defined(V8_TARGET_ARCH_IA32)
// Target must be able to use caller's context.
CompilationInfo* outer_info = info();
@@ -6953,7 +7556,7 @@ bool HGraphBuilder::TryInline(CallKind call_kind,
for (FunctionState* state = function_state();
state != NULL;
state = state->outer()) {
- if (state->compilation_info()->closure()->shared() == *target_shared) {
+ if (*state->compilation_info()->closure() == *target) {
TraceInline(target, caller, "target is recursive");
return false;
}
@@ -6968,6 +7571,7 @@ bool HGraphBuilder::TryInline(CallKind call_kind,
// Parse and allocate variables.
CompilationInfo target_info(target, zone());
+ Handle<SharedFunctionInfo> target_shared(target->shared());
if (!ParserApi::Parse(&target_info, kNoParsingFlags) ||
!Scope::Analyze(&target_info)) {
if (target_info.isolate()->has_pending_exception()) {
@@ -7035,7 +7639,7 @@ bool HGraphBuilder::TryInline(CallKind call_kind,
TraceInline(target, caller, "could not generate deoptimization info");
return false;
}
- if (target_shared->scope_info() == ScopeInfo::Empty()) {
+ if (target_shared->scope_info() == ScopeInfo::Empty(isolate())) {
// The scope info might not have been set if a lazily compiled
// function is inlined before being called for the first time.
Handle<ScopeInfo> target_scope_info =
@@ -7067,13 +7671,15 @@ bool HGraphBuilder::TryInline(CallKind call_kind,
this, &target_info, &target_oracle, inlining_kind);
HConstant* undefined = graph()->GetConstantUndefined();
+ bool undefined_receiver = HEnvironment::UseUndefinedReceiver(
+ target, function, call_kind, inlining_kind);
HEnvironment* inner_env =
environment()->CopyForInlining(target,
arguments_count,
function,
undefined,
- call_kind,
- function_state()->inlining_kind());
+ function_state()->inlining_kind(),
+ undefined_receiver);
#ifdef V8_TARGET_ARCH_IA32
// IA32 only, overwrite the caller's context in the deoptimization
// environment with the correct one.
@@ -7107,10 +7713,10 @@ bool HGraphBuilder::TryInline(CallKind call_kind,
new(zone()) HEnterInlined(target,
arguments_count,
function,
- call_kind,
function_state()->inlining_kind(),
function->scope()->arguments(),
- arguments_values);
+ arguments_values,
+ undefined_receiver);
function_state()->set_entry(enter_inlined);
AddInstruction(enter_inlined);
@@ -7138,9 +7744,8 @@ bool HGraphBuilder::TryInline(CallKind call_kind,
inlined_count_ += nodes_added;
ASSERT(unoptimized_code->kind() == Code::FUNCTION);
- Handle<Object> maybe_type_info(unoptimized_code->type_feedback_info());
Handle<TypeFeedbackInfo> type_info(
- Handle<TypeFeedbackInfo>::cast(maybe_type_info));
+ TypeFeedbackInfo::cast(unoptimized_code->type_feedback_info()));
graph()->update_type_change_checksum(type_info->own_type_change_checksum());
TraceInline(target, caller, NULL);
@@ -7220,7 +7825,7 @@ bool HGraphBuilder::TryInline(CallKind call_kind,
}
-bool HGraphBuilder::TryInlineCall(Call* expr, bool drop_extra) {
+bool HOptimizedGraphBuilder::TryInlineCall(Call* expr, bool drop_extra) {
// The function call we are inlining is a method call if the call
// is a property call.
CallKind call_kind = (expr->expression()->AsProperty() == NULL)
@@ -7237,8 +7842,8 @@ bool HGraphBuilder::TryInlineCall(Call* expr, bool drop_extra) {
}
-bool HGraphBuilder::TryInlineConstruct(CallNew* expr,
- HValue* implicit_return_value) {
+bool HOptimizedGraphBuilder::TryInlineConstruct(CallNew* expr,
+ HValue* implicit_return_value) {
return TryInline(CALL_AS_FUNCTION,
expr->target(),
expr->arguments()->length(),
@@ -7249,8 +7854,8 @@ bool HGraphBuilder::TryInlineConstruct(CallNew* expr,
}
-bool HGraphBuilder::TryInlineGetter(Handle<JSFunction> getter,
- Property* prop) {
+bool HOptimizedGraphBuilder::TryInlineGetter(Handle<JSFunction> getter,
+ Property* prop) {
return TryInline(CALL_AS_METHOD,
getter,
0,
@@ -7261,9 +7866,9 @@ bool HGraphBuilder::TryInlineGetter(Handle<JSFunction> getter,
}
-bool HGraphBuilder::TryInlineSetter(Handle<JSFunction> setter,
- Assignment* assignment,
- HValue* implicit_return_value) {
+bool HOptimizedGraphBuilder::TryInlineSetter(Handle<JSFunction> setter,
+ Assignment* assignment,
+ HValue* implicit_return_value) {
return TryInline(CALL_AS_METHOD,
setter,
1,
@@ -7274,11 +7879,29 @@ bool HGraphBuilder::TryInlineSetter(Handle<JSFunction> setter,
}
-bool HGraphBuilder::TryInlineBuiltinFunctionCall(Call* expr, bool drop_extra) {
+bool HOptimizedGraphBuilder::TryInlineApply(Handle<JSFunction> function,
+ Call* expr,
+ int arguments_count) {
+ return TryInline(CALL_AS_METHOD,
+ function,
+ arguments_count,
+ NULL,
+ expr->id(),
+ expr->ReturnId(),
+ NORMAL_RETURN);
+}
+
+
+bool HOptimizedGraphBuilder::TryInlineBuiltinFunctionCall(Call* expr,
+ bool drop_extra) {
if (!expr->target()->shared()->HasBuiltinFunctionId()) return false;
BuiltinFunctionId id = expr->target()->shared()->builtin_function_id();
switch (id) {
+ case kMathExp:
+ if (!FLAG_fast_math) break;
+ // Fall through if FLAG_fast_math.
case kMathRound:
+ case kMathFloor:
case kMathAbs:
case kMathSqrt:
case kMathLog:
@@ -7289,8 +7912,8 @@ bool HGraphBuilder::TryInlineBuiltinFunctionCall(Call* expr, bool drop_extra) {
HValue* argument = Pop();
HValue* context = environment()->LookupContext();
Drop(1); // Receiver.
- HUnaryMathOperation* op =
- new(zone()) HUnaryMathOperation(context, argument, id);
+ HInstruction* op =
+ HUnaryMathOperation::New(zone(), context, argument, id);
op->set_position(expr->position());
if (drop_extra) Drop(1); // Optionally drop the function.
ast_context()->ReturnInstruction(op, expr->id());
@@ -7305,10 +7928,11 @@ bool HGraphBuilder::TryInlineBuiltinFunctionCall(Call* expr, bool drop_extra) {
}
-bool HGraphBuilder::TryInlineBuiltinMethodCall(Call* expr,
- HValue* receiver,
- Handle<Map> receiver_map,
- CheckType check_type) {
+bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
+ Call* expr,
+ HValue* receiver,
+ Handle<Map> receiver_map,
+ CheckType check_type) {
ASSERT(check_type != RECEIVER_MAP_CHECK || !receiver_map.is_null());
// Try to inline calls like Math.* as operations in the calling function.
if (!expr->target()->shared()->HasBuiltinFunctionId()) return false;
@@ -7324,20 +7948,24 @@ bool HGraphBuilder::TryInlineBuiltinMethodCall(Call* expr,
ASSERT(!expr->holder().is_null());
AddInstruction(new(zone()) HCheckPrototypeMaps(
oracle()->GetPrototypeForPrimitiveCheck(STRING_CHECK),
- expr->holder()));
- HStringCharCodeAt* char_code =
+ expr->holder(),
+ zone()));
+ HInstruction* char_code =
BuildStringCharCodeAt(context, string, index);
if (id == kStringCharCodeAt) {
ast_context()->ReturnInstruction(char_code, expr->id());
return true;
}
AddInstruction(char_code);
- HStringCharFromCode* result =
- new(zone()) HStringCharFromCode(context, char_code);
+ HInstruction* result =
+ HStringCharFromCode::New(zone(), context, char_code);
ast_context()->ReturnInstruction(result, expr->id());
return true;
}
break;
+ case kMathExp:
+ if (!FLAG_fast_math) break;
+ // Fall through if FLAG_fast_math.
case kMathRound:
case kMathFloor:
case kMathAbs:
@@ -7351,8 +7979,8 @@ bool HGraphBuilder::TryInlineBuiltinMethodCall(Call* expr,
HValue* argument = Pop();
HValue* context = environment()->LookupContext();
Drop(1); // Receiver.
- HUnaryMathOperation* op =
- new(zone()) HUnaryMathOperation(context, argument, id);
+ HInstruction* op =
+ HUnaryMathOperation::New(zone(), context, argument, id);
op->set_position(expr->position());
ast_context()->ReturnInstruction(op, expr->id());
return true;
@@ -7371,30 +7999,31 @@ bool HGraphBuilder::TryInlineBuiltinMethodCall(Call* expr,
double exponent = HConstant::cast(right)->DoubleValue();
if (exponent == 0.5) {
result =
- new(zone()) HUnaryMathOperation(context, left, kMathPowHalf);
+ HUnaryMathOperation::New(zone(), context, left, kMathPowHalf);
} else if (exponent == -0.5) {
HConstant* double_one =
- new(zone()) HConstant(Handle<Object>(Smi::FromInt(1)),
+ new(zone()) HConstant(Handle<Object>(Smi::FromInt(1),
+ isolate()),
Representation::Double());
AddInstruction(double_one);
- HUnaryMathOperation* square_root =
- new(zone()) HUnaryMathOperation(context, left, kMathPowHalf);
- AddInstruction(square_root);
+ HInstruction* sqrt =
+ HUnaryMathOperation::New(zone(), context, left, kMathPowHalf);
+ AddInstruction(sqrt);
// MathPowHalf doesn't have side effects so there's no need for
// an environment simulation here.
- ASSERT(!square_root->HasObservableSideEffects());
- result = new(zone()) HDiv(context, double_one, square_root);
+ ASSERT(!sqrt->HasObservableSideEffects());
+ result = HDiv::New(zone(), context, double_one, sqrt);
} else if (exponent == 2.0) {
- result = new(zone()) HMul(context, left, left);
+ result = HMul::New(zone(), context, left, left);
}
} else if (right->IsConstant() &&
HConstant::cast(right)->HasInteger32Value() &&
HConstant::cast(right)->Integer32Value() == 2) {
- result = new(zone()) HMul(context, left, left);
+ result = HMul::New(zone(), context, left, left);
}
if (result == NULL) {
- result = new(zone()) HPower(left, right);
+ result = HPower::New(zone(), left, right);
}
ast_context()->ReturnInstruction(result, expr->id());
return true;
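      // Illustration of the strength reduction above, assuming a constant
      // exponent:
      //   Math.pow(x, 0.5)  -> HUnaryMathOperation kMathPowHalf (sqrt-like)
      //   Math.pow(x, -0.5) -> HDiv(1.0, MathPowHalf(x))
      //   Math.pow(x, 2)    -> HMul(x, x)
      // All remaining exponents fall back to the generic HPower instruction.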
@@ -7422,7 +8051,8 @@ bool HGraphBuilder::TryInlineBuiltinMethodCall(Call* expr,
HValue* context = environment()->LookupContext();
HMathMinMax::Operation op = (id == kMathMin) ? HMathMinMax::kMathMin
: HMathMinMax::kMathMax;
- HMathMinMax* result = new(zone()) HMathMinMax(context, left, right, op);
+ HInstruction* result =
+ HMathMinMax::New(zone(), context, left, right, op);
ast_context()->ReturnInstruction(result, expr->id());
return true;
}
@@ -7435,7 +8065,7 @@ bool HGraphBuilder::TryInlineBuiltinMethodCall(Call* expr,
}
-bool HGraphBuilder::TryCallApply(Call* expr) {
+bool HOptimizedGraphBuilder::TryCallApply(Call* expr) {
Expression* callee = expr->expression();
Property* prop = callee->AsProperty();
ASSERT(prop != NULL);
@@ -7488,25 +8118,41 @@ bool HGraphBuilder::TryCallApply(Call* expr) {
return true;
} else {
    // We are inside an inlined function and we know exactly what is inside
- // arguments object.
- HValue* context = environment()->LookupContext();
-
- HValue* wrapped_receiver =
- AddInstruction(new(zone()) HWrapReceiver(receiver, function));
- PushAndAdd(new(zone()) HPushArgument(wrapped_receiver));
-
- HEnvironment* arguments_env = environment()->arguments_environment();
-
- int parameter_count = arguments_env->parameter_count();
- for (int i = 1; i < arguments_env->parameter_count(); i++) {
- PushAndAdd(new(zone()) HPushArgument(arguments_env->Lookup(i)));
+ // arguments object. But we need to be able to materialize at deopt.
+ // TODO(mstarzinger): For now we just ensure arguments are pushed
+ // right after HEnterInlined, but we could be smarter about this.
+ EnsureArgumentsArePushedForAccess();
+ ASSERT_EQ(environment()->arguments_environment()->parameter_count(),
+ function_state()->entry()->arguments_values()->length());
+ HEnterInlined* entry = function_state()->entry();
+ ZoneList<HValue*>* arguments_values = entry->arguments_values();
+ int arguments_count = arguments_values->length();
+ PushAndAdd(new(zone()) HWrapReceiver(receiver, function));
+ for (int i = 1; i < arguments_count; i++) {
+ Push(arguments_values->at(i));
+ }
+
+ Handle<JSFunction> known_function;
+ if (function->IsConstant()) {
+ HConstant* constant_function = HConstant::cast(function);
+ known_function = Handle<JSFunction>::cast(constant_function->handle());
+ int args_count = arguments_count - 1; // Excluding receiver.
+ if (TryInlineApply(known_function, expr, args_count)) return true;
+ }
+
+ Drop(arguments_count - 1);
+ PushAndAdd(new(zone()) HPushArgument(Pop()));
+ for (int i = 1; i < arguments_count; i++) {
+ PushAndAdd(new(zone()) HPushArgument(arguments_values->at(i)));
}
+ HValue* context = environment()->LookupContext();
HInvokeFunction* call = new(zone()) HInvokeFunction(
context,
function,
- parameter_count);
- Drop(parameter_count);
+ known_function,
+ arguments_count);
+ Drop(arguments_count);
call->set_position(expr->position());
ast_context()->ReturnInstruction(call, expr->id());
return true;
@@ -7514,7 +8160,56 @@ bool HGraphBuilder::TryCallApply(Call* expr) {
}
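// In sketch form, the rewritten branch above handles sites like
// "f.apply(this, arguments)" inside an inlined function: if the callee is a
// constant HValue, TryInlineApply() tries to inline it directly with the
// materialized argument values; otherwise those values are pushed again and
// a generic HInvokeFunction is emitted, still recording the known function.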
-void HGraphBuilder::VisitCall(Call* expr) {
+// Checks if all maps in |types| are from the same family, i.e., are elements
+// transitions of each other. Returns NULL if they are not from the same
+// family, otherwise the Map* from the list whose elements kind comes first
+// in the family's transition sequence.
+static Map* CheckSameElementsFamily(SmallMapList* types) {
+ if (types->length() <= 1) return NULL;
+ // Check if all maps belong to the same transition family.
+ Map* kinds[kFastElementsKindCount];
+ Map* first_map = *types->first();
+ ElementsKind first_kind = first_map->elements_kind();
+ if (!IsFastElementsKind(first_kind)) return NULL;
+ int first_index = GetSequenceIndexFromFastElementsKind(first_kind);
+ int last_index = first_index;
+
+ for (int i = 0; i < kFastElementsKindCount; i++) kinds[i] = NULL;
+
+ kinds[first_index] = first_map;
+
+ for (int i = 1; i < types->length(); ++i) {
+ Map* map = *types->at(i);
+ ElementsKind elements_kind = map->elements_kind();
+ if (!IsFastElementsKind(elements_kind)) return NULL;
+ int index = GetSequenceIndexFromFastElementsKind(elements_kind);
+ if (index < first_index) {
+ first_index = index;
+ } else if (index > last_index) {
+ last_index = index;
+ } else if (kinds[index] != map) {
+ return NULL;
+ }
+ kinds[index] = map;
+ }
+
+ Map* current = kinds[first_index];
+ for (int i = first_index + 1; i <= last_index; i++) {
+ Map* next = kinds[i];
+ if (next != NULL) {
+ ElementsKind current_kind = next->elements_kind();
+ if (next != current->LookupElementsTransitionMap(current_kind)) {
+ return NULL;
+ }
+ current = next;
+ }
+ }
+
+ return kinds[first_index];
+}
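// For illustration: maps that differ only in elements kind along the fast
// transition sequence (e.g. FAST_SMI_ELEMENTS -> FAST_DOUBLE_ELEMENTS ->
// FAST_ELEMENTS) form one family, and the map with the earliest kind is
// returned so that VisitCall below can treat the site as monomorphic.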
+
+
+void HOptimizedGraphBuilder::VisitCall(Call* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
@@ -7553,15 +8248,25 @@ void HGraphBuilder::VisitCall(Call* expr) {
CHECK_ALIVE(VisitExpressions(expr->arguments()));
Handle<String> name = prop->key()->AsLiteral()->AsPropertyName();
-
SmallMapList* types = expr->GetReceiverTypes();
- HValue* receiver =
- environment()->ExpressionStackAt(expr->arguments()->length());
- if (expr->IsMonomorphic()) {
- Handle<Map> receiver_map = (types == NULL || types->is_empty())
+ bool monomorphic = expr->IsMonomorphic();
+ Handle<Map> receiver_map;
+ if (monomorphic) {
+ receiver_map = (types == NULL || types->is_empty())
? Handle<Map>::null()
: types->first();
+ } else {
+ Map* family_map = CheckSameElementsFamily(types);
+ if (family_map != NULL) {
+ receiver_map = Handle<Map>(family_map);
+ monomorphic = expr->ComputeTarget(receiver_map, name);
+ }
+ }
+
+ HValue* receiver =
+ environment()->ExpressionStackAt(expr->arguments()->length());
+ if (monomorphic) {
if (TryInlineBuiltinMethodCall(expr,
receiver,
receiver_map,
@@ -7606,7 +8311,7 @@ void HGraphBuilder::VisitCall(Call* expr) {
VariableProxy* proxy = expr->expression()->AsVariableProxy();
bool global_call = proxy != NULL && proxy->var()->IsUnallocated();
- if (proxy != NULL && proxy->var()->is_possibly_eval()) {
+ if (proxy != NULL && proxy->var()->is_possibly_eval(isolate())) {
return Bailout("possible direct call to eval");
}
@@ -7670,7 +8375,9 @@ void HGraphBuilder::VisitCall(Call* expr) {
CHECK_ALIVE(VisitArgumentList(expr->arguments()));
call = new(zone()) HCallGlobal(context, var->name(), argument_count);
- if (var->is_qml_global()) static_cast<HCallGlobal*>(call)->set_qml_global(true);
+ if (var->is_qml_global()) {
+ static_cast<HCallGlobal*>(call)->set_qml_global(true);
+ }
Drop(argument_count);
}
@@ -7736,7 +8443,7 @@ static bool IsAllocationInlineable(Handle<JSFunction> constructor) {
}
-void HGraphBuilder::VisitCallNew(CallNew* expr) {
+void HOptimizedGraphBuilder::VisitCallNew(CallNew* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
@@ -7789,8 +8496,21 @@ void HGraphBuilder::VisitCallNew(CallNew* expr) {
CHECK_ALIVE(VisitArgument(expr->expression()));
HValue* constructor = HPushArgument::cast(Top())->argument();
CHECK_ALIVE(VisitArgumentList(expr->arguments()));
- HInstruction* call =
- new(zone()) HCallNew(context, constructor, argument_count);
+ HCallNew* call;
+ if (FLAG_optimize_constructed_arrays &&
+ !(expr->target().is_null()) &&
+ *(expr->target()) == isolate()->global_context()->array_function()) {
+ Handle<Object> feedback = oracle()->GetInfo(expr->CallNewFeedbackId());
+ ASSERT(feedback->IsSmi());
+ Handle<JSGlobalPropertyCell> cell =
+ isolate()->factory()->NewJSGlobalPropertyCell(feedback);
+ AddInstruction(new(zone()) HCheckFunction(constructor,
+ Handle<JSFunction>(isolate()->global_context()->array_function())));
+ call = new(zone()) HCallNewArray(context, constructor, argument_count,
+ cell);
+ } else {
+ call = new(zone()) HCallNew(context, constructor, argument_count);
+ }
Drop(argument_count);
call->set_position(expr->position());
return ast_context()->ReturnInstruction(call, expr->id());
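  // Sketch of the new construct path: with FLAG_optimize_constructed_arrays
  // set, "new Array(...)" sites that target the global Array function emit an
  // HCallNewArray carrying a type-feedback cell, letting later code
  // specialize on the elements kind produced at this site; all other
  // construct sites keep the plain HCallNew.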
@@ -7800,20 +8520,21 @@ void HGraphBuilder::VisitCallNew(CallNew* expr) {
// Support for generating inlined runtime functions.
-// Lookup table for generators for runtime calls that are generated inline.
-// Elements of the table are member pointers to functions of HGraphBuilder.
+// Lookup table for generators for runtime calls that are generated inline.
+// Elements of the table are member pointers to functions of
+// HOptimizedGraphBuilder.
#define INLINE_FUNCTION_GENERATOR_ADDRESS(Name, argc, ressize) \
- &HGraphBuilder::Generate##Name,
+ &HOptimizedGraphBuilder::Generate##Name,
-const HGraphBuilder::InlineFunctionGenerator
- HGraphBuilder::kInlineFunctionGenerators[] = {
+const HOptimizedGraphBuilder::InlineFunctionGenerator
+ HOptimizedGraphBuilder::kInlineFunctionGenerators[] = {
INLINE_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_ADDRESS)
INLINE_RUNTIME_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_ADDRESS)
};
#undef INLINE_FUNCTION_GENERATOR_ADDRESS
-void HGraphBuilder::VisitCallRuntime(CallRuntime* expr) {
+void HOptimizedGraphBuilder::VisitCallRuntime(CallRuntime* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
@@ -7851,7 +8572,7 @@ void HGraphBuilder::VisitCallRuntime(CallRuntime* expr) {
}
-void HGraphBuilder::VisitUnaryOperation(UnaryOperation* expr) {
+void HOptimizedGraphBuilder::VisitUnaryOperation(UnaryOperation* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
@@ -7867,7 +8588,7 @@ void HGraphBuilder::VisitUnaryOperation(UnaryOperation* expr) {
}
}
-void HGraphBuilder::VisitDelete(UnaryOperation* expr) {
+void HOptimizedGraphBuilder::VisitDelete(UnaryOperation* expr) {
Property* prop = expr->expression()->AsProperty();
VariableProxy* proxy = expr->expression()->AsVariableProxy();
if (prop != NULL) {
@@ -7902,13 +8623,13 @@ void HGraphBuilder::VisitDelete(UnaryOperation* expr) {
}
-void HGraphBuilder::VisitVoid(UnaryOperation* expr) {
+void HOptimizedGraphBuilder::VisitVoid(UnaryOperation* expr) {
CHECK_ALIVE(VisitForEffect(expr->expression()));
return ast_context()->ReturnValue(graph()->GetConstantUndefined());
}
-void HGraphBuilder::VisitTypeof(UnaryOperation* expr) {
+void HOptimizedGraphBuilder::VisitTypeof(UnaryOperation* expr) {
CHECK_ALIVE(VisitForTypeOf(expr->expression()));
HValue* value = Pop();
HValue* context = environment()->LookupContext();
@@ -7917,49 +8638,48 @@ void HGraphBuilder::VisitTypeof(UnaryOperation* expr) {
}
-void HGraphBuilder::VisitAdd(UnaryOperation* expr) {
+void HOptimizedGraphBuilder::VisitAdd(UnaryOperation* expr) {
CHECK_ALIVE(VisitForValue(expr->expression()));
HValue* value = Pop();
HValue* context = environment()->LookupContext();
HInstruction* instr =
- new(zone()) HMul(context, value, graph_->GetConstant1());
+ HMul::New(zone(), context, value, graph()->GetConstant1());
return ast_context()->ReturnInstruction(instr, expr->id());
}
-void HGraphBuilder::VisitSub(UnaryOperation* expr) {
+void HOptimizedGraphBuilder::VisitSub(UnaryOperation* expr) {
CHECK_ALIVE(VisitForValue(expr->expression()));
HValue* value = Pop();
HValue* context = environment()->LookupContext();
HInstruction* instr =
- new(zone()) HMul(context, value, graph_->GetConstantMinus1());
+ HMul::New(zone(), context, value, graph()->GetConstantMinus1());
TypeInfo info = oracle()->UnaryType(expr);
+ Representation rep = ToRepresentation(info);
if (info.IsUninitialized()) {
- AddInstruction(new(zone()) HSoftDeoptimize);
- current_block()->MarkAsDeoptimizing();
+ AddSoftDeoptimize();
info = TypeInfo::Unknown();
}
- Representation rep = ToRepresentation(info);
- TraceRepresentation(expr->op(), info, instr, rep);
- instr->AssumeRepresentation(rep);
+ if (instr->IsBinaryOperation()) {
+ HBinaryOperation::cast(instr)->set_observed_input_representation(rep, rep);
+ }
return ast_context()->ReturnInstruction(instr, expr->id());
}
-void HGraphBuilder::VisitBitNot(UnaryOperation* expr) {
+void HOptimizedGraphBuilder::VisitBitNot(UnaryOperation* expr) {
CHECK_ALIVE(VisitForValue(expr->expression()));
HValue* value = Pop();
TypeInfo info = oracle()->UnaryType(expr);
if (info.IsUninitialized()) {
- AddInstruction(new(zone()) HSoftDeoptimize);
- current_block()->MarkAsDeoptimizing();
+ AddSoftDeoptimize();
}
HInstruction* instr = new(zone()) HBitNot(value);
return ast_context()->ReturnInstruction(instr, expr->id());
}
-void HGraphBuilder::VisitNot(UnaryOperation* expr) {
+void HOptimizedGraphBuilder::VisitNot(UnaryOperation* expr) {
if (ast_context()->IsTest()) {
TestContext* context = TestContext::cast(ast_context());
VisitForControl(expr->expression(),
@@ -8003,8 +8723,9 @@ void HGraphBuilder::VisitNot(UnaryOperation* expr) {
}
-HInstruction* HGraphBuilder::BuildIncrement(bool returns_original_input,
- CountOperation* expr) {
+HInstruction* HOptimizedGraphBuilder::BuildIncrement(
+ bool returns_original_input,
+ CountOperation* expr) {
// The input to the count operation is on top of the expression stack.
TypeInfo info = oracle()->IncrementType(expr);
Representation rep = ToRepresentation(info);
@@ -8026,18 +8747,21 @@ HInstruction* HGraphBuilder::BuildIncrement(bool returns_original_input,
// to simulate the expression stack after this instruction.
// Any later failures deopt to the load of the input or earlier.
HConstant* delta = (expr->op() == Token::INC)
- ? graph_->GetConstant1()
- : graph_->GetConstantMinus1();
+ ? graph()->GetConstant1()
+ : graph()->GetConstantMinus1();
HValue* context = environment()->LookupContext();
- HInstruction* instr = new(zone()) HAdd(context, Top(), delta);
- TraceRepresentation(expr->op(), info, instr, rep);
+ HInstruction* instr = HAdd::New(zone(), context, Top(), delta);
+ // We can't insert a simulate here, because it would break deoptimization;
+ // hence the HAdd must not have side effects, and we freeze its
+ // representation.
instr->AssumeRepresentation(rep);
+ instr->ClearAllSideEffects();
AddInstruction(instr);
return instr;
}
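// E.g. for "x++" this emits: load of x, then the HAdd above with a frozen
// representation and all side effects cleared, so that no HSimulate is
// needed between the load and the increment and deoptimization stays safe.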
-void HGraphBuilder::VisitCountOperation(CountOperation* expr) {
+void HOptimizedGraphBuilder::VisitCountOperation(CountOperation* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
@@ -8105,7 +8829,7 @@ void HGraphBuilder::VisitCountOperation(CountOperation* expr) {
new(zone()) HStoreContextSlot(context, var->index(), mode, after);
AddInstruction(instr);
if (instr->HasObservableSideEffects()) {
- AddSimulate(expr->AssignmentId());
+ AddSimulate(expr->AssignmentId(), REMOVABLE_SIMULATE);
}
break;
}
@@ -8121,7 +8845,7 @@ void HGraphBuilder::VisitCountOperation(CountOperation* expr) {
if (prop->key()->IsPropertyName()) {
// Named property.
- if (returns_original_input) Push(graph_->GetConstantUndefined());
+ if (returns_original_input) Push(graph()->GetConstantUndefined());
CHECK_ALIVE(VisitForValue(prop->obj()));
HValue* object = Top();
@@ -8146,13 +8870,15 @@ void HGraphBuilder::VisitCountOperation(CountOperation* expr) {
load = BuildLoadNamedGeneric(object, name, prop);
}
PushAndAdd(load);
- if (load->HasObservableSideEffects()) AddSimulate(prop->LoadId());
+ if (load->HasObservableSideEffects()) {
+ AddSimulate(prop->LoadId(), REMOVABLE_SIMULATE);
+ }
after = BuildIncrement(returns_original_input, expr);
input = Pop();
HInstruction* store;
- if (!monomorphic) {
+ if (!monomorphic || map->is_observed()) {
// If we don't know the monomorphic type, do a generic store.
CHECK_ALIVE(store = BuildStoreNamedGeneric(object, name, after));
} else {
@@ -8174,11 +8900,13 @@ void HGraphBuilder::VisitCountOperation(CountOperation* expr) {
// necessary.
environment()->SetExpressionStackAt(0, after);
if (returns_original_input) environment()->SetExpressionStackAt(1, input);
- if (store->HasObservableSideEffects()) AddSimulate(expr->AssignmentId());
+ if (store->HasObservableSideEffects()) {
+ AddSimulate(expr->AssignmentId(), REMOVABLE_SIMULATE);
+ }
} else {
// Keyed property.
- if (returns_original_input) Push(graph_->GetConstantUndefined());
+ if (returns_original_input) Push(graph()->GetConstantUndefined());
CHECK_ALIVE(VisitForValue(prop->obj()));
CHECK_ALIVE(VisitForValue(prop->key()));
@@ -8191,7 +8919,7 @@ void HGraphBuilder::VisitCountOperation(CountOperation* expr) {
false, // is_store
&has_side_effects);
Push(load);
- if (has_side_effects) AddSimulate(prop->LoadId());
+ if (has_side_effects) AddSimulate(prop->LoadId(), REMOVABLE_SIMULATE);
after = BuildIncrement(returns_original_input, expr);
input = Pop();
@@ -8209,7 +8937,7 @@ void HGraphBuilder::VisitCountOperation(CountOperation* expr) {
environment()->SetExpressionStackAt(0, after);
if (returns_original_input) environment()->SetExpressionStackAt(1, input);
ASSERT(has_side_effects); // Stores always have side effects.
- AddSimulate(expr->AssignmentId());
+ AddSimulate(expr->AssignmentId(), REMOVABLE_SIMULATE);
}
}
@@ -8218,15 +8946,27 @@ void HGraphBuilder::VisitCountOperation(CountOperation* expr) {
}
-HStringCharCodeAt* HGraphBuilder::BuildStringCharCodeAt(HValue* context,
- HValue* string,
- HValue* index) {
+HInstruction* HOptimizedGraphBuilder::BuildStringCharCodeAt(
+ HValue* context,
+ HValue* string,
+ HValue* index) {
+ if (string->IsConstant() && index->IsConstant()) {
+ HConstant* c_string = HConstant::cast(string);
+ HConstant* c_index = HConstant::cast(index);
+ if (c_string->HasStringValue() && c_index->HasNumberValue()) {
+ int32_t i = c_index->NumberValueAsInteger32();
+ Handle<String> s = c_string->StringValue();
+ if (i < 0 || i >= s->length()) {
+ return new(zone()) HConstant(OS::nan_value(), Representation::Double());
+ }
+ return new(zone()) HConstant(s->Get(i), Representation::Integer32());
+ }
+ }
AddInstruction(new(zone()) HCheckNonSmi(string));
AddInstruction(HCheckInstanceType::NewIsString(string, zone()));
- HStringLength* length = new(zone()) HStringLength(string);
+ HInstruction* length = HStringLength::New(zone(), string);
AddInstruction(length);
- HInstruction* checked_index =
- AddInstruction(new(zone()) HBoundsCheck(index, length));
+ HInstruction* checked_index = AddBoundsCheck(index, length);
return new(zone()) HStringCharCodeAt(context, string, checked_index);
}
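// With both operands constant, the new fast path above folds the access at
// graph-build time: "abc".charCodeAt(1) becomes the constant 98, and an
// out-of-range index such as "abc".charCodeAt(7) becomes NaN, matching
// String.prototype.charCodeAt.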
@@ -8235,6 +8975,7 @@ static bool ShiftAmountsAllowReplaceByRotate(HValue* sa,
HValue* const32_minus_sa) {
if (!const32_minus_sa->IsSub()) return false;
HSub* sub = HSub::cast(const32_minus_sa);
+ if (sa != sub->right()) return false;
HValue* const32 = sub->left();
if (!const32->IsConstant() ||
HConstant::cast(const32)->Integer32Value() != 32) {
@@ -8248,10 +8989,10 @@ static bool ShiftAmountsAllowReplaceByRotate(HValue* sa,
// directions that can be replaced by one rotate right instruction or not.
// Returns the operand and the shift amount for the rotate instruction in the
// former case.
-bool HGraphBuilder::MatchRotateRight(HValue* left,
- HValue* right,
- HValue** operand,
- HValue** shift_amount) {
+bool HOptimizedGraphBuilder::MatchRotateRight(HValue* left,
+ HValue* right,
+ HValue** operand,
+ HValue** shift_amount) {
HShl* shl;
HShr* shr;
if (left->IsShl() && right->IsShr()) {
@@ -8263,6 +9004,7 @@ bool HGraphBuilder::MatchRotateRight(HValue* left,
} else {
return false;
}
+ if (shl->left() != shr->left()) return false;
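  // The added check rejects mismatched operands: only the pattern
  // (x << (32 - sa)) | (x >>> sa), with the same x on both shifts, may be
  // replaced by a single rotate-right of x by sa.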
if (!ShiftAmountsAllowReplaceByRotate(shl->right(), shr->right()) &&
!ShiftAmountsAllowReplaceByRotate(shr->right(), shl->right())) {
@@ -8286,89 +9028,83 @@ bool CanBeZero(HValue *right) {
}
-HInstruction* HGraphBuilder::BuildBinaryOperation(BinaryOperation* expr,
- HValue* left,
- HValue* right) {
+HInstruction* HOptimizedGraphBuilder::BuildBinaryOperation(
+ BinaryOperation* expr,
+ HValue* left,
+ HValue* right) {
HValue* context = environment()->LookupContext();
- TypeInfo info = oracle()->BinaryType(expr);
- if (info.IsUninitialized()) {
- AddInstruction(new(zone()) HSoftDeoptimize);
- current_block()->MarkAsDeoptimizing();
- info = TypeInfo::Unknown();
+ TypeInfo left_info, right_info, result_info, combined_info;
+ oracle()->BinaryType(expr, &left_info, &right_info, &result_info);
+ Representation left_rep = ToRepresentation(left_info);
+ Representation right_rep = ToRepresentation(right_info);
+ Representation result_rep = ToRepresentation(result_info);
+ if (left_info.IsUninitialized()) {
+ // Can't have initialized one but not the other.
+ ASSERT(right_info.IsUninitialized());
+ AddSoftDeoptimize();
+ left_info = right_info = TypeInfo::Unknown();
}
HInstruction* instr = NULL;
switch (expr->op()) {
case Token::ADD:
- if (info.IsString()) {
+ if (left_info.IsString() && right_info.IsString()) {
AddInstruction(new(zone()) HCheckNonSmi(left));
AddInstruction(HCheckInstanceType::NewIsString(left, zone()));
AddInstruction(new(zone()) HCheckNonSmi(right));
AddInstruction(HCheckInstanceType::NewIsString(right, zone()));
- instr = new(zone()) HStringAdd(context, left, right);
+ instr = HStringAdd::New(zone(), context, left, right);
} else {
- instr = HAdd::NewHAdd(zone(), context, left, right);
+ instr = HAdd::New(zone(), context, left, right);
}
break;
case Token::SUB:
- instr = HSub::NewHSub(zone(), context, left, right);
+ instr = HSub::New(zone(), context, left, right);
break;
case Token::MUL:
- instr = HMul::NewHMul(zone(), context, left, right);
+ instr = HMul::New(zone(), context, left, right);
break;
case Token::MOD:
- instr = HMod::NewHMod(zone(), context, left, right);
+ instr = HMod::New(zone(), context, left, right);
break;
case Token::DIV:
- instr = HDiv::NewHDiv(zone(), context, left, right);
+ instr = HDiv::New(zone(), context, left, right);
break;
case Token::BIT_XOR:
case Token::BIT_AND:
- instr = HBitwise::NewHBitwise(zone(), expr->op(), context, left, right);
+ instr = HBitwise::New(zone(), expr->op(), context, left, right);
break;
case Token::BIT_OR: {
HValue* operand, *shift_amount;
- if (info.IsInteger32() &&
+ if (left_info.IsInteger32() && right_info.IsInteger32() &&
MatchRotateRight(left, right, &operand, &shift_amount)) {
instr = new(zone()) HRor(context, operand, shift_amount);
} else {
- instr = HBitwise::NewHBitwise(zone(), expr->op(), context, left, right);
+ instr = HBitwise::New(zone(), expr->op(), context, left, right);
}
break;
}
case Token::SAR:
- instr = HSar::NewHSar(zone(), context, left, right);
+ instr = HSar::New(zone(), context, left, right);
break;
case Token::SHR:
- instr = HShr::NewHShr(zone(), context, left, right);
+ instr = HShr::New(zone(), context, left, right);
if (FLAG_opt_safe_uint32_operations && instr->IsShr() &&
CanBeZero(right)) {
graph()->RecordUint32Instruction(instr);
}
break;
case Token::SHL:
- instr = HShl::NewHShl(zone(), context, left, right);
+ instr = HShl::New(zone(), context, left, right);
break;
default:
UNREACHABLE();
}
- // If we hit an uninitialized binary op stub we will get type info
- // for a smi operation. If one of the operands is a constant string
- // do not generate code assuming it is a smi operation.
- if (info.IsSmi() &&
- ((left->IsConstant() && HConstant::cast(left)->handle()->IsString()) ||
- (right->IsConstant() && HConstant::cast(right)->handle()->IsString()))) {
- return instr;
- }
- Representation rep = ToRepresentation(info);
- // We only generate either int32 or generic tagged bitwise operations.
- if (instr->IsBitwiseBinaryOperation()) {
- HBitwiseBinaryOperation::cast(instr)->
- InitializeObservedInputRepresentation(rep);
- if (rep.IsDouble()) rep = Representation::Integer32();
+ if (instr->IsBinaryOperation()) {
+ HBinaryOperation* binop = HBinaryOperation::cast(instr);
+ binop->set_observed_input_representation(left_rep, right_rep);
+ binop->initialize_output_representation(result_rep);
}
- TraceRepresentation(expr->op(), info, instr, rep);
- instr->AssumeRepresentation(rep);
return instr;
}
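// The representation plumbing above records per-operand feedback: e.g. for
// "a + b" observed with int32 a and double b, the HAdd gets
// (Integer32, Double) as observed input representations and Double as its
// initial output representation, replacing the old single combined TypeInfo.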
@@ -8381,13 +9117,15 @@ static bool IsClassOfTest(CompareOperation* expr) {
Literal* literal = expr->right()->AsLiteral();
if (literal == NULL) return false;
if (!literal->handle()->IsString()) return false;
- if (!call->name()->IsEqualTo(CStrVector("_ClassOf"))) return false;
+ if (!call->name()->IsOneByteEqualTo(STATIC_ASCII_VECTOR("_ClassOf"))) {
+ return false;
+ }
ASSERT(call->arguments()->length() == 1);
return true;
}
-void HGraphBuilder::VisitBinaryOperation(BinaryOperation* expr) {
+void HOptimizedGraphBuilder::VisitBinaryOperation(BinaryOperation* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
@@ -8403,7 +9141,7 @@ void HGraphBuilder::VisitBinaryOperation(BinaryOperation* expr) {
}
-void HGraphBuilder::VisitComma(BinaryOperation* expr) {
+void HOptimizedGraphBuilder::VisitComma(BinaryOperation* expr) {
CHECK_ALIVE(VisitForEffect(expr->left()));
// Visit the right subexpression in the same AST context as the entire
// expression.
@@ -8411,7 +9149,7 @@ void HGraphBuilder::VisitComma(BinaryOperation* expr) {
}
-void HGraphBuilder::VisitLogicalExpression(BinaryOperation* expr) {
+void HOptimizedGraphBuilder::VisitLogicalExpression(BinaryOperation* expr) {
bool is_logical_and = expr->op() == Token::AND;
if (ast_context()->IsTest()) {
TestContext* context = TestContext::cast(ast_context());
@@ -8438,6 +9176,17 @@ void HGraphBuilder::VisitLogicalExpression(BinaryOperation* expr) {
} else if (ast_context()->IsValue()) {
CHECK_ALIVE(VisitForValue(expr->left()));
ASSERT(current_block() != NULL);
+ HValue* left_value = Top();
+
+ if (left_value->IsConstant()) {
+ HConstant* left_constant = HConstant::cast(left_value);
+ if ((is_logical_and && left_constant->ToBoolean()) ||
+ (!is_logical_and && !left_constant->ToBoolean())) {
+ Drop(1); // left_value.
+ CHECK_BAILOUT(VisitForValue(expr->right()));
+ }
+ return ast_context()->ReturnValue(Pop());
+ }
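    // The constant fast path above short-circuits at graph-build time: for
    // "true && f()" only f() is visited for its value, while "false && f()"
    // yields the constant false without creating the branch blocks below.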
// We need an extra block to maintain edge-split form.
HBasicBlock* empty_block = graph()->CreateBasicBlock();
@@ -8445,8 +9194,8 @@ void HGraphBuilder::VisitLogicalExpression(BinaryOperation* expr) {
TypeFeedbackId test_id = expr->left()->test_id();
ToBooleanStub::Types expected(oracle()->ToBooleanTypes(test_id));
HBranch* test = is_logical_and
- ? new(zone()) HBranch(Top(), eval_right, empty_block, expected)
- : new(zone()) HBranch(Top(), empty_block, eval_right, expected);
+ ? new(zone()) HBranch(left_value, eval_right, empty_block, expected)
+ : new(zone()) HBranch(left_value, empty_block, eval_right, expected);
current_block()->Finish(test);
set_current_block(eval_right);
@@ -8501,7 +9250,7 @@ void HGraphBuilder::VisitLogicalExpression(BinaryOperation* expr) {
}
-void HGraphBuilder::VisitArithmeticExpression(BinaryOperation* expr) {
+void HOptimizedGraphBuilder::VisitArithmeticExpression(BinaryOperation* expr) {
CHECK_ALIVE(VisitForValue(expr->left()));
CHECK_ALIVE(VisitForValue(expr->right()));
HValue* right = Pop();
@@ -8512,27 +9261,8 @@ void HGraphBuilder::VisitArithmeticExpression(BinaryOperation* expr) {
}
-void HGraphBuilder::TraceRepresentation(Token::Value op,
- TypeInfo info,
- HValue* value,
- Representation rep) {
- if (!FLAG_trace_representation) return;
- // TODO(svenpanne) Under which circumstances are we actually not flexible?
- // At first glance, this looks a bit weird...
- bool flexible = value->CheckFlag(HValue::kFlexibleRepresentation);
- PrintF("Operation %s has type info %s, %schange representation assumption "
- "for %s (ID %d) from %s to %s\n",
- Token::Name(op),
- info.ToString(),
- flexible ? "" : " DO NOT ",
- value->Mnemonic(),
- graph_->GetMaximumValueID(),
- value->representation().Mnemonic(),
- rep.Mnemonic());
-}
-
-
-Representation HGraphBuilder::ToRepresentation(TypeInfo info) {
+Representation HOptimizedGraphBuilder::ToRepresentation(TypeInfo info) {
+ if (info.IsUninitialized()) return Representation::None();
if (info.IsSmi()) return Representation::Integer32();
if (info.IsInteger32()) return Representation::Integer32();
if (info.IsDouble()) return Representation::Double();
@@ -8541,9 +9271,9 @@ Representation HGraphBuilder::ToRepresentation(TypeInfo info) {
}
-void HGraphBuilder::HandleLiteralCompareTypeof(CompareOperation* expr,
- HTypeof* typeof_expr,
- Handle<String> check) {
+void HOptimizedGraphBuilder::HandleLiteralCompareTypeof(CompareOperation* expr,
+ HTypeof* typeof_expr,
+ Handle<String> check) {
// Note: The HTypeof itself is removed during canonicalization, if possible.
HValue* value = typeof_expr->value();
HTypeofIsAndBranch* instr = new(zone()) HTypeofIsAndBranch(value, check);
@@ -8613,7 +9343,7 @@ static bool IsLiteralCompareBool(HValue* left,
}
-void HGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
+void HOptimizedGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
@@ -8630,13 +9360,16 @@ void HGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
return ast_context()->ReturnControl(instr, expr->id());
}
- TypeInfo type_info = oracle()->CompareType(expr);
+ TypeInfo left_type, right_type, overall_type_info;
+ oracle()->CompareType(expr, &left_type, &right_type, &overall_type_info);
+ Representation combined_rep = ToRepresentation(overall_type_info);
+ Representation left_rep = ToRepresentation(left_type);
+ Representation right_rep = ToRepresentation(right_type);
// Check if this expression was ever executed according to type feedback.
// Note that for the special typeof/null/undefined cases we get unknown here.
- if (type_info.IsUninitialized()) {
- AddInstruction(new(zone()) HSoftDeoptimize);
- current_block()->MarkAsDeoptimizing();
- type_info = TypeInfo::Unknown();
+ if (overall_type_info.IsUninitialized()) {
+ AddSoftDeoptimize();
+ overall_type_info = left_type = right_type = TypeInfo::Unknown();
}
CHECK_ALIVE(VisitForValue(expr->left()));
@@ -8708,7 +9441,7 @@ void HGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
HIn* result = new(zone()) HIn(context, left, right);
result->set_position(expr->position());
return ast_context()->ReturnInstruction(result, expr->id());
- } else if (type_info.IsNonPrimitive()) {
+ } else if (overall_type_info.IsNonPrimitive()) {
switch (op) {
case Token::EQ:
case Token::EQ_STRICT: {
@@ -8735,37 +9468,37 @@ void HGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
default:
return Bailout("Unsupported non-primitive compare");
}
- } else if (type_info.IsString() && oracle()->IsSymbolCompare(expr) &&
- (op == Token::EQ || op == Token::EQ_STRICT)) {
+ } else if (overall_type_info.IsInternalizedString() &&
+ Token::IsEqualityOp(op)) {
AddInstruction(new(zone()) HCheckNonSmi(left));
- AddInstruction(HCheckInstanceType::NewIsSymbol(left, zone()));
+ AddInstruction(HCheckInstanceType::NewIsInternalizedString(left, zone()));
AddInstruction(new(zone()) HCheckNonSmi(right));
- AddInstruction(HCheckInstanceType::NewIsSymbol(right, zone()));
+ AddInstruction(HCheckInstanceType::NewIsInternalizedString(right, zone()));
HCompareObjectEqAndBranch* result =
new(zone()) HCompareObjectEqAndBranch(left, right);
result->set_position(expr->position());
return ast_context()->ReturnControl(result, expr->id());
} else {
- Representation r = ToRepresentation(type_info);
- if (r.IsTagged()) {
+ if (combined_rep.IsTagged() || combined_rep.IsNone()) {
HCompareGeneric* result =
new(zone()) HCompareGeneric(context, left, right, op);
+ result->set_observed_input_representation(left_rep, right_rep);
result->set_position(expr->position());
return ast_context()->ReturnInstruction(result, expr->id());
} else {
HCompareIDAndBranch* result =
new(zone()) HCompareIDAndBranch(left, right, op);
+ result->set_observed_input_representation(left_rep, right_rep);
result->set_position(expr->position());
- result->SetInputRepresentation(r);
return ast_context()->ReturnControl(result, expr->id());
}
}
}
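
The rewritten dispatch records separate left and right representations from type feedback and keys the fast path on the combined one: tagged or unknown operands fall back to the generic HCompareGeneric stub, while Integer32 and Double comparisons take the specialized HCompareIDAndBranch. A standalone sketch of that decision (the enum stands in for V8's Representation class):

// Stand-in for Representation; only the distinction used above matters.
enum class Rep { kNone, kTagged, kInteger32, kDouble };

// True when the specialized compare-and-branch can be emitted instead of
// the generic compare stub.
static bool UseTypedCompare(Rep combined) {
  return combined != Rep::kTagged && combined != Rep::kNone;
}
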
-void HGraphBuilder::HandleLiteralCompareNil(CompareOperation* expr,
- HValue* value,
- NilValue nil) {
+void HOptimizedGraphBuilder::HandleLiteralCompareNil(CompareOperation* expr,
+ HValue* value,
+ NilValue nil) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
@@ -8777,7 +9510,7 @@ void HGraphBuilder::HandleLiteralCompareNil(CompareOperation* expr,
}
-HInstruction* HGraphBuilder::BuildThisFunction() {
+HInstruction* HOptimizedGraphBuilder::BuildThisFunction() {
// If we share optimized code between different closures, the
// this-function is not a constant, except inside an inlined body.
if (function_state()->outer() != NULL) {
@@ -8790,7 +9523,7 @@ HInstruction* HGraphBuilder::BuildThisFunction() {
}
-void HGraphBuilder::VisitThisFunction(ThisFunction* expr) {
+void HOptimizedGraphBuilder::VisitThisFunction(ThisFunction* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
@@ -8799,7 +9532,8 @@ void HGraphBuilder::VisitThisFunction(ThisFunction* expr) {
}
-void HGraphBuilder::VisitDeclarations(ZoneList<Declaration*>* declarations) {
+void HOptimizedGraphBuilder::VisitDeclarations(
+ ZoneList<Declaration*>* declarations) {
ASSERT(globals_.is_empty());
AstVisitor::VisitDeclarations(declarations);
if (!globals_.is_empty()) {
@@ -8817,7 +9551,8 @@ void HGraphBuilder::VisitDeclarations(ZoneList<Declaration*>* declarations) {
}
-void HGraphBuilder::VisitVariableDeclaration(VariableDeclaration* declaration) {
+void HOptimizedGraphBuilder::VisitVariableDeclaration(
+ VariableDeclaration* declaration) {
VariableProxy* proxy = declaration->proxy();
VariableMode mode = declaration->mode();
Variable* variable = proxy->var();
@@ -8828,7 +9563,8 @@ void HGraphBuilder::VisitVariableDeclaration(VariableDeclaration* declaration) {
globals_.Add(variable->binding_needs_init()
? isolate()->factory()->the_hole_value()
: isolate()->factory()->undefined_value(), zone());
- globals_.Add(isolate()->factory()->ToBoolean(variable->is_qml_global()), zone());
+ globals_.Add(isolate()->factory()->ToBoolean(variable->is_qml_global()),
+ zone());
return;
case Variable::PARAMETER:
case Variable::LOCAL:
@@ -8844,7 +9580,9 @@ void HGraphBuilder::VisitVariableDeclaration(VariableDeclaration* declaration) {
HStoreContextSlot* store = new(zone()) HStoreContextSlot(
context, variable->index(), HStoreContextSlot::kNoCheck, value);
AddInstruction(store);
- if (store->HasObservableSideEffects()) AddSimulate(proxy->id());
+ if (store->HasObservableSideEffects()) {
+ AddSimulate(proxy->id(), REMOVABLE_SIMULATE);
+ }
}
break;
case Variable::LOOKUP:
@@ -8853,7 +9591,8 @@ void HGraphBuilder::VisitVariableDeclaration(VariableDeclaration* declaration) {
}
-void HGraphBuilder::VisitFunctionDeclaration(FunctionDeclaration* declaration) {
+void HOptimizedGraphBuilder::VisitFunctionDeclaration(
+ FunctionDeclaration* declaration) {
VariableProxy* proxy = declaration->proxy();
Variable* variable = proxy->var();
switch (variable->location()) {
@@ -8864,7 +9603,8 @@ void HGraphBuilder::VisitFunctionDeclaration(FunctionDeclaration* declaration) {
// Check for stack-overflow exception.
if (function.is_null()) return SetStackOverflow();
globals_.Add(function, zone());
- globals_.Add(isolate()->factory()->ToBoolean(variable->is_qml_global()), zone());
+ globals_.Add(isolate()->factory()->ToBoolean(variable->is_qml_global()),
+ zone());
return;
}
case Variable::PARAMETER:
@@ -8881,7 +9621,9 @@ void HGraphBuilder::VisitFunctionDeclaration(FunctionDeclaration* declaration) {
HStoreContextSlot* store = new(zone()) HStoreContextSlot(
context, variable->index(), HStoreContextSlot::kNoCheck, value);
AddInstruction(store);
- if (store->HasObservableSideEffects()) AddSimulate(proxy->id());
+ if (store->HasObservableSideEffects()) {
+ AddSimulate(proxy->id(), REMOVABLE_SIMULATE);
+ }
break;
}
case Variable::LOOKUP:
@@ -8890,44 +9632,52 @@ void HGraphBuilder::VisitFunctionDeclaration(FunctionDeclaration* declaration) {
}
-void HGraphBuilder::VisitModuleDeclaration(ModuleDeclaration* declaration) {
+void HOptimizedGraphBuilder::VisitModuleDeclaration(
+ ModuleDeclaration* declaration) {
+ UNREACHABLE();
+}
+
+
+void HOptimizedGraphBuilder::VisitImportDeclaration(
+ ImportDeclaration* declaration) {
UNREACHABLE();
}
-void HGraphBuilder::VisitImportDeclaration(ImportDeclaration* declaration) {
+void HOptimizedGraphBuilder::VisitExportDeclaration(
+ ExportDeclaration* declaration) {
UNREACHABLE();
}
-void HGraphBuilder::VisitExportDeclaration(ExportDeclaration* declaration) {
+void HOptimizedGraphBuilder::VisitModuleLiteral(ModuleLiteral* module) {
UNREACHABLE();
}
-void HGraphBuilder::VisitModuleLiteral(ModuleLiteral* module) {
+void HOptimizedGraphBuilder::VisitModuleVariable(ModuleVariable* module) {
UNREACHABLE();
}
-void HGraphBuilder::VisitModuleVariable(ModuleVariable* module) {
+void HOptimizedGraphBuilder::VisitModulePath(ModulePath* module) {
UNREACHABLE();
}
-void HGraphBuilder::VisitModulePath(ModulePath* module) {
+void HOptimizedGraphBuilder::VisitModuleUrl(ModuleUrl* module) {
UNREACHABLE();
}
-void HGraphBuilder::VisitModuleUrl(ModuleUrl* module) {
+void HOptimizedGraphBuilder::VisitModuleStatement(ModuleStatement* stmt) {
UNREACHABLE();
}
// Generators for inline runtime functions.
// Support for types.
-void HGraphBuilder::GenerateIsSmi(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateIsSmi(CallRuntime* call) {
ASSERT(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
@@ -8936,7 +9686,7 @@ void HGraphBuilder::GenerateIsSmi(CallRuntime* call) {
}
-void HGraphBuilder::GenerateIsSpecObject(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateIsSpecObject(CallRuntime* call) {
ASSERT(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
@@ -8948,7 +9698,17 @@ void HGraphBuilder::GenerateIsSpecObject(CallRuntime* call) {
}
-void HGraphBuilder::GenerateIsFunction(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateIsSymbol(CallRuntime* call) {
+ ASSERT(call->arguments()->length() == 1);
+ CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+ HValue* value = Pop();
+ HHasInstanceTypeAndBranch* result =
+ new(zone()) HHasInstanceTypeAndBranch(value, SYMBOL_TYPE);
+ return ast_context()->ReturnControl(result, call->id());
+}
+
+
+void HOptimizedGraphBuilder::GenerateIsFunction(CallRuntime* call) {
ASSERT(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
@@ -8958,7 +9718,7 @@ void HGraphBuilder::GenerateIsFunction(CallRuntime* call) {
}
-void HGraphBuilder::GenerateHasCachedArrayIndex(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateHasCachedArrayIndex(CallRuntime* call) {
ASSERT(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
@@ -8968,7 +9728,7 @@ void HGraphBuilder::GenerateHasCachedArrayIndex(CallRuntime* call) {
}
-void HGraphBuilder::GenerateIsArray(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateIsArray(CallRuntime* call) {
ASSERT(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
@@ -8978,7 +9738,7 @@ void HGraphBuilder::GenerateIsArray(CallRuntime* call) {
}
-void HGraphBuilder::GenerateIsRegExp(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateIsRegExp(CallRuntime* call) {
ASSERT(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
@@ -8988,7 +9748,7 @@ void HGraphBuilder::GenerateIsRegExp(CallRuntime* call) {
}
-void HGraphBuilder::GenerateIsObject(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateIsObject(CallRuntime* call) {
ASSERT(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
@@ -8997,12 +9757,12 @@ void HGraphBuilder::GenerateIsObject(CallRuntime* call) {
}
-void HGraphBuilder::GenerateIsNonNegativeSmi(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateIsNonNegativeSmi(CallRuntime* call) {
return Bailout("inlined runtime function: IsNonNegativeSmi");
}
-void HGraphBuilder::GenerateIsUndetectableObject(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateIsUndetectableObject(CallRuntime* call) {
ASSERT(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
@@ -9012,7 +9772,7 @@ void HGraphBuilder::GenerateIsUndetectableObject(CallRuntime* call) {
}
-void HGraphBuilder::GenerateIsStringWrapperSafeForDefaultValueOf(
+void HOptimizedGraphBuilder::GenerateIsStringWrapperSafeForDefaultValueOf(
CallRuntime* call) {
return Bailout(
"inlined runtime function: IsStringWrapperSafeForDefaultValueOf");
@@ -9020,7 +9780,7 @@ void HGraphBuilder::GenerateIsStringWrapperSafeForDefaultValueOf(
// Support for construct call checks.
-void HGraphBuilder::GenerateIsConstructCall(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateIsConstructCall(CallRuntime* call) {
ASSERT(call->arguments()->length() == 0);
if (function_state()->outer() != NULL) {
// We are generating graph for inlined function.
@@ -9036,7 +9796,7 @@ void HGraphBuilder::GenerateIsConstructCall(CallRuntime* call) {
// Support for arguments.length and arguments[?].
-void HGraphBuilder::GenerateArgumentsLength(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateArgumentsLength(CallRuntime* call) {
// Our implementation of arguments (based on this stack frame or an
// adapter below it) does not work for inlined functions. This runtime
// function is blacklisted by AstNode::IsInlineable.
@@ -9049,7 +9809,7 @@ void HGraphBuilder::GenerateArgumentsLength(CallRuntime* call) {
}
-void HGraphBuilder::GenerateArguments(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateArguments(CallRuntime* call) {
// Our implementation of arguments (based on this stack frame or an
// adapter below it) does not work for inlined functions. This runtime
// function is blacklisted by AstNode::IsInlineable.
@@ -9060,8 +9820,7 @@ void HGraphBuilder::GenerateArguments(CallRuntime* call) {
HInstruction* elements = AddInstruction(
new(zone()) HArgumentsElements(false));
HInstruction* length = AddInstruction(new(zone()) HArgumentsLength(elements));
- HInstruction* checked_index =
- AddInstruction(new(zone()) HBoundsCheck(index, length));
+ HInstruction* checked_index = AddBoundsCheck(index, length);
HAccessArgumentsAt* result =
new(zone()) HAccessArgumentsAt(elements, length, checked_index);
return ast_context()->ReturnInstruction(result, call->id());
@@ -9069,14 +9828,14 @@ void HGraphBuilder::GenerateArguments(CallRuntime* call) {
// Support for accessing the class and value fields of an object.
-void HGraphBuilder::GenerateClassOf(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateClassOf(CallRuntime* call) {
// The special form recognized by IsClassOfTest is handled before we get
// here and does not cause a bailout.
return Bailout("inlined runtime function: ClassOf");
}
-void HGraphBuilder::GenerateValueOf(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateValueOf(CallRuntime* call) {
ASSERT(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
@@ -9085,7 +9844,7 @@ void HGraphBuilder::GenerateValueOf(CallRuntime* call) {
}
-void HGraphBuilder::GenerateDateField(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateDateField(CallRuntime* call) {
ASSERT(call->arguments()->length() == 2);
ASSERT_NE(NULL, call->arguments()->at(1)->AsLiteral());
Smi* index = Smi::cast(*(call->arguments()->at(1)->AsLiteral()->handle()));
@@ -9096,7 +9855,40 @@ void HGraphBuilder::GenerateDateField(CallRuntime* call) {
}
-void HGraphBuilder::GenerateSetValueOf(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateOneByteSeqStringSetChar(
+ CallRuntime* call) {
+ ASSERT(call->arguments()->length() == 3);
+ CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+ CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
+ CHECK_ALIVE(VisitForValue(call->arguments()->at(2)));
+ HValue* value = Pop();
+ HValue* index = Pop();
+ HValue* string = Pop();
+ HSeqStringSetChar* result = new(zone()) HSeqStringSetChar(
+ String::ONE_BYTE_ENCODING, string, index, value);
+ return ast_context()->ReturnInstruction(result, call->id());
+}
+
+
+void HOptimizedGraphBuilder::GenerateTwoByteSeqStringSetChar(
+ CallRuntime* call) {
+ ASSERT(call->arguments()->length() == 3);
+ CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+ CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
+ CHECK_ALIVE(VisitForValue(call->arguments()->at(2)));
+ HValue* value = Pop();
+ HValue* index = Pop();
+ HValue* string = Pop();
+ HValue* context = environment()->LookupContext();
+ HInstruction* char_code = BuildStringCharCodeAt(context, string, index);
+ AddInstruction(char_code);
+ HSeqStringSetChar* result = new(zone()) HSeqStringSetChar(
+ String::TWO_BYTE_ENCODING, string, index, value);
+ return ast_context()->ReturnInstruction(result, call->id());
+}
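
Both intrinsics store a single character into a sequential string and differ only in the width of the backing store: one byte per character for Latin-1, two bytes for UTF-16. A standalone sketch of that storage difference (the raw buffers here are hypothetical; V8 writes into the string payload via HSeqStringSetChar):

#include <cstdint>

// One-byte (Latin-1) sequential strings store 8-bit units...
static void OneByteSetChar(uint8_t* chars, int index, uint16_t value) {
  chars[index] = static_cast<uint8_t>(value);  // Caller guarantees value < 256.
}

// ...two-byte (UTF-16) sequential strings store 16-bit units.
static void TwoByteSetChar(uint16_t* chars, int index, uint16_t value) {
  chars[index] = value;
}
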
+
+
+void HOptimizedGraphBuilder::GenerateSetValueOf(CallRuntime* call) {
ASSERT(call->arguments()->length() == 2);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
@@ -9125,7 +9917,7 @@ void HGraphBuilder::GenerateSetValueOf(CallRuntime* call) {
// Create in-object property store to kValueOffset.
set_current_block(if_js_value);
- Handle<String> name = isolate()->factory()->undefined_symbol();
+ Handle<String> name = isolate()->factory()->undefined_string();
AddInstruction(new(zone()) HStoreNamedField(object,
name,
value,
@@ -9139,48 +9931,46 @@ void HGraphBuilder::GenerateSetValueOf(CallRuntime* call) {
// Fast support for charCodeAt(n).
-void HGraphBuilder::GenerateStringCharCodeAt(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateStringCharCodeAt(CallRuntime* call) {
ASSERT(call->arguments()->length() == 2);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
HValue* index = Pop();
HValue* string = Pop();
HValue* context = environment()->LookupContext();
- HStringCharCodeAt* result = BuildStringCharCodeAt(context, string, index);
+ HInstruction* result = BuildStringCharCodeAt(context, string, index);
return ast_context()->ReturnInstruction(result, call->id());
}
// Fast support for String.fromCharCode(code), also used by string.charAt(n).
-void HGraphBuilder::GenerateStringCharFromCode(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateStringCharFromCode(CallRuntime* call) {
ASSERT(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* char_code = Pop();
HValue* context = environment()->LookupContext();
- HStringCharFromCode* result =
- new(zone()) HStringCharFromCode(context, char_code);
+ HInstruction* result = HStringCharFromCode::New(zone(), context, char_code);
return ast_context()->ReturnInstruction(result, call->id());
}
// Fast support for string.charAt(n) and string[n].
-void HGraphBuilder::GenerateStringCharAt(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateStringCharAt(CallRuntime* call) {
ASSERT(call->arguments()->length() == 2);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
HValue* index = Pop();
HValue* string = Pop();
HValue* context = environment()->LookupContext();
- HStringCharCodeAt* char_code = BuildStringCharCodeAt(context, string, index);
+ HInstruction* char_code = BuildStringCharCodeAt(context, string, index);
AddInstruction(char_code);
- HStringCharFromCode* result =
- new(zone()) HStringCharFromCode(context, char_code);
+ HInstruction* result = HStringCharFromCode::New(zone(), context, char_code);
return ast_context()->ReturnInstruction(result, call->id());
}
// Fast support for object equality testing.
-void HGraphBuilder::GenerateObjectEquals(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateObjectEquals(CallRuntime* call) {
ASSERT(call->arguments()->length() == 2);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
@@ -9192,14 +9982,14 @@ void HGraphBuilder::GenerateObjectEquals(CallRuntime* call) {
}
-void HGraphBuilder::GenerateLog(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateLog(CallRuntime* call) {
// %_Log is ignored in optimized code.
return ast_context()->ReturnValue(graph()->GetConstantUndefined());
}
// Fast support for Math.random().
-void HGraphBuilder::GenerateRandomHeapNumber(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateRandomHeapNumber(CallRuntime* call) {
HValue* context = environment()->LookupContext();
HGlobalObject* global_object = new(zone()) HGlobalObject(context);
AddInstruction(global_object);
@@ -9209,7 +9999,7 @@ void HGraphBuilder::GenerateRandomHeapNumber(CallRuntime* call) {
// Fast support for StringAdd.
-void HGraphBuilder::GenerateStringAdd(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateStringAdd(CallRuntime* call) {
ASSERT_EQ(2, call->arguments()->length());
CHECK_ALIVE(VisitArgumentList(call->arguments()));
HValue* context = environment()->LookupContext();
@@ -9220,7 +10010,7 @@ void HGraphBuilder::GenerateStringAdd(CallRuntime* call) {
// Fast support for SubString.
-void HGraphBuilder::GenerateSubString(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateSubString(CallRuntime* call) {
ASSERT_EQ(3, call->arguments()->length());
CHECK_ALIVE(VisitArgumentList(call->arguments()));
HValue* context = environment()->LookupContext();
@@ -9231,7 +10021,7 @@ void HGraphBuilder::GenerateSubString(CallRuntime* call) {
// Fast support for StringCompare.
-void HGraphBuilder::GenerateStringCompare(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateStringCompare(CallRuntime* call) {
ASSERT_EQ(2, call->arguments()->length());
CHECK_ALIVE(VisitArgumentList(call->arguments()));
HValue* context = environment()->LookupContext();
@@ -9243,7 +10033,7 @@ void HGraphBuilder::GenerateStringCompare(CallRuntime* call) {
// Support for direct calls from JavaScript to native RegExp code.
-void HGraphBuilder::GenerateRegExpExec(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateRegExpExec(CallRuntime* call) {
ASSERT_EQ(4, call->arguments()->length());
CHECK_ALIVE(VisitArgumentList(call->arguments()));
HValue* context = environment()->LookupContext();
@@ -9254,7 +10044,7 @@ void HGraphBuilder::GenerateRegExpExec(CallRuntime* call) {
// Construct a RegExp exec result with two in-object properties.
-void HGraphBuilder::GenerateRegExpConstructResult(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateRegExpConstructResult(CallRuntime* call) {
ASSERT_EQ(3, call->arguments()->length());
CHECK_ALIVE(VisitArgumentList(call->arguments()));
HValue* context = environment()->LookupContext();
@@ -9266,13 +10056,13 @@ void HGraphBuilder::GenerateRegExpConstructResult(CallRuntime* call) {
// Support for fast native caches.
-void HGraphBuilder::GenerateGetFromCache(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateGetFromCache(CallRuntime* call) {
return Bailout("inlined runtime function: GetFromCache");
}
// Fast support for number to string.
-void HGraphBuilder::GenerateNumberToString(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateNumberToString(CallRuntime* call) {
ASSERT_EQ(1, call->arguments()->length());
CHECK_ALIVE(VisitArgumentList(call->arguments()));
HValue* context = environment()->LookupContext();
@@ -9284,7 +10074,7 @@ void HGraphBuilder::GenerateNumberToString(CallRuntime* call) {
// Fast call for custom callbacks.
-void HGraphBuilder::GenerateCallFunction(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateCallFunction(CallRuntime* call) {
// Subtract 1: the function to call is not itself an argument to the call.
int arg_count = call->arguments()->length() - 1;
ASSERT(arg_count >= 1); // There's always at least a receiver.
@@ -9328,18 +10118,18 @@ void HGraphBuilder::GenerateCallFunction(CallRuntime* call) {
// Fast call to math functions.
-void HGraphBuilder::GenerateMathPow(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateMathPow(CallRuntime* call) {
ASSERT_EQ(2, call->arguments()->length());
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
HValue* right = Pop();
HValue* left = Pop();
- HPower* result = new(zone()) HPower(left, right);
+ HInstruction* result = HPower::New(zone(), left, right);
return ast_context()->ReturnInstruction(result, call->id());
}
-void HGraphBuilder::GenerateMathSin(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateMathSin(CallRuntime* call) {
ASSERT_EQ(1, call->arguments()->length());
CHECK_ALIVE(VisitArgumentList(call->arguments()));
HValue* context = environment()->LookupContext();
@@ -9351,7 +10141,7 @@ void HGraphBuilder::GenerateMathSin(CallRuntime* call) {
}
-void HGraphBuilder::GenerateMathCos(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateMathCos(CallRuntime* call) {
ASSERT_EQ(1, call->arguments()->length());
CHECK_ALIVE(VisitArgumentList(call->arguments()));
HValue* context = environment()->LookupContext();
@@ -9363,7 +10153,7 @@ void HGraphBuilder::GenerateMathCos(CallRuntime* call) {
}
-void HGraphBuilder::GenerateMathTan(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateMathTan(CallRuntime* call) {
ASSERT_EQ(1, call->arguments()->length());
CHECK_ALIVE(VisitArgumentList(call->arguments()));
HValue* context = environment()->LookupContext();
@@ -9375,7 +10165,7 @@ void HGraphBuilder::GenerateMathTan(CallRuntime* call) {
}
-void HGraphBuilder::GenerateMathLog(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateMathLog(CallRuntime* call) {
ASSERT_EQ(1, call->arguments()->length());
CHECK_ALIVE(VisitArgumentList(call->arguments()));
HValue* context = environment()->LookupContext();
@@ -9387,18 +10177,18 @@ void HGraphBuilder::GenerateMathLog(CallRuntime* call) {
}
-void HGraphBuilder::GenerateMathSqrt(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateMathSqrt(CallRuntime* call) {
return Bailout("inlined runtime function: MathSqrt");
}
// Check whether two RegExps are equivalent.
-void HGraphBuilder::GenerateIsRegExpEquivalent(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateIsRegExpEquivalent(CallRuntime* call) {
return Bailout("inlined runtime function: IsRegExpEquivalent");
}
-void HGraphBuilder::GenerateGetCachedArrayIndex(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateGetCachedArrayIndex(CallRuntime* call) {
ASSERT(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
@@ -9407,7 +10197,7 @@ void HGraphBuilder::GenerateGetCachedArrayIndex(CallRuntime* call) {
}
-void HGraphBuilder::GenerateFastAsciiArrayJoin(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateFastAsciiArrayJoin(CallRuntime* call) {
return Bailout("inlined runtime function: FastAsciiArrayJoin");
}
@@ -9422,7 +10212,6 @@ HEnvironment::HEnvironment(HEnvironment* outer,
Zone* zone)
: closure_(closure),
values_(0, zone),
- assigned_variables_(4, zone),
frame_type_(JS_FUNCTION),
parameter_count_(0),
specials_count_(1),
@@ -9437,12 +10226,27 @@ HEnvironment::HEnvironment(HEnvironment* outer,
}
+HEnvironment::HEnvironment(Zone* zone, int parameter_count)
+ : values_(0, zone),
+ frame_type_(STUB),
+ parameter_count_(parameter_count),
+ specials_count_(1),
+ local_count_(0),
+ outer_(NULL),
+ entry_(NULL),
+ pop_count_(0),
+ push_count_(0),
+ ast_id_(BailoutId::None()),
+ zone_(zone) {
+ Initialize(parameter_count, 0, 0);
+}
+
+
HEnvironment::HEnvironment(const HEnvironment* other, Zone* zone)
: values_(0, zone),
- assigned_variables_(0, zone),
frame_type_(JS_FUNCTION),
parameter_count_(0),
- specials_count_(1),
+ specials_count_(0),
local_count_(0),
outer_(NULL),
entry_(NULL),
@@ -9461,7 +10265,6 @@ HEnvironment::HEnvironment(HEnvironment* outer,
Zone* zone)
: closure_(closure),
values_(arguments, zone),
- assigned_variables_(0, zone),
frame_type_(frame_type),
parameter_count_(arguments),
local_count_(0),
@@ -9490,7 +10293,7 @@ void HEnvironment::Initialize(int parameter_count,
void HEnvironment::Initialize(const HEnvironment* other) {
closure_ = other->closure();
values_.AddAll(other->values_, zone());
- assigned_variables_.AddAll(other->assigned_variables_, zone());
+ assigned_variables_.Union(other->assigned_variables_, zone());
frame_type_ = other->frame_type_;
parameter_count_ = other->parameter_count_;
local_count_ = other->local_count_;
@@ -9498,6 +10301,7 @@ void HEnvironment::Initialize(const HEnvironment* other) {
entry_ = other->entry_;
pop_count_ = other->pop_count_;
push_count_ = other->push_count_;
+ specials_count_ = other->specials_count_;
ast_id_ = other->ast_id_;
}
@@ -9534,9 +10338,7 @@ void HEnvironment::AddIncomingEdge(HBasicBlock* block, HEnvironment* other) {
void HEnvironment::Bind(int index, HValue* value) {
ASSERT(value != NULL);
- if (!assigned_variables_.Contains(index)) {
- assigned_variables_.Add(index, zone());
- }
+ assigned_variables_.Add(index, zone());
values_[index] = value;
}
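
assigned_variables_ changed from a list of indices to a GrowableBitVector, so Bind() no longer needs the Contains() guard: setting a bit is idempotent, whereas appending to a list would create duplicates. A minimal sketch of that property, with std::vector<bool> standing in for GrowableBitVector:

#include <vector>

struct GrowableBits {
  std::vector<bool> bits;
  // Safe to call repeatedly for the same index; no duplicates possible.
  void Add(int index) {
    if (index >= static_cast<int>(bits.size())) bits.resize(index + 1, false);
    bits[index] = true;
  }
  bool Contains(int index) const {
    return index < static_cast<int>(bits.size()) && bits[index];
  }
};
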
@@ -9619,8 +10421,8 @@ HEnvironment* HEnvironment::CopyForInlining(
int arguments,
FunctionLiteral* function,
HConstant* undefined,
- CallKind call_kind,
- InliningKind inlining_kind) const {
+ InliningKind inlining_kind,
+ bool undefined_receiver) const {
ASSERT(frame_type() == JS_FUNCTION);
// Outer environment is a copy of this one without the arguments.
@@ -9661,8 +10463,7 @@ HEnvironment* HEnvironment::CopyForInlining(
// If the function we are inlining is a strict mode function or a
// builtin function, pass undefined as the receiver for function
// calls (instead of the global receiver).
- if ((target->shared()->native() || !function->is_classic_mode()) &&
- call_kind == CALL_AS_FUNCTION && inlining_kind != CONSTRUCT_CALL_RETURN) {
+ if (undefined_receiver) {
inner->SetValueAt(0, undefined);
}
inner->SetValueAt(arity + 1, LookupContext());
@@ -9704,21 +10505,29 @@ void HEnvironment::PrintToStd() {
}
-void HTracer::TraceCompilation(FunctionLiteral* function) {
+void HTracer::TraceCompilation(CompilationInfo* info) {
Tag tag(this, "compilation");
- Handle<String> name = function->debug_name();
- PrintStringProperty("name", *name->ToCString());
- PrintStringProperty("method", *name->ToCString());
+ if (info->IsOptimizing()) {
+ Handle<String> name = info->function()->debug_name();
+ PrintStringProperty("name", *name->ToCString());
+ PrintStringProperty("method", *name->ToCString());
+ } else {
+ CodeStub::Major major_key = info->code_stub()->MajorKey();
+ PrintStringProperty("name", CodeStub::MajorName(major_key, false));
+ PrintStringProperty("method", "stub");
+ }
PrintLongProperty("date", static_cast<int64_t>(OS::TimeCurrentMillis()));
}
void HTracer::TraceLithium(const char* name, LChunk* chunk) {
+ AllowHandleDereference allow_handle_deref(chunk->graph()->isolate());
Trace(name, chunk->graph(), chunk);
}
void HTracer::TraceHydrogen(const char* name, HGraph* graph) {
+ AllowHandleDereference allow_handle_deref(graph->isolate());
Trace(name, graph, NULL);
}
@@ -9920,6 +10729,7 @@ void HTracer::FlushToFile() {
void HStatistics::Initialize(CompilationInfo* info) {
+ if (info->shared_info().is_null()) return;
source_size_ += info->shared_info()->SourceSize();
}
@@ -9935,38 +10745,55 @@ void HStatistics::Print() {
PrintF("%30s", names_[i]);
double ms = static_cast<double>(timing_[i]) / 1000;
double percent = static_cast<double>(timing_[i]) * 100 / sum;
- PrintF(" - %7.3f ms / %4.1f %% ", ms, percent);
+ PrintF(" - %8.3f ms / %4.1f %% ", ms, percent);
unsigned size = sizes_[i];
double size_percent = static_cast<double>(size) * 100 / total_size_;
- PrintF(" %8u bytes / %4.1f %%\n", size, size_percent);
- }
+ PrintF(" %9u bytes / %4.1f %%\n", size, size_percent);
+ }
+
+ PrintF("----------------------------------------"
+ "---------------------------------------\n");
+ int64_t total = create_graph_ + optimize_graph_ + generate_code_;
+ PrintF("%30s - %8.3f ms / %4.1f %% \n",
+ "Create graph",
+ static_cast<double>(create_graph_) / 1000,
+ static_cast<double>(create_graph_) * 100 / total);
+ PrintF("%30s - %8.3f ms / %4.1f %% \n",
+ "Optimize graph",
+ static_cast<double>(optimize_graph_) / 1000,
+ static_cast<double>(optimize_graph_) * 100 / total);
+ PrintF("%30s - %8.3f ms / %4.1f %% \n",
+ "Generate and install code",
+ static_cast<double>(generate_code_) / 1000,
+ static_cast<double>(generate_code_) * 100 / total);
+ PrintF("----------------------------------------"
+ "---------------------------------------\n");
+ PrintF("%30s - %8.3f ms (%.1f times slower than full code gen)\n",
+ "Total",
+ static_cast<double>(total) / 1000,
+ static_cast<double>(total) / full_code_gen_);
+
double source_size_in_kb = static_cast<double>(source_size_) / 1024;
double normalized_time = source_size_in_kb > 0
- ? (static_cast<double>(sum) / 1000) / source_size_in_kb
+ ? (static_cast<double>(total) / 1000) / source_size_in_kb
: 0;
- double normalized_bytes = source_size_in_kb > 0
- ? total_size_ / source_size_in_kb
+ double normalized_size_in_kb = source_size_in_kb > 0
+ ? total_size_ / 1024 / source_size_in_kb
: 0;
- PrintF("%30s - %7.3f ms %7.3f bytes\n", "Sum",
- normalized_time, normalized_bytes);
- PrintF("---------------------------------------------------------------\n");
- PrintF("%30s - %7.3f ms (%.1f times slower than full code gen)\n",
- "Total",
- static_cast<double>(total_) / 1000,
- static_cast<double>(total_) / full_code_gen_);
+ PrintF("%30s - %8.3f ms %7.3f kB allocated\n",
+ "Average per kB source",
+ normalized_time, normalized_size_in_kb);
}
void HStatistics::SaveTiming(const char* name, int64_t ticks, unsigned size) {
if (name == HPhase::kFullCodeGen) {
full_code_gen_ += ticks;
- } else if (name == HPhase::kTotal) {
- total_ += ticks;
} else {
total_size_ += size;
for (int i = 0; i < names_.length(); ++i) {
- if (names_[i] == name) {
+ if (strcmp(names_[i], name) == 0) {
timing_[i] += ticks;
sizes_[i] += size;
return;
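
The strcmp fix matters because names_ holds const char* phase names: comparing pointers only tests whether two identical literals were pooled to the same address, which the compiler does not guarantee across translation units, so matching names could land in separate buckets. A minimal illustration:

#include <cstring>

// Contents-equality for C strings; pointer equality (a == b) would only
// test identity, which is unspecified for identical string literals.
static bool SameName(const char* a, const char* b) {
  return std::strcmp(a, b) == 0;
}
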
@@ -9980,8 +10807,6 @@ void HStatistics::SaveTiming(const char* name, int64_t ticks, unsigned size) {
const char* const HPhase::kFullCodeGen = "Full code generator";
-const char* const HPhase::kTotal = "Total";
-
void HPhase::Begin(const char* name,
HGraph* graph,
diff --git a/src/3rdparty/v8/src/hydrogen.h b/src/3rdparty/v8/src/hydrogen.h
index 3748970..a9829a0 100644
--- a/src/3rdparty/v8/src/hydrogen.h
+++ b/src/3rdparty/v8/src/hydrogen.h
@@ -91,6 +91,8 @@ class HBasicBlock: public ZoneObject {
void set_last_instruction_index(int index) {
last_instruction_index_ = index;
}
+ bool is_osr_entry() { return is_osr_entry_; }
+ void set_osr_entry() { is_osr_entry_ = true; }
void AttachLoopInformation();
void DetachLoopInformation();
@@ -125,7 +127,10 @@ class HBasicBlock: public ZoneObject {
void Goto(HBasicBlock* block, FunctionState* state = NULL);
int PredecessorIndexOf(HBasicBlock* predecessor) const;
- void AddSimulate(BailoutId ast_id) { AddInstruction(CreateSimulate(ast_id)); }
+ void AddSimulate(BailoutId ast_id,
+ RemovableSimulate removable = FIXED_SIMULATE) {
+ AddInstruction(CreateSimulate(ast_id, removable));
+ }
void AssignCommonDominator(HBasicBlock* other);
void AssignLoopSuccessorDominators();
@@ -166,7 +171,7 @@ class HBasicBlock: public ZoneObject {
void RegisterPredecessor(HBasicBlock* pred);
void AddDominatedBlock(HBasicBlock* block);
- HSimulate* CreateSimulate(BailoutId ast_id);
+ HSimulate* CreateSimulate(BailoutId ast_id, RemovableSimulate removable);
HDeoptimize* CreateDeoptimize(HDeoptimize::UseEnvironment has_uses);
int block_id_;
@@ -190,6 +195,7 @@ class HBasicBlock: public ZoneObject {
bool is_inline_return_target_;
bool is_deoptimizing_;
bool dominates_loop_successors_;
+ bool is_osr_entry_;
};
@@ -244,7 +250,7 @@ class HGraph: public ZoneObject {
public:
explicit HGraph(CompilationInfo* info);
- Isolate* isolate() { return isolate_; }
+ Isolate* isolate() const { return isolate_; }
Zone* zone() const { return zone_; }
CompilationInfo* info() const { return info_; }
@@ -255,20 +261,23 @@ class HGraph: public ZoneObject {
void InitializeInferredTypes();
void InsertTypeConversions();
+ void MergeRemovableSimulates();
void InsertRepresentationChanges();
void MarkDeoptimizeOnUndefined();
void ComputeMinusZeroChecks();
void ComputeSafeUint32Operations();
+ void GlobalValueNumbering();
bool ProcessArgumentsObject();
void EliminateRedundantPhis();
void EliminateUnreachablePhis();
void Canonicalize();
void OrderBlocks();
void AssignDominators();
- void ReplaceCheckedValues();
+ void SetupInformativeDefinitions();
void EliminateRedundantBoundsChecks();
void DehoistSimpleArrayIndexComputations();
void DeadCodeElimination();
+ void RestoreActualValues();
void PropagateDeoptimizingMark();
void EliminateUnusedInstructions();
@@ -286,6 +295,7 @@ class HGraph: public ZoneObject {
undefined_constant_.set(constant);
}
HConstant* GetConstantUndefined() const { return undefined_constant_.get(); }
+ HConstant* GetConstant0();
HConstant* GetConstant1();
HConstant* GetConstantMinus1();
HConstant* GetConstantTrue();
@@ -351,6 +361,14 @@ class HGraph: public ZoneObject {
use_optimistic_licm_ = value;
}
+ bool has_soft_deoptimize() {
+ return has_soft_deoptimize_;
+ }
+
+ void set_has_soft_deoptimize(bool value) {
+ has_soft_deoptimize_ = value;
+ }
+
void MarkRecursive() {
is_recursive_ = true;
}
@@ -373,6 +391,7 @@ class HGraph: public ZoneObject {
int32_t integer_value);
void MarkAsDeoptimizingRecursively(HBasicBlock* block);
+ void NullifyUnreachableInstructions();
void InsertTypeConversions(HInstruction* instr);
void PropagateMinusZeroChecks(HValue* value, BitVector* visited);
void RecursivelyMarkPhiDeoptimizeOnUndefined(HPhi* phi);
@@ -384,6 +403,8 @@ class HGraph: public ZoneObject {
void InferTypes(ZoneList<HValue*>* worklist);
void InitializeInferredTypes(int from_inclusive, int to_inclusive);
void CheckForBackEdge(HBasicBlock* block, HBasicBlock* successor);
+ void SetupInformativeDefinitionsInBlock(HBasicBlock* block);
+ void SetupInformativeDefinitionsRecursively(HBasicBlock* block);
void EliminateRedundantBoundsChecks(HBasicBlock* bb, BoundsCheckTable* table);
Isolate* isolate_;
@@ -395,6 +416,7 @@ class HGraph: public ZoneObject {
ZoneList<HPhi*>* phi_list_;
ZoneList<HInstruction*>* uint32_instructions_;
SetOncePointer<HConstant> undefined_constant_;
+ SetOncePointer<HConstant> constant_0_;
SetOncePointer<HConstant> constant_1_;
SetOncePointer<HConstant> constant_minus1_;
SetOncePointer<HConstant> constant_true_;
@@ -410,6 +432,7 @@ class HGraph: public ZoneObject {
bool is_recursive_;
bool use_optimistic_licm_;
+ bool has_soft_deoptimize_;
int type_change_checksum_;
DISALLOW_COPY_AND_ASSIGN(HGraph);
@@ -425,7 +448,8 @@ enum FrameType {
JS_CONSTRUCT,
JS_GETTER,
JS_SETTER,
- ARGUMENTS_ADAPTOR
+ ARGUMENTS_ADAPTOR,
+ STUB
};
@@ -436,6 +460,8 @@ class HEnvironment: public ZoneObject {
Handle<JSFunction> closure,
Zone* zone);
+ HEnvironment(Zone* zone, int parameter_count);
+
HEnvironment* arguments_environment() {
return outer()->frame_type() == ARGUMENTS_ADAPTOR ? outer() : this;
}
@@ -443,7 +469,7 @@ class HEnvironment: public ZoneObject {
// Simple accessors.
Handle<JSFunction> closure() const { return closure_; }
const ZoneList<HValue*>* values() const { return &values_; }
- const ZoneList<int>* assigned_variables() const {
+ const GrowableBitVector* assigned_variables() const {
return &assigned_variables_;
}
FrameType frame_type() const { return frame_type_; }
@@ -535,8 +561,16 @@ class HEnvironment: public ZoneObject {
int arguments,
FunctionLiteral* function,
HConstant* undefined,
- CallKind call_kind,
- InliningKind inlining_kind) const;
+ InliningKind inlining_kind,
+ bool undefined_receiver) const;
+
+ static bool UseUndefinedReceiver(Handle<JSFunction> closure,
+ FunctionLiteral* function,
+ CallKind call_kind,
+ InliningKind inlining_kind) {
+ return (closure->shared()->native() || !function->is_classic_mode()) &&
+ call_kind == CALL_AS_FUNCTION && inlining_kind != CONSTRUCT_CALL_RETURN;
+ }
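
With the receiver policy factored into this static predicate, a caller presumably evaluates it while the call kind is still in scope and passes only the boolean into CopyForInlining. A hypothetical call-site sketch (variable names are illustrative, not from this patch):

// Hypothetical call site: compute the policy once, pass the bool along.
bool undefined_receiver = HEnvironment::UseUndefinedReceiver(
    target, function, call_kind, inlining_kind);
HEnvironment* inner = environment()->CopyForInlining(
    target, arguments_count, function, undefined,
    inlining_kind, undefined_receiver);
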
HEnvironment* DiscardInlined(bool drop_extra) {
HEnvironment* outer = outer_;
@@ -550,7 +584,7 @@ class HEnvironment: public ZoneObject {
void ClearHistory() {
pop_count_ = 0;
push_count_ = 0;
- assigned_variables_.Rewind(0);
+ assigned_variables_.Clear();
}
void SetValueAt(int index, HValue* value) {
@@ -599,7 +633,7 @@ class HEnvironment: public ZoneObject {
Handle<JSFunction> closure_;
// Value array [parameters] [specials] [locals] [temporaries].
ZoneList<HValue*> values_;
- ZoneList<int> assigned_variables_;
+ GrowableBitVector assigned_variables_;
FrameType frame_type_;
int parameter_count_;
int specials_count_;
@@ -613,7 +647,26 @@ class HEnvironment: public ZoneObject {
};
-class HGraphBuilder;
+class HInferRepresentation BASE_EMBEDDED {
+ public:
+ explicit HInferRepresentation(HGraph* graph)
+ : graph_(graph),
+ worklist_(8, graph->zone()),
+ in_worklist_(graph->GetMaximumValueID(), graph->zone()) { }
+
+ void Analyze();
+ void AddToWorklist(HValue* current);
+
+ private:
+ Zone* zone() const { return graph_->zone(); }
+
+ HGraph* graph_;
+ ZoneList<HValue*> worklist_;
+ BitVector in_worklist_;
+};
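
The inference pass is now a small worklist object owned by its graph; presumably it is driven in two steps, roughly:

// Hypothetical invocation sketch of the pass declared above.
HInferRepresentation rep_inference(graph);
rep_inference.Analyze();  // Runs the worklist to a fixed point.
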
+
+
+class HOptimizedGraphBuilder;
enum ArgumentsAllowedFlag {
ARGUMENTS_NOT_ALLOWED,
@@ -649,10 +702,10 @@ class AstContext {
bool is_for_typeof() { return for_typeof_; }
protected:
- AstContext(HGraphBuilder* owner, Expression::Context kind);
+ AstContext(HOptimizedGraphBuilder* owner, Expression::Context kind);
virtual ~AstContext();
- HGraphBuilder* owner() const { return owner_; }
+ HOptimizedGraphBuilder* owner() const { return owner_; }
inline Zone* zone() const;
@@ -663,7 +716,7 @@ class AstContext {
#endif
private:
- HGraphBuilder* owner_;
+ HOptimizedGraphBuilder* owner_;
Expression::Context kind_;
AstContext* outer_;
bool for_typeof_;
@@ -672,7 +725,7 @@ class AstContext {
class EffectContext: public AstContext {
public:
- explicit EffectContext(HGraphBuilder* owner)
+ explicit EffectContext(HOptimizedGraphBuilder* owner)
: AstContext(owner, Expression::kEffect) {
}
virtual ~EffectContext();
@@ -685,7 +738,7 @@ class EffectContext: public AstContext {
class ValueContext: public AstContext {
public:
- explicit ValueContext(HGraphBuilder* owner, ArgumentsAllowedFlag flag)
+ ValueContext(HOptimizedGraphBuilder* owner, ArgumentsAllowedFlag flag)
: AstContext(owner, Expression::kValue), flag_(flag) {
}
virtual ~ValueContext();
@@ -703,7 +756,7 @@ class ValueContext: public AstContext {
class TestContext: public AstContext {
public:
- TestContext(HGraphBuilder* owner,
+ TestContext(HOptimizedGraphBuilder* owner,
Expression* condition,
TypeFeedbackOracle* oracle,
HBasicBlock* if_true,
@@ -743,7 +796,7 @@ class TestContext: public AstContext {
class FunctionState {
public:
- FunctionState(HGraphBuilder* owner,
+ FunctionState(HOptimizedGraphBuilder* owner,
CompilationInfo* info,
TypeFeedbackOracle* oracle,
InliningKind inlining_kind);
@@ -773,7 +826,7 @@ class FunctionState {
bool arguments_pushed() { return arguments_elements() != NULL; }
private:
- HGraphBuilder* owner_;
+ HOptimizedGraphBuilder* owner_;
CompilationInfo* compilation_info_;
TypeFeedbackOracle* oracle_;
@@ -805,7 +858,175 @@ class FunctionState {
};
-class HGraphBuilder: public AstVisitor {
+class HGraphBuilder {
+ public:
+ explicit HGraphBuilder(CompilationInfo* info)
+ : info_(info), graph_(NULL), current_block_(NULL) {}
+ virtual ~HGraphBuilder() {}
+
+ HBasicBlock* current_block() const { return current_block_; }
+ void set_current_block(HBasicBlock* block) { current_block_ = block; }
+ HEnvironment* environment() const {
+ return current_block()->last_environment();
+ }
+ Zone* zone() const { return info_->zone(); }
+ HGraph* graph() { return graph_; }
+
+ HGraph* CreateGraph();
+
+ // Adding instructions.
+ HInstruction* AddInstruction(HInstruction* instr);
+ void AddSimulate(BailoutId id,
+ RemovableSimulate removable = FIXED_SIMULATE);
+ HBoundsCheck* AddBoundsCheck(
+ HValue* index,
+ HValue* length,
+ BoundsCheckKeyMode key_mode = DONT_ALLOW_SMI_KEY,
+ Representation r = Representation::None());
+
+ protected:
+ virtual bool BuildGraph() = 0;
+
+ HBasicBlock* CreateBasicBlock(HEnvironment* env);
+ HBasicBlock* CreateLoopHeaderBlock();
+
+ // Building common constructs
+ HInstruction* BuildExternalArrayElementAccess(
+ HValue* external_elements,
+ HValue* checked_key,
+ HValue* val,
+ HValue* dependency,
+ ElementsKind elements_kind,
+ bool is_store);
+
+ HInstruction* BuildFastElementAccess(
+ HValue* elements,
+ HValue* checked_key,
+ HValue* val,
+ HValue* dependency,
+ ElementsKind elements_kind,
+ bool is_store);
+
+ HInstruction* BuildUncheckedMonomorphicElementAccess(
+ HValue* object,
+ HValue* key,
+ HValue* val,
+ HCheckMaps* mapcheck,
+ bool is_js_array,
+ ElementsKind elements_kind,
+ bool is_store,
+ Representation checked_index_representation = Representation::None());
+
+ HInstruction* BuildStoreMap(HValue* object, HValue* map, BailoutId id);
+ HInstruction* BuildStoreMap(HValue* object, Handle<Map> map, BailoutId id);
+
+ class CheckBuilder {
+ public:
+ CheckBuilder(HGraphBuilder* builder, BailoutId id);
+ ~CheckBuilder() {
+ if (!finished_) End();
+ }
+
+ void CheckNotUndefined(HValue* value);
+ void CheckIntegerEq(HValue* left, HValue* right);
+ void End();
+
+ private:
+ Zone* zone() { return builder_->zone(); }
+
+ HGraphBuilder* builder_;
+ bool finished_;
+ HBasicBlock* failure_block_;
+ HBasicBlock* merge_block_;
+ BailoutId id_;
+ };
+
+ class IfBuilder {
+ public:
+ IfBuilder(HGraphBuilder* builder, BailoutId id);
+ ~IfBuilder() {
+ if (!finished_) End();
+ }
+
+ HInstruction* BeginTrue(
+ HValue* left,
+ HValue* right,
+ Token::Value token,
+ Representation input_representation = Representation::Integer32());
+ void BeginFalse();
+ void End();
+
+ private:
+ Zone* zone() { return builder_->zone(); }
+
+ HGraphBuilder* builder_;
+ bool finished_;
+ HBasicBlock* first_true_block_;
+ HBasicBlock* last_true_block_;
+ HBasicBlock* first_false_block_;
+ HBasicBlock* merge_block_;
+ BailoutId id_;
+ };
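
IfBuilder hides the compare/branch/merge block plumbing behind a scoped object. A hedged usage sketch (the operands and id are placeholders):

IfBuilder if_builder(this, id);
if_builder.BeginTrue(left, right, Token::EQ);
// ... emit instructions for the true arm ...
if_builder.BeginFalse();
// ... emit instructions for the false arm ...
if_builder.End();  // Joins both arms; also run by the destructor if omitted.
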
+
+ class LoopBuilder {
+ public:
+ enum Direction {
+ kPreIncrement,
+ kPostIncrement,
+ kPreDecrement,
+ kPostDecrement
+ };
+
+ LoopBuilder(HGraphBuilder* builder,
+ HValue* context,
+ Direction direction,
+ BailoutId id);
+ ~LoopBuilder() {
+ ASSERT(finished_);
+ }
+
+ HValue* BeginBody(
+ HValue* initial,
+ HValue* terminating,
+ Token::Value token,
+ Representation input_representation = Representation::Integer32());
+ void EndBody();
+
+ private:
+ Zone* zone() { return builder_->zone(); }
+
+ HGraphBuilder* builder_;
+ HValue* context_;
+ HInstruction* increment_;
+ HPhi* phi_;
+ HBasicBlock* header_block_;
+ HBasicBlock* body_block_;
+ HBasicBlock* exit_block_;
+ Direction direction_;
+ BailoutId id_;
+ bool finished_;
+ };
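
LoopBuilder does the same for counted loops, managing the header phi, the bounds test, and the increment. A hedged usage sketch (start and limit are placeholders):

LoopBuilder loop(this, context, LoopBuilder::kPostIncrement, id);
HValue* index = loop.BeginBody(start, limit, Token::LT);
// ... emit the loop body using index ...
loop.EndBody();  // Emits the increment and wires the back edge.
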
+
+ HValue* BuildAllocateElements(HContext* context,
+ ElementsKind kind,
+ HValue* capacity);
+
+ void BuildCopyElements(HContext* context,
+ HValue* from_elements,
+ ElementsKind from_elements_kind,
+ HValue* to_elements,
+ ElementsKind to_elements_kind,
+ HValue* length);
+
+ private:
+ HGraphBuilder();
+ CompilationInfo* info_;
+ HGraph* graph_;
+ HBasicBlock* current_block_;
+};
+
+
+class HOptimizedGraphBuilder: public HGraphBuilder, public AstVisitor {
public:
enum BreakType { BREAK, CONTINUE };
enum SwitchType { UNKNOWN_SWITCH, SMI_SWITCH, STRING_SWITCH };
@@ -841,7 +1062,8 @@ class HGraphBuilder: public AstVisitor {
// structures mirroring BreakableStatement nesting.
class BreakAndContinueScope BASE_EMBEDDED {
public:
- BreakAndContinueScope(BreakAndContinueInfo* info, HGraphBuilder* owner)
+ BreakAndContinueScope(BreakAndContinueInfo* info,
+ HOptimizedGraphBuilder* owner)
: info_(info), owner_(owner), next_(owner->break_scope()) {
owner->set_break_scope(this);
}
@@ -849,7 +1071,7 @@ class HGraphBuilder: public AstVisitor {
~BreakAndContinueScope() { owner_->set_break_scope(next_); }
BreakAndContinueInfo* info() { return info_; }
- HGraphBuilder* owner() { return owner_; }
+ HOptimizedGraphBuilder* owner() { return owner_; }
BreakAndContinueScope* next() { return next_; }
// Search the break stack for a break or continue target.
@@ -857,30 +1079,21 @@ class HGraphBuilder: public AstVisitor {
private:
BreakAndContinueInfo* info_;
- HGraphBuilder* owner_;
+ HOptimizedGraphBuilder* owner_;
BreakAndContinueScope* next_;
};
- HGraphBuilder(CompilationInfo* info, TypeFeedbackOracle* oracle);
+ HOptimizedGraphBuilder(CompilationInfo* info, TypeFeedbackOracle* oracle);
- HGraph* CreateGraph();
+ virtual bool BuildGraph();
// Simple accessors.
- HGraph* graph() const { return graph_; }
BreakAndContinueScope* break_scope() const { return break_scope_; }
void set_break_scope(BreakAndContinueScope* head) { break_scope_ = head; }
- HBasicBlock* current_block() const { return current_block_; }
- void set_current_block(HBasicBlock* block) { current_block_ = block; }
- HEnvironment* environment() const {
- return current_block()->last_environment();
- }
-
bool inline_bailout() { return inline_bailout_; }
- // Adding instructions.
- HInstruction* AddInstruction(HInstruction* instr);
- void AddSimulate(BailoutId ast_id);
+ void AddSoftDeoptimize();
// Bailout environment manipulation.
void Push(HValue* value) { environment()->Push(value); }
@@ -904,9 +1117,12 @@ class HGraphBuilder: public AstVisitor {
void operator delete(void* pointer, Zone* zone) { }
void operator delete(void* pointer) { }
+ DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
+
private:
// Type of a member function that generates inline code for a native function.
- typedef void (HGraphBuilder::*InlineFunctionGenerator)(CallRuntime* call);
+ typedef void (HOptimizedGraphBuilder::*InlineFunctionGenerator)
+ (CallRuntime* call);
// Forward declarations for inner scope classes.
class SubgraphScope;
@@ -1025,10 +1241,6 @@ class HGraphBuilder: public AstVisitor {
// to push them as outgoing parameters.
template <class Instruction> HInstruction* PreProcessCall(Instruction* call);
- void TraceRepresentation(Token::Value op,
- TypeInfo info,
- HValue* value,
- Representation rep);
static Representation ToRepresentation(TypeInfo info);
void SetUpScope(Scope* scope);
@@ -1038,9 +1250,6 @@ class HGraphBuilder: public AstVisitor {
AST_NODE_LIST(DECLARE_VISIT)
#undef DECLARE_VISIT
- HBasicBlock* CreateBasicBlock(HEnvironment* env);
- HBasicBlock* CreateLoopHeaderBlock();
-
// Helpers for flow graph construction.
enum GlobalPropertyAccess {
kUseCell,
@@ -1071,6 +1280,9 @@ class HGraphBuilder: public AstVisitor {
bool TryInlineSetter(Handle<JSFunction> setter,
Assignment* assignment,
HValue* implicit_return_value);
+ bool TryInlineApply(Handle<JSFunction> function,
+ Call* expr,
+ int arguments_count);
bool TryInlineBuiltinMethodCall(Call* expr,
HValue* receiver,
Handle<Map> receiver_map,
@@ -1111,33 +1323,22 @@ class HGraphBuilder: public AstVisitor {
HValue* value,
NilValue nil);
- HStringCharCodeAt* BuildStringCharCodeAt(HValue* context,
- HValue* string,
- HValue* index);
+ HInstruction* BuildStringCharCodeAt(HValue* context,
+ HValue* string,
+ HValue* index);
HInstruction* BuildBinaryOperation(BinaryOperation* expr,
HValue* left,
HValue* right);
HInstruction* BuildIncrement(bool returns_original_input,
CountOperation* expr);
- HInstruction* BuildFastElementAccess(HValue* elements,
- HValue* checked_key,
- HValue* val,
- HValue* dependency,
- ElementsKind elements_kind,
- bool is_store);
+ HInstruction* BuildLoadKeyedGeneric(HValue* object,
+ HValue* key);
HInstruction* TryBuildConsolidatedElementLoad(HValue* object,
HValue* key,
HValue* val,
SmallMapList* maps);
- HInstruction* BuildUncheckedMonomorphicElementAccess(HValue* object,
- HValue* key,
- HValue* val,
- HCheckMaps* mapcheck,
- Handle<Map> map,
- bool is_store);
-
HInstruction* BuildMonomorphicElementAccess(HValue* object,
HValue* key,
HValue* val,
@@ -1177,14 +1378,6 @@ class HGraphBuilder: public AstVisitor {
Handle<String> name,
Property* expr,
Handle<Map> map);
- HInstruction* BuildLoadKeyedGeneric(HValue* object, HValue* key);
- HInstruction* BuildExternalArrayElementAccess(
- HValue* external_elements,
- HValue* checked_key,
- HValue* val,
- HValue* dependency,
- ElementsKind elements_kind,
- bool is_store);
void AddCheckMapsWithTransitions(HValue* object,
Handle<Map> map);
@@ -1226,8 +1419,6 @@ class HGraphBuilder: public AstVisitor {
HValue** operand,
HValue** shift_amount);
- Zone* zone() const { return zone_; }
-
// The translation state of the currently-being-translated function.
FunctionState* function_state_;
@@ -1241,20 +1432,16 @@ class HGraphBuilder: public AstVisitor {
// A stack of breakable statements entered.
BreakAndContinueScope* break_scope_;
- HGraph* graph_;
- HBasicBlock* current_block_;
-
int inlined_count_;
ZoneList<Handle<Object> > globals_;
- Zone* zone_;
-
bool inline_bailout_;
friend class FunctionState; // Pushes and pops the state stack.
friend class AstContext; // Pushes and pops the AST context stack.
+ friend class KeyedLoadFastElementStub;
- DISALLOW_COPY_AND_ASSIGN(HGraphBuilder);
+ DISALLOW_COPY_AND_ASSIGN(HOptimizedGraphBuilder);
};
@@ -1358,12 +1545,22 @@ class HStatistics: public Malloced {
return instance.get();
}
+ void IncrementSubtotals(int64_t create_graph,
+ int64_t optimize_graph,
+ int64_t generate_code) {
+ create_graph_ += create_graph;
+ optimize_graph_ += optimize_graph;
+ generate_code_ += generate_code;
+ }
+
private:
HStatistics()
: timing_(5),
names_(5),
sizes_(5),
- total_(0),
+ create_graph_(0),
+ optimize_graph_(0),
+ generate_code_(0),
total_size_(0),
full_code_gen_(0),
source_size_(0) { }
@@ -1371,7 +1568,9 @@ class HStatistics: public Malloced {
List<int64_t> timing_;
List<const char*> names_;
List<unsigned> sizes_;
- int64_t total_;
+ int64_t create_graph_;
+ int64_t optimize_graph_;
+ int64_t generate_code_;
unsigned total_size_;
int64_t full_code_gen_;
double source_size_;
@@ -1381,7 +1580,6 @@ class HStatistics: public Malloced {
class HPhase BASE_EMBEDDED {
public:
static const char* const kFullCodeGen;
- static const char* const kTotal;
explicit HPhase(const char* name) { Begin(name, NULL, NULL, NULL); }
HPhase(const char* name, HGraph* graph) {
@@ -1416,7 +1614,7 @@ class HPhase BASE_EMBEDDED {
class HTracer: public Malloced {
public:
- void TraceCompilation(FunctionLiteral* function);
+ void TraceCompilation(CompilationInfo* info);
void TraceHydrogen(const char* name, HGraph* graph);
void TraceLithium(const char* name, LChunk* chunk);
void TraceLiveRanges(const char* name, LAllocator* allocator);
diff --git a/src/3rdparty/v8/src/ia32/assembler-ia32-inl.h b/src/3rdparty/v8/src/ia32/assembler-ia32-inl.h
index 114f878..56d88b0 100644
--- a/src/3rdparty/v8/src/ia32/assembler-ia32-inl.h
+++ b/src/3rdparty/v8/src/ia32/assembler-ia32-inl.h
@@ -300,7 +300,7 @@ void RelocInfo::Visit(Heap* heap) {
Immediate::Immediate(int x) {
x_ = x;
- rmode_ = RelocInfo::NONE;
+ rmode_ = RelocInfo::NONE32;
}
@@ -326,20 +326,20 @@ Immediate::Immediate(Handle<Object> handle) {
} else {
// no relocation needed
x_ = reinterpret_cast<intptr_t>(obj);
- rmode_ = RelocInfo::NONE;
+ rmode_ = RelocInfo::NONE32;
}
}
Immediate::Immediate(Smi* value) {
x_ = reinterpret_cast<intptr_t>(value);
- rmode_ = RelocInfo::NONE;
+ rmode_ = RelocInfo::NONE32;
}
Immediate::Immediate(Address addr) {
x_ = reinterpret_cast<int32_t>(addr);
- rmode_ = RelocInfo::NONE;
+ rmode_ = RelocInfo::NONE32;
}
@@ -366,7 +366,7 @@ void Assembler::emit(Handle<Object> handle) {
void Assembler::emit(uint32_t x, RelocInfo::Mode rmode, TypeFeedbackId id) {
if (rmode == RelocInfo::CODE_TARGET && !id.IsNone()) {
RecordRelocInfo(RelocInfo::CODE_TARGET_WITH_ID, id.ToInt());
- } else if (rmode != RelocInfo::NONE) {
+ } else if (!RelocInfo::IsNone(rmode)) {
RecordRelocInfo(rmode);
}
emit(x);
@@ -379,7 +379,7 @@ void Assembler::emit(const Immediate& x) {
emit_code_relative_offset(label);
return;
}
- if (x.rmode_ != RelocInfo::NONE) RecordRelocInfo(x.rmode_);
+ if (!RelocInfo::IsNone(x.rmode_)) RecordRelocInfo(x.rmode_);
emit(x.x_);
}
@@ -396,7 +396,7 @@ void Assembler::emit_code_relative_offset(Label* label) {
void Assembler::emit_w(const Immediate& x) {
- ASSERT(x.rmode_ == RelocInfo::NONE);
+ ASSERT(RelocInfo::IsNone(x.rmode_));
uint16_t value = static_cast<uint16_t>(x.x_);
reinterpret_cast<uint16_t*>(pc_)[0] = value;
pc_ += sizeof(uint16_t);
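
These hunks replace direct comparisons against RelocInfo::NONE with the IsNone() predicate because the mode was split by pointer width; only NONE32 appears in this patch, so the 64-bit twin below is an assumption about the predicate's shape:

// Presumed shape of the new predicate (NONE64 assumed, not shown here).
static inline bool IsNone(RelocInfo::Mode mode) {
  return mode == RelocInfo::NONE32 || mode == RelocInfo::NONE64;
}
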
diff --git a/src/3rdparty/v8/src/ia32/assembler-ia32.cc b/src/3rdparty/v8/src/ia32/assembler-ia32.cc
index 06fc411..123383c 100644
--- a/src/3rdparty/v8/src/ia32/assembler-ia32.cc
+++ b/src/3rdparty/v8/src/ia32/assembler-ia32.cc
@@ -55,6 +55,39 @@ uint64_t CpuFeatures::supported_ = 0;
uint64_t CpuFeatures::found_by_runtime_probing_ = 0;
+ExternalReference ExternalReference::cpu_features() {
+ ASSERT(CpuFeatures::initialized_);
+ return ExternalReference(&CpuFeatures::supported_);
+}
+
+
+int IntelDoubleRegister::NumAllocatableRegisters() {
+ if (CpuFeatures::IsSupported(SSE2)) {
+ return XMMRegister::kNumAllocatableRegisters;
+ } else {
+ return X87TopOfStackRegister::kNumAllocatableRegisters;
+ }
+}
+
+
+int IntelDoubleRegister::NumRegisters() {
+ if (CpuFeatures::IsSupported(SSE2)) {
+ return XMMRegister::kNumRegisters;
+ } else {
+ return X87TopOfStackRegister::kNumRegisters;
+ }
+}
+
+
+const char* IntelDoubleRegister::AllocationIndexToString(int index) {
+ if (CpuFeatures::IsSupported(SSE2)) {
+ return XMMRegister::AllocationIndexToString(index);
+ } else {
+ return X87TopOfStackRegister::AllocationIndexToString(index);
+ }
+}
+
+
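Since SSE2 support is probed at runtime, the allocatable double-register count can no longer be a compile-time constant. An illustrative call-site pattern this enables (not taken from this diff):

    // Seven XMM registers with SSE2, only the x87 top-of-stack without it.
    for (int i = 0; i < IntelDoubleRegister::NumAllocatableRegisters(); ++i) {
      PrintF("double reg %d = %s\n", i,
             IntelDoubleRegister::AllocationIndexToString(i));
    }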
// The Probe method needs executable memory, so it uses Heap::CreateCode.
// Allocation failure is silent and leads to a safe default.
void CpuFeatures::Probe() {
@@ -209,7 +242,7 @@ void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
#endif
// Patch the code.
- patcher.masm()->call(target, RelocInfo::NONE);
+ patcher.masm()->call(target, RelocInfo::NONE32);
// Check that the size of the code generated is as expected.
ASSERT_EQ(kCallCodeSize,
@@ -228,11 +261,11 @@ void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
Operand::Operand(Register base, int32_t disp, RelocInfo::Mode rmode) {
// [base + disp/r]
- if (disp == 0 && rmode == RelocInfo::NONE && !base.is(ebp)) {
+ if (disp == 0 && RelocInfo::IsNone(rmode) && !base.is(ebp)) {
// [base]
set_modrm(0, base);
if (base.is(esp)) set_sib(times_1, esp, base);
- } else if (is_int8(disp) && rmode == RelocInfo::NONE) {
+ } else if (is_int8(disp) && RelocInfo::IsNone(rmode)) {
// [base + disp8]
set_modrm(1, base);
if (base.is(esp)) set_sib(times_1, esp, base);
@@ -253,11 +286,11 @@ Operand::Operand(Register base,
RelocInfo::Mode rmode) {
ASSERT(!index.is(esp)); // illegal addressing mode
// [base + index*scale + disp/r]
- if (disp == 0 && rmode == RelocInfo::NONE && !base.is(ebp)) {
+ if (disp == 0 && RelocInfo::IsNone(rmode) && !base.is(ebp)) {
// [base + index*scale]
set_modrm(0, esp);
set_sib(scale, index, base);
- } else if (is_int8(disp) && rmode == RelocInfo::NONE) {
+ } else if (is_int8(disp) && RelocInfo::IsNone(rmode)) {
// [base + index*scale + disp8]
set_modrm(1, esp);
set_sib(scale, index, base);
@@ -312,47 +345,19 @@ Register Operand::reg() const {
static void InitCoverageLog();
#endif
-Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size)
- : AssemblerBase(arg_isolate),
+Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
+ : AssemblerBase(isolate, buffer, buffer_size),
positions_recorder_(this) {
- if (buffer == NULL) {
- // Do our own buffer management.
- if (buffer_size <= kMinimalBufferSize) {
- buffer_size = kMinimalBufferSize;
-
- if (isolate()->assembler_spare_buffer() != NULL) {
- buffer = isolate()->assembler_spare_buffer();
- isolate()->set_assembler_spare_buffer(NULL);
- }
- }
- if (buffer == NULL) {
- buffer_ = NewArray<byte>(buffer_size);
- } else {
- buffer_ = static_cast<byte*>(buffer);
- }
- buffer_size_ = buffer_size;
- own_buffer_ = true;
- } else {
- // Use externally provided buffer instead.
- ASSERT(buffer_size > 0);
- buffer_ = static_cast<byte*>(buffer);
- buffer_size_ = buffer_size;
- own_buffer_ = false;
- }
-
// Clear the buffer in debug mode unless it was provided by the
// caller in which case we can't be sure it's okay to overwrite
// existing code in it; see CodePatcher::CodePatcher(...).
#ifdef DEBUG
if (own_buffer_) {
- memset(buffer_, 0xCC, buffer_size); // int3
+ memset(buffer_, 0xCC, buffer_size_); // int3
}
#endif
- // Set up buffer pointers.
- ASSERT(buffer_ != NULL);
- pc_ = buffer_;
- reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
+ reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
#ifdef GENERATED_CODE_COVERAGE
InitCoverageLog();
@@ -360,18 +365,6 @@ Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size)
}
-Assembler::~Assembler() {
- if (own_buffer_) {
- if (isolate()->assembler_spare_buffer() == NULL &&
- buffer_size_ == kMinimalBufferSize) {
- isolate()->set_assembler_spare_buffer(buffer_);
- } else {
- DeleteArray(buffer_);
- }
- }
-}
-
-
void Assembler::GetCode(CodeDesc* desc) {
// Finalize code (at this point overflow() may be true, but the gap ensures
// that we are still not overlapping instructions and relocation info).
@@ -1193,7 +1186,7 @@ void Assembler::test(Register reg, const Immediate& imm) {
EnsureSpace ensure_space(this);
// Only use test against byte for registers that have a byte
// variant: eax, ebx, ecx, and edx.
- if (imm.rmode_ == RelocInfo::NONE &&
+ if (RelocInfo::IsNone(imm.rmode_) &&
is_uint8(imm.x_) &&
reg.is_byte_register()) {
uint8_t imm8 = imm.x_;
@@ -2006,6 +1999,16 @@ void Assembler::addsd(XMMRegister dst, XMMRegister src) {
}
+void Assembler::addsd(XMMRegister dst, const Operand& src) {
+ ASSERT(CpuFeatures::IsEnabled(SSE2));
+ EnsureSpace ensure_space(this);
+ EMIT(0xF2);
+ EMIT(0x0F);
+ EMIT(0x58);
+ emit_sse_operand(dst, src);
+}
+
+
void Assembler::mulsd(XMMRegister dst, XMMRegister src) {
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
@@ -2016,6 +2019,16 @@ void Assembler::mulsd(XMMRegister dst, XMMRegister src) {
}
+void Assembler::mulsd(XMMRegister dst, const Operand& src) {
+ ASSERT(CpuFeatures::IsEnabled(SSE2));
+ EnsureSpace ensure_space(this);
+ EMIT(0xF2);
+ EMIT(0x0F);
+ EMIT(0x59);
+ emit_sse_operand(dst, src);
+}
+
+
void Assembler::subsd(XMMRegister dst, XMMRegister src) {
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
@@ -2123,6 +2136,15 @@ void Assembler::movmskpd(Register dst, XMMRegister src) {
}
+void Assembler::movmskps(Register dst, XMMRegister src) {
+ ASSERT(CpuFeatures::IsEnabled(SSE2));
+ EnsureSpace ensure_space(this);
+ EMIT(0x0F);
+ EMIT(0x50);
+ emit_sse_operand(dst, src);
+}
+
+
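For reference, the bytes emitted by the three new instructions match the standard IA-32 SSE/SSE2 encodings:

    //   addsd  xmm, xmm/m64  ->  F2 0F 58 /r
    //   mulsd  xmm, xmm/m64  ->  F2 0F 59 /r
    //   movmskps r32, xmm    ->     0F 50 /r  (movmskpd adds the 66 prefix)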
void Assembler::pcmpeqd(XMMRegister dst, XMMRegister src) {
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
@@ -2219,7 +2241,8 @@ void Assembler::prefetch(const Operand& src, int level) {
EnsureSpace ensure_space(this);
EMIT(0x0F);
EMIT(0x18);
- XMMRegister code = { level }; // Emit hint number in Reg position of RegR/M.
+ // Emit the hint number in the reg field of the ModR/M byte.
+ XMMRegister code = XMMRegister::from_code(level);
emit_sse_operand(code, src);
}
@@ -2412,7 +2435,7 @@ void Assembler::psrlq(XMMRegister dst, XMMRegister src) {
}
-void Assembler::pshufd(XMMRegister dst, XMMRegister src, int8_t shuffle) {
+void Assembler::pshufd(XMMRegister dst, XMMRegister src, uint8_t shuffle) {
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0x66);
@@ -2464,7 +2487,7 @@ void Assembler::emit_sse_operand(Register dst, XMMRegister src) {
void Assembler::Print() {
- Disassembler::Decode(stdout, buffer_, pc_);
+ Disassembler::Decode(isolate(), stdout, buffer_, pc_);
}
@@ -2597,7 +2620,7 @@ void Assembler::emit_operand(Register reg, const Operand& adr) {
pc_ += length;
// Emit relocation information if necessary.
- if (length >= sizeof(int32_t) && adr.rmode_ != RelocInfo::NONE) {
+ if (length >= sizeof(int32_t) && !RelocInfo::IsNone(adr.rmode_)) {
pc_ -= sizeof(int32_t); // pc_ must be *at* disp32
RecordRelocInfo(adr.rmode_);
pc_ += sizeof(int32_t);
@@ -2626,7 +2649,7 @@ void Assembler::dd(uint32_t data) {
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
- ASSERT(rmode != RelocInfo::NONE);
+ ASSERT(!RelocInfo::IsNone(rmode));
// Don't record external references unless the heap will be serialized.
if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
#ifdef DEBUG
diff --git a/src/3rdparty/v8/src/ia32/assembler-ia32.h b/src/3rdparty/v8/src/ia32/assembler-ia32.h
index 9fb7baa..315bc17 100644
--- a/src/3rdparty/v8/src/ia32/assembler-ia32.h
+++ b/src/3rdparty/v8/src/ia32/assembler-ia32.h
@@ -65,7 +65,10 @@ namespace internal {
// and best performance in optimized code.
//
struct Register {
- static const int kNumAllocatableRegisters = 6;
+ static const int kMaxNumAllocatableRegisters = 6;
+ static int NumAllocatableRegisters() {
+ return kMaxNumAllocatableRegisters;
+ }
static const int kNumRegisters = 8;
static inline const char* AllocationIndexToString(int index);
@@ -119,7 +122,7 @@ const Register no_reg = { kRegister_no_reg_Code };
inline const char* Register::AllocationIndexToString(int index) {
- ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+ ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
// This is the mapping of allocation indices to registers.
const char* const kNames[] = { "eax", "ecx", "edx", "ebx", "esi", "edi" };
return kNames[index];
@@ -133,22 +136,70 @@ inline int Register::ToAllocationIndex(Register reg) {
inline Register Register::FromAllocationIndex(int index) {
- ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+ ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
return (index >= 4) ? from_code(index + 2) : from_code(index);
}
-struct XMMRegister {
- static const int kNumAllocatableRegisters = 7;
- static const int kNumRegisters = 8;
+struct IntelDoubleRegister {
+ static const int kMaxNumRegisters = 8;
+ static const int kMaxNumAllocatableRegisters = 7;
+ static int NumAllocatableRegisters();
+ static int NumRegisters();
+ static const char* AllocationIndexToString(int index);
- static int ToAllocationIndex(XMMRegister reg) {
+ static int ToAllocationIndex(IntelDoubleRegister reg) {
ASSERT(reg.code() != 0);
return reg.code() - 1;
}
+ static IntelDoubleRegister FromAllocationIndex(int index) {
+ ASSERT(index >= 0 && index < NumAllocatableRegisters());
+ return from_code(index + 1);
+ }
+
+ static IntelDoubleRegister from_code(int code) {
+ IntelDoubleRegister result = { code };
+ return result;
+ }
+
+ bool is_valid() const {
+ return 0 <= code_ && code_ < NumRegisters();
+ }
+ int code() const {
+ ASSERT(is_valid());
+ return code_;
+ }
+
+ int code_;
+};
+
+
+const IntelDoubleRegister double_register_0 = { 0 };
+const IntelDoubleRegister double_register_1 = { 1 };
+const IntelDoubleRegister double_register_2 = { 2 };
+const IntelDoubleRegister double_register_3 = { 3 };
+const IntelDoubleRegister double_register_4 = { 4 };
+const IntelDoubleRegister double_register_5 = { 5 };
+const IntelDoubleRegister double_register_6 = { 6 };
+const IntelDoubleRegister double_register_7 = { 7 };
+
+
+struct XMMRegister : IntelDoubleRegister {
+ static const int kNumAllocatableRegisters = 7;
+ static const int kNumRegisters = 8;
+
+ static XMMRegister from_code(int code) {
+ STATIC_ASSERT(sizeof(XMMRegister) == sizeof(IntelDoubleRegister));
+ XMMRegister result;
+ result.code_ = code;
+ return result;
+ }
+
+ bool is(XMMRegister reg) const { return code_ == reg.code_; }
+
static XMMRegister FromAllocationIndex(int index) {
- ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+ ASSERT(index >= 0 && index < NumAllocatableRegisters());
return from_code(index + 1);
}
@@ -165,34 +216,46 @@ struct XMMRegister {
};
return names[index];
}
+};
- static XMMRegister from_code(int code) {
- XMMRegister r = { code };
- return r;
+
+#define xmm0 (static_cast<const XMMRegister&>(double_register_0))
+#define xmm1 (static_cast<const XMMRegister&>(double_register_1))
+#define xmm2 (static_cast<const XMMRegister&>(double_register_2))
+#define xmm3 (static_cast<const XMMRegister&>(double_register_3))
+#define xmm4 (static_cast<const XMMRegister&>(double_register_4))
+#define xmm5 (static_cast<const XMMRegister&>(double_register_5))
+#define xmm6 (static_cast<const XMMRegister&>(double_register_6))
+#define xmm7 (static_cast<const XMMRegister&>(double_register_7))
+
+
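Turning xmm0..xmm7 into casting macros over shared IntelDoubleRegister constants avoids duplicate storage while keeping XMMRegister-typed operands. The cast is sound only because the subclass adds no data members, which the STATIC_ASSERT in XMMRegister::from_code pins down; a minimal sketch of the invariant:

    STATIC_ASSERT(sizeof(XMMRegister) == sizeof(IntelDoubleRegister));
    const XMMRegister& reg = static_cast<const XMMRegister&>(double_register_3);
    ASSERT(reg.code() == 3);  // same storage, viewed through the subclass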
+struct X87TopOfStackRegister : IntelDoubleRegister {
+ static const int kNumAllocatableRegisters = 1;
+ static const int kNumRegisters = 1;
+
+ bool is(X87TopOfStackRegister reg) const {
+ return code_ == reg.code_;
}
- bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; }
- bool is(XMMRegister reg) const { return code_ == reg.code_; }
- int code() const {
- ASSERT(is_valid());
- return code_;
+ static const char* AllocationIndexToString(int index) {
+ ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+ const char* const names[] = {
+ "st0",
+ };
+ return names[index];
}
- int code_;
+ static int ToAllocationIndex(X87TopOfStackRegister reg) {
+ ASSERT(reg.code() == 0);
+ return 0;
+ }
};
+#define x87tos \
+ static_cast<const X87TopOfStackRegister&>(double_register_0)
-const XMMRegister xmm0 = { 0 };
-const XMMRegister xmm1 = { 1 };
-const XMMRegister xmm2 = { 2 };
-const XMMRegister xmm3 = { 3 };
-const XMMRegister xmm4 = { 4 };
-const XMMRegister xmm5 = { 5 };
-const XMMRegister xmm6 = { 6 };
-const XMMRegister xmm7 = { 7 };
-
-typedef XMMRegister DoubleRegister;
+typedef IntelDoubleRegister DoubleRegister;
enum Condition {
@@ -275,12 +338,12 @@ class Immediate BASE_EMBEDDED {
return Immediate(label);
}
- bool is_zero() const { return x_ == 0 && rmode_ == RelocInfo::NONE; }
+ bool is_zero() const { return x_ == 0 && RelocInfo::IsNone(rmode_); }
bool is_int8() const {
- return -128 <= x_ && x_ < 128 && rmode_ == RelocInfo::NONE;
+ return -128 <= x_ && x_ < 128 && RelocInfo::IsNone(rmode_);
}
bool is_int16() const {
- return -32768 <= x_ && x_ < 32768 && rmode_ == RelocInfo::NONE;
+ return -32768 <= x_ && x_ < 32768 && RelocInfo::IsNone(rmode_);
}
private:
@@ -320,20 +383,20 @@ class Operand BASE_EMBEDDED {
// [base + disp/r]
explicit Operand(Register base, int32_t disp,
- RelocInfo::Mode rmode = RelocInfo::NONE);
+ RelocInfo::Mode rmode = RelocInfo::NONE32);
// [base + index*scale + disp/r]
explicit Operand(Register base,
Register index,
ScaleFactor scale,
int32_t disp,
- RelocInfo::Mode rmode = RelocInfo::NONE);
+ RelocInfo::Mode rmode = RelocInfo::NONE32);
// [index*scale + disp/r]
explicit Operand(Register index,
ScaleFactor scale,
int32_t disp,
- RelocInfo::Mode rmode = RelocInfo::NONE);
+ RelocInfo::Mode rmode = RelocInfo::NONE32);
static Operand StaticVariable(const ExternalReference& ext) {
return Operand(reinterpret_cast<int32_t>(ext.address()),
@@ -549,6 +612,7 @@ class CpuFeatures : public AllStatic {
static uint64_t supported_;
static uint64_t found_by_runtime_probing_;
+ friend class ExternalReference;
DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
};
@@ -582,7 +646,7 @@ class Assembler : public AssemblerBase {
// upon destruction of the assembler.
// TODO(vitalyr): the assembler does not need an isolate.
Assembler(Isolate* isolate, void* buffer, int buffer_size);
- ~Assembler();
+ virtual ~Assembler() { }
// GetCode emits any pending (non-emitted) code and fills the descriptor
// desc. GetCode() is idempotent; it returns the same result if no other
@@ -990,8 +1054,10 @@ class Assembler : public AssemblerBase {
void cvtsd2ss(XMMRegister dst, XMMRegister src);
void addsd(XMMRegister dst, XMMRegister src);
+ void addsd(XMMRegister dst, const Operand& src);
void subsd(XMMRegister dst, XMMRegister src);
void mulsd(XMMRegister dst, XMMRegister src);
+ void mulsd(XMMRegister dst, const Operand& src);
void divsd(XMMRegister dst, XMMRegister src);
void xorpd(XMMRegister dst, XMMRegister src);
void xorps(XMMRegister dst, XMMRegister src);
@@ -1013,6 +1079,7 @@ class Assembler : public AssemblerBase {
void roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode);
void movmskpd(Register dst, XMMRegister src);
+ void movmskps(Register dst, XMMRegister src);
void cmpltsd(XMMRegister dst, XMMRegister src);
void pcmpeqd(XMMRegister dst, XMMRegister src);
@@ -1048,7 +1115,7 @@ class Assembler : public AssemblerBase {
void psllq(XMMRegister dst, XMMRegister src);
void psrlq(XMMRegister reg, int8_t shift);
void psrlq(XMMRegister dst, XMMRegister src);
- void pshufd(XMMRegister dst, XMMRegister src, int8_t shuffle);
+ void pshufd(XMMRegister dst, XMMRegister src, uint8_t shuffle);
void pextrd(Register dst, XMMRegister src, int8_t offset) {
pextrd(Operand(dst), src, offset);
}
@@ -1091,8 +1158,6 @@ class Assembler : public AssemblerBase {
void db(uint8_t data);
void dd(uint32_t data);
- int pc_offset() const { return pc_ - buffer_; }
-
// Check if there is less than kGap bytes available in the buffer.
// If this is the case, we need to grow the buffer before emitting
// an instruction or relocation information.
@@ -1111,7 +1176,6 @@ class Assembler : public AssemblerBase {
// Avoid overflows for displacements etc.
static const int kMaximalBufferSize = 512*MB;
- static const int kMinimalBufferSize = 4*KB;
byte byte_at(int pos) { return buffer_[pos]; }
void set_byte_at(int pos, byte value) { buffer_[pos] = value; }
@@ -1177,15 +1241,7 @@ class Assembler : public AssemblerBase {
friend class CodePatcher;
friend class EnsureSpace;
- // Code buffer:
- // The buffer into which code and relocation info are generated.
- byte* buffer_;
- int buffer_size_;
- // True if the assembler owns the buffer, false if buffer is external.
- bool own_buffer_;
-
// code generation
- byte* pc_; // the program counter; moves forward
RelocInfoWriter reloc_info_writer;
PositionsRecorder positions_recorder_;
diff --git a/src/3rdparty/v8/src/ia32/builtins-ia32.cc b/src/3rdparty/v8/src/ia32/builtins-ia32.cc
index 01785bb..e3b2b7b 100644
--- a/src/3rdparty/v8/src/ia32/builtins-ia32.cc
+++ b/src/3rdparty/v8/src/ia32/builtins-ia32.cc
@@ -257,6 +257,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ AllocateInNewSpace(FixedArray::kHeaderSize,
times_pointer_size,
edx,
+ REGISTER_VALUE_IS_INT32,
edi,
ecx,
no_reg,
@@ -382,6 +383,10 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
__ j(above_equal, &exit);
+ // Symbols are "objects".
+ __ CmpInstanceType(ecx, SYMBOL_TYPE);
+ __ j(equal, &exit);
+
// Throw away the result of the constructor invocation and use the
// on-stack receiver as the result.
__ bind(&use_receiver);
@@ -574,6 +579,25 @@ CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
+void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
+ // Enter an internal frame.
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Preserve registers across the notification; this is important for
+ // compiled stubs that tail call the runtime on deopts, passing their
+ // parameters in registers.
+ __ pushad();
+ __ CallRuntime(Runtime::kNotifyStubFailure, 0);
+ __ popad();
+ // Tear down internal frame.
+ }
+
+ __ pop(MemOperand(esp, 0)); // Ignore state offset
+ __ ret(0); // Return to IC Miss stub, continuation still on stack.
+}
+
+
static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
Deoptimizer::BailoutType type) {
{
@@ -1083,8 +1107,9 @@ static void AllocateJSArray(MacroAssembler* masm,
// requested elements.
STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
__ AllocateInNewSpace(JSArray::kSize + FixedArray::kHeaderSize,
- times_half_pointer_size, // array_size is a smi.
+ times_pointer_size,
array_size,
+ REGISTER_VALUE_IS_SMI,
result,
elements_array_end,
scratch,
@@ -1454,34 +1479,66 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : argc
+ // -- ebx : type info cell
// -- edi : constructor
// -- esp[0] : return address
// -- esp[4] : last argument
// -----------------------------------
- Label generic_constructor;
-
if (FLAG_debug_code) {
// The array construct code is only set for the global and natives
// builtin Array functions which always have maps.
// Initial map for the builtin Array function should be a map.
- __ mov(ebx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
+ __ mov(ecx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a NULL and a Smi.
- __ test(ebx, Immediate(kSmiTagMask));
+ __ test(ecx, Immediate(kSmiTagMask));
__ Assert(not_zero, "Unexpected initial map for Array function");
- __ CmpObjectType(ebx, MAP_TYPE, ecx);
+ __ CmpObjectType(ecx, MAP_TYPE, ecx);
__ Assert(equal, "Unexpected initial map for Array function");
- }
- // Run the native code for the Array function called as constructor.
- ArrayNativeCode(masm, true, &generic_constructor);
+ if (FLAG_optimize_constructed_arrays) {
+ // We should have either undefined or a valid JSGlobalPropertyCell in ebx.
+ Label okay_here;
+ Handle<Object> undefined_sentinel(
+ masm->isolate()->heap()->undefined_value(), masm->isolate());
+ Handle<Map> global_property_cell_map(
+ masm->isolate()->heap()->global_property_cell_map());
+ __ cmp(ebx, Immediate(undefined_sentinel));
+ __ j(equal, &okay_here);
+ __ cmp(FieldOperand(ebx, 0), Immediate(global_property_cell_map));
+ __ Assert(equal, "Expected property cell in register ebx");
+ __ bind(&okay_here);
+ }
+ }
- // Jump to the generic construct code in case the specialized code cannot
- // handle the construction.
- __ bind(&generic_constructor);
- Handle<Code> generic_construct_stub =
- masm->isolate()->builtins()->JSConstructStubGeneric();
- __ jmp(generic_construct_stub, RelocInfo::CODE_TARGET);
+ if (FLAG_optimize_constructed_arrays) {
+ Label not_zero_case, not_one_case;
+ __ test(eax, eax);
+ __ j(not_zero, &not_zero_case);
+ ArrayNoArgumentConstructorStub no_argument_stub;
+ __ TailCallStub(&no_argument_stub);
+
+ __ bind(&not_zero_case);
+ __ cmp(eax, 1);
+ __ j(greater, &not_one_case);
+ ArraySingleArgumentConstructorStub single_argument_stub;
+ __ TailCallStub(&single_argument_stub);
+
+ __ bind(&not_one_case);
+ ArrayNArgumentsConstructorStub n_argument_stub;
+ __ TailCallStub(&n_argument_stub);
+ } else {
+ Label generic_constructor;
+ // Run the native code for the Array function called as constructor.
+ ArrayNativeCode(masm, true, &generic_constructor);
+
+ // Jump to the generic construct code in case the specialized code cannot
+ // handle the construction.
+ __ bind(&generic_constructor);
+ Handle<Code> generic_construct_stub =
+ masm->isolate()->builtins()->JSConstructStubGeneric();
+ __ jmp(generic_construct_stub, RelocInfo::CODE_TARGET);
+ }
}
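With FLAG_optimize_constructed_arrays enabled, the generic construct stub is bypassed in favor of stubs specialized on the argument count, with ebx carrying the type info cell they read the elements kind from. The dispatch above reduces to:

    //   eax == 0  ->  ArrayNoArgumentConstructorStub
    //   eax == 1  ->  ArraySingleArgumentConstructorStub
    //   eax  > 1  ->  ArrayNArgumentsConstructorStub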
diff --git a/src/3rdparty/v8/src/ia32/code-stubs-ia32.cc b/src/3rdparty/v8/src/ia32/code-stubs-ia32.cc
index 80954b8..44df82a 100644
--- a/src/3rdparty/v8/src/ia32/code-stubs-ia32.cc
+++ b/src/3rdparty/v8/src/ia32/code-stubs-ia32.cc
@@ -34,12 +34,88 @@
#include "isolate.h"
#include "jsregexp.h"
#include "regexp-macro-assembler.h"
+#include "runtime.h"
#include "stub-cache.h"
#include "codegen.h"
+#include "runtime.h"
namespace v8 {
namespace internal {
+
+void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { eax, ebx, ecx, edx };
+ descriptor->register_param_count_ = 4;
+ descriptor->register_params_ = registers;
+ descriptor->stack_parameter_count_ = NULL;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kCreateObjectLiteralShallow)->entry;
+}
+
+
+void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { edx, ecx };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->stack_parameter_count_ = NULL;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure);
+}
+
+
+void TransitionElementsKindStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { eax, ebx };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kTransitionElementsKind)->entry;
+}
+
+
+static void InitializeArrayConstructorDescriptor(Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ // register state
+ // edi -- constructor function
+ // ebx -- type info cell with elements kind
+ // eax -- number of arguments to the constructor function
+ static Register registers[] = { edi, ebx };
+ descriptor->register_param_count_ = 2;
+ // Stack parameter count covers the constructor pointer and a single argument.
+ descriptor->stack_parameter_count_ = &eax;
+ descriptor->register_params_ = registers;
+ descriptor->extra_expression_stack_count_ = 1;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(ArrayConstructor_StubFailure);
+}
+
+
+void ArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ InitializeArrayConstructorDescriptor(isolate, descriptor);
+}
+
+
+void ArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ InitializeArrayConstructorDescriptor(isolate, descriptor);
+}
+
+
+void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ InitializeArrayConstructorDescriptor(isolate, descriptor);
+}
+
+
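These initializers fill in the CodeStubInterfaceDescriptor that Hydrogen-compiled stubs are built against. A hedged outline of the fields used here (the real declaration lives in code-stubs.h; ordering and any omitted members are assumptions):

    struct CodeStubInterfaceDescriptor {
      int register_param_count_;         // parameters passed in registers
      Register* register_params_;
      Register* stack_parameter_count_;  // NULL when the count is static
      int extra_expression_stack_count_;
      Address deoptimization_handler_;   // runtime entry on miss/deopt
    };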
#define __ ACCESS_MASM(masm)
void ToNumberStub::Generate(MacroAssembler* masm) {
@@ -221,8 +297,10 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
__ mov(Operand(eax, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)), ebx);
// Copy the qml global object from the previous context.
- __ mov(ebx, Operand(esi, Context::SlotOffset(Context::QML_GLOBAL_OBJECT_INDEX)));
- __ mov(Operand(eax, Context::SlotOffset(Context::QML_GLOBAL_OBJECT_INDEX)), ebx);
+ __ mov(ebx,
+ Operand(esi, Context::SlotOffset(Context::QML_GLOBAL_OBJECT_INDEX)));
+ __ mov(Operand(eax, Context::SlotOffset(Context::QML_GLOBAL_OBJECT_INDEX)),
+ ebx);
// Initialize the rest of the slots to undefined.
@@ -320,6 +398,7 @@ static void GenerateFastCloneShallowArrayCommon(
MacroAssembler* masm,
int length,
FastCloneShallowArrayStub::Mode mode,
+ AllocationSiteMode allocation_site_mode,
Label* fail) {
// Registers on entry:
//
@@ -333,11 +412,27 @@ static void GenerateFastCloneShallowArrayCommon(
? FixedDoubleArray::SizeFor(length)
: FixedArray::SizeFor(length);
}
- int size = JSArray::kSize + elements_size;
+ int size = JSArray::kSize;
+ int allocation_info_start = size;
+ if (allocation_site_mode == TRACK_ALLOCATION_SITE) {
+ size += AllocationSiteInfo::kSize;
+ }
+ size += elements_size;
// Allocate both the JS array and the elements array in one big
// allocation. This avoids multiple limit checks.
- __ AllocateInNewSpace(size, eax, ebx, edx, fail, TAG_OBJECT);
+ AllocationFlags flags = TAG_OBJECT;
+ if (mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS) {
+ flags = static_cast<AllocationFlags>(DOUBLE_ALIGNMENT | flags);
+ }
+ __ AllocateInNewSpace(size, eax, ebx, edx, fail, flags);
+
+ if (allocation_site_mode == TRACK_ALLOCATION_SITE) {
+ __ mov(FieldOperand(eax, allocation_info_start),
+ Immediate(Handle<Map>(masm->isolate()->heap()->
+ allocation_site_info_map())));
+ __ mov(FieldOperand(eax, allocation_info_start + kPointerSize), ecx);
+ }
// Copy the JS array part.
for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
@@ -351,7 +446,11 @@ static void GenerateFastCloneShallowArrayCommon(
// Get hold of the elements array of the boilerplate and setup the
// elements pointer in the resulting object.
__ mov(ecx, FieldOperand(ecx, JSArray::kElementsOffset));
- __ lea(edx, Operand(eax, JSArray::kSize));
+ if (allocation_site_mode == TRACK_ALLOCATION_SITE) {
+ __ lea(edx, Operand(eax, JSArray::kSize + AllocationSiteInfo::kSize));
+ } else {
+ __ lea(edx, Operand(eax, JSArray::kSize));
+ }
__ mov(FieldOperand(eax, JSArray::kElementsOffset), edx);
// Copy the elements array.
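When allocation sites are tracked, the single allocation now holds three consecutive parts, and the elements pointer computed above must skip the info record:

    //   base                                         JSArray header
    //   base + JSArray::kSize                        AllocationSiteInfo
    //                                                (map word, then the
    //                                                 boilerplate from ecx)
    //   base + JSArray::kSize
    //        + AllocationSiteInfo::kSize             elements array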
@@ -406,15 +505,17 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
__ mov(ebx, FieldOperand(ecx, JSArray::kElementsOffset));
__ CheckMap(ebx, factory->fixed_cow_array_map(),
&check_fast_elements, DONT_DO_SMI_CHECK);
- GenerateFastCloneShallowArrayCommon(masm, 0,
- COPY_ON_WRITE_ELEMENTS, &slow_case);
+ GenerateFastCloneShallowArrayCommon(masm, 0, COPY_ON_WRITE_ELEMENTS,
+ allocation_site_mode_,
+ &slow_case);
__ ret(3 * kPointerSize);
__ bind(&check_fast_elements);
__ CheckMap(ebx, factory->fixed_array_map(),
&double_elements, DONT_DO_SMI_CHECK);
- GenerateFastCloneShallowArrayCommon(masm, length_,
- CLONE_ELEMENTS, &slow_case);
+ GenerateFastCloneShallowArrayCommon(masm, length_, CLONE_ELEMENTS,
+ allocation_site_mode_,
+ &slow_case);
__ ret(3 * kPointerSize);
__ bind(&double_elements);
@@ -443,7 +544,10 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
__ pop(ecx);
}
- GenerateFastCloneShallowArrayCommon(masm, length_, mode, &slow_case);
+ GenerateFastCloneShallowArrayCommon(masm, length_, mode,
+ allocation_site_mode_,
+ &slow_case);
+
// Return and remove the on-stack parameters.
__ ret(3 * kPointerSize);
@@ -452,52 +556,6 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
}
-void FastCloneShallowObjectStub::Generate(MacroAssembler* masm) {
- // Stack layout on entry:
- //
- // [esp + kPointerSize]: object literal flags.
- // [esp + (2 * kPointerSize)]: constant properties.
- // [esp + (3 * kPointerSize)]: literal index.
- // [esp + (4 * kPointerSize)]: literals array.
-
- // Load boilerplate object into ecx and check if we need to create a
- // boilerplate.
- Label slow_case;
- __ mov(ecx, Operand(esp, 4 * kPointerSize));
- __ mov(eax, Operand(esp, 3 * kPointerSize));
- STATIC_ASSERT(kPointerSize == 4);
- STATIC_ASSERT(kSmiTagSize == 1);
- STATIC_ASSERT(kSmiTag == 0);
- __ mov(ecx, FieldOperand(ecx, eax, times_half_pointer_size,
- FixedArray::kHeaderSize));
- Factory* factory = masm->isolate()->factory();
- __ cmp(ecx, factory->undefined_value());
- __ j(equal, &slow_case);
-
- // Check that the boilerplate contains only fast properties and we can
- // statically determine the instance size.
- int size = JSObject::kHeaderSize + length_ * kPointerSize;
- __ mov(eax, FieldOperand(ecx, HeapObject::kMapOffset));
- __ movzx_b(eax, FieldOperand(eax, Map::kInstanceSizeOffset));
- __ cmp(eax, Immediate(size >> kPointerSizeLog2));
- __ j(not_equal, &slow_case);
-
- // Allocate the JS object and copy header together with all in-object
- // properties from the boilerplate.
- __ AllocateInNewSpace(size, eax, ebx, edx, &slow_case, TAG_OBJECT);
- for (int i = 0; i < size; i += kPointerSize) {
- __ mov(ebx, FieldOperand(ecx, i));
- __ mov(FieldOperand(eax, i), ebx);
- }
-
- // Return and remove the on-stack parameters.
- __ ret(4 * kPointerSize);
-
- __ bind(&slow_case);
- __ TailCallRuntime(Runtime::kCreateObjectLiteralShallow, 4, 1);
-}
-
-
// The stub expects its argument on the stack and returns its result in tos_:
// zero for false, and a non-zero value for true.
void ToBooleanStub::Generate(MacroAssembler* masm) {
@@ -705,25 +763,14 @@ class FloatingPointHelper : public AllStatic {
Label* non_float,
Register scratch);
- // Checks that the two floating point numbers on top of the FPU stack
- // have int32 values.
- static void CheckFloatOperandsAreInt32(MacroAssembler* masm,
- Label* non_int32);
-
// Takes the operands in edx and eax and loads them as integers in eax
// and ecx.
static void LoadUnknownsAsIntegers(MacroAssembler* masm,
bool use_sse3,
+ BinaryOpIC::TypeInfo left_type,
+ BinaryOpIC::TypeInfo right_type,
Label* operand_conversion_failure);
- // Must only be called after LoadUnknownsAsIntegers. Assumes that the
- // operands are pushed on the stack, and that their conversions to int32
- // are in eax and ecx. Checks that the original numbers were in the int32
- // range.
- static void CheckLoadedIntegersWereInt32(MacroAssembler* masm,
- bool use_sse3,
- Label* not_int32);
-
// Assumes that operands are smis or heap numbers and loads them
// into xmm0 and xmm1. Operands are in edx and eax.
// Leaves operands unchanged.
@@ -744,6 +791,15 @@ class FloatingPointHelper : public AllStatic {
static void CheckSSE2OperandsAreInt32(MacroAssembler* masm,
Label* non_int32,
Register scratch);
+
+ // Checks that |operand| holds an int32 value. If |int32_result| is
+ // distinct from |scratch|, it holds that int32 value on return.
+ static void CheckSSE2OperandIsInt32(MacroAssembler* masm,
+ Label* non_int32,
+ XMMRegister operand,
+ Register int32_result,
+ Register scratch,
+ XMMRegister xmm_scratch);
};
@@ -764,11 +820,20 @@ static void IntegerConvert(MacroAssembler* masm,
// Get exponent alone in scratch2.
__ mov(scratch2, scratch);
__ and_(scratch2, HeapNumber::kExponentMask);
+ __ shr(scratch2, HeapNumber::kExponentShift);
+ __ sub(scratch2, Immediate(HeapNumber::kExponentBias));
+ // Load ecx with zero. We use this either for the final shift or
+ // for the answer.
+ __ xor_(ecx, ecx);
+ // If the exponent is above 83, the number contains no significant
+ // bits in the range 0..2^31, so the result is zero.
+ static const uint32_t kResultIsZeroExponent = 83;
+ __ cmp(scratch2, Immediate(kResultIsZeroExponent));
+ __ j(above, &done);
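The cutoff of 83 follows from the IEEE-754 double layout: with a 52-bit fraction, a finite double is an integer multiple of 2^(e-52) for unbiased exponent e >= 52, so

    value = m * 2^(e - 52), m integer  =>  e >= 84  =>  value mod 2^32 == 0

and 83 is the largest exponent that can still contribute bits below 2^32.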
if (use_sse3) {
CpuFeatures::Scope scope(SSE3);
// Check whether the exponent is too big for a 64 bit signed integer.
- static const uint32_t kTooBigExponent =
- (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
+ static const uint32_t kTooBigExponent = 63;
__ cmp(scratch2, Immediate(kTooBigExponent));
__ j(greater_equal, conversion_failure);
// Load x87 register with heap number.
@@ -780,15 +845,11 @@ static void IntegerConvert(MacroAssembler* masm,
__ mov(ecx, Operand(esp, 0)); // Load low word of answer into ecx.
__ add(esp, Immediate(sizeof(uint64_t))); // Nolint.
} else {
- // Load ecx with zero. We use this either for the final shift or
- // for the answer.
- __ xor_(ecx, ecx);
// Check whether the exponent matches a 32 bit signed int that cannot be
// represented by a Smi. A non-smi 32 bit integer is 1.xxx * 2^30 so the
// exponent is 30 (biased). This is the exponent that we are fastest at and
// also the highest exponent we can handle here.
- const uint32_t non_smi_exponent =
- (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
+ const uint32_t non_smi_exponent = 30;
__ cmp(scratch2, Immediate(non_smi_exponent));
// If we have a match of the int32-but-not-Smi exponent then skip some
// logic.
@@ -800,8 +861,7 @@ static void IntegerConvert(MacroAssembler* masm,
{
// Handle a big exponent. The only reason we have this code is that the
// >>> operator has a tendency to generate numbers with an exponent of 31.
- const uint32_t big_non_smi_exponent =
- (HeapNumber::kExponentBias + 31) << HeapNumber::kExponentShift;
+ const uint32_t big_non_smi_exponent = 31;
__ cmp(scratch2, Immediate(big_non_smi_exponent));
__ j(not_equal, conversion_failure);
// We have the big exponent, typically from >>>. This means the number is
@@ -830,19 +890,8 @@ static void IntegerConvert(MacroAssembler* masm,
}
__ bind(&normal_exponent);
- // Exponent word in scratch, exponent part of exponent word in scratch2.
- // Zero in ecx.
- // We know the exponent is smaller than 30 (biased). If it is less than
- // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, i.e.
- // it rounds to zero.
- const uint32_t zero_exponent =
- (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift;
- __ sub(scratch2, Immediate(zero_exponent));
- // ecx already has a Smi zero.
- __ j(less, &done, Label::kNear);
-
- // We have a shifted exponent between 0 and 30 in scratch2.
- __ shr(scratch2, HeapNumber::kExponentShift);
+ // Exponent word in scratch, exponent in scratch2. Zero in ecx.
+ // We know that 0 <= exponent < 30.
__ mov(ecx, Immediate(30));
__ sub(ecx, scratch2);
@@ -877,8 +926,20 @@ static void IntegerConvert(MacroAssembler* masm,
__ jmp(&done, Label::kNear);
__ bind(&negative);
__ sub(ecx, scratch2);
- __ bind(&done);
}
+ __ bind(&done);
+}
+
+
+// Uses SSE2 to convert the heap number in |source| to an integer. Jumps to
+// |conversion_failure| if the heap number did not contain an int32 value.
+// Result is in ecx. Trashes ebx, xmm0, and xmm1.
+static void ConvertHeapNumberToInt32(MacroAssembler* masm,
+ Register source,
+ Label* conversion_failure) {
+ __ movdbl(xmm0, FieldOperand(source, HeapNumber::kValueOffset));
+ FloatingPointHelper::CheckSSE2OperandIsInt32(
+ masm, conversion_failure, xmm0, ecx, ebx, xmm1);
}
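CheckSSE2OperandIsInt32 centralizes the truncate-and-compare idiom that previously appeared inline (see the sequence it replaces in GenerateInt32Stub further down). A sketch of the check, mirroring that removed code:

    __ cvttsd2si(int32_result, Operand(operand));  // truncate to int32
    __ cvtsi2sd(xmm_scratch, int32_result);        // widen back to double
    __ pcmpeqd(xmm_scratch, operand);              // bit-equal to original?
    __ movmskpd(scratch, xmm_scratch);
    __ test(scratch, Immediate(1));
    __ j(zero, non_int32);                         // round trip lost bits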
@@ -905,8 +966,8 @@ void UnaryOpStub::Generate(MacroAssembler* masm) {
case UnaryOpIC::SMI:
GenerateSmiStub(masm);
break;
- case UnaryOpIC::HEAP_NUMBER:
- GenerateHeapNumberStub(masm);
+ case UnaryOpIC::NUMBER:
+ GenerateNumberStub(masm);
break;
case UnaryOpIC::GENERIC:
GenerateGenericStub(masm);
@@ -1010,13 +1071,13 @@ void UnaryOpStub::GenerateSmiCodeUndo(MacroAssembler* masm) {
// TODO(svenpanne): Use virtual functions instead of switch.
-void UnaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
+void UnaryOpStub::GenerateNumberStub(MacroAssembler* masm) {
switch (op_) {
case Token::SUB:
- GenerateHeapNumberStubSub(masm);
+ GenerateNumberStubSub(masm);
break;
case Token::BIT_NOT:
- GenerateHeapNumberStubBitNot(masm);
+ GenerateNumberStubBitNot(masm);
break;
default:
UNREACHABLE();
@@ -1024,7 +1085,7 @@ void UnaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
}
-void UnaryOpStub::GenerateHeapNumberStubSub(MacroAssembler* masm) {
+void UnaryOpStub::GenerateNumberStubSub(MacroAssembler* masm) {
Label non_smi, undo, slow, call_builtin;
GenerateSmiCodeSub(masm, &non_smi, &undo, &call_builtin, Label::kNear);
__ bind(&non_smi);
@@ -1038,7 +1099,7 @@ void UnaryOpStub::GenerateHeapNumberStubSub(MacroAssembler* masm) {
}
-void UnaryOpStub::GenerateHeapNumberStubBitNot(
+void UnaryOpStub::GenerateNumberStubBitNot(
MacroAssembler* masm) {
Label non_smi, slow;
GenerateSmiCodeBitNot(masm, &non_smi, Label::kNear);
@@ -1201,16 +1262,17 @@ void UnaryOpStub::GenerateGenericCodeFallback(MacroAssembler* masm) {
}
+void BinaryOpStub::Initialize() {
+ platform_specific_bit_ = CpuFeatures::IsSupported(SSE3);
+}
+
+
void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
__ pop(ecx); // Save return address.
__ push(edx);
__ push(eax);
// Left and right arguments are now on top.
- // Push this stub's key. Although the operation and the type info are
- // encoded into the key, the encoding is opaque, so push them too.
__ push(Immediate(Smi::FromInt(MinorKey())));
- __ push(Immediate(Smi::FromInt(op_)));
- __ push(Immediate(Smi::FromInt(operands_type_)));
__ push(ecx); // Push return address.
@@ -1219,7 +1281,7 @@ void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
__ TailCallExternalReference(
ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
masm->isolate()),
- 5,
+ 3,
1);
}
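The patch helper now takes three arguments (left, right, key) instead of five: the operation token and operand type states that used to be pushed separately are folded into MinorKey() itself. Hedged note on the encoding (the exact bit layout is an assumption; it lives with BinaryOpStub in code-stubs.h):

    // MinorKey() packs op_, mode_, platform_specific_bit_ and the
    // left/right/result type-info fields into a single Smi-sized key.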
@@ -1229,11 +1291,7 @@ void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm) {
__ pop(ecx); // Save return address.
// Left and right arguments are already on top of the stack.
- // Push this stub's key. Although the operation and the type info are
- // encoded into the key, the encoding is opaque, so push them too.
__ push(Immediate(Smi::FromInt(MinorKey())));
- __ push(Immediate(Smi::FromInt(op_)));
- __ push(Immediate(Smi::FromInt(operands_type_)));
__ push(ecx); // Push return address.
@@ -1242,73 +1300,22 @@ void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm) {
__ TailCallExternalReference(
ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
masm->isolate()),
- 5,
+ 3,
1);
}
-void BinaryOpStub::Generate(MacroAssembler* masm) {
- // Explicitly allow generation of nested stubs. It is safe here because
- // generation code does not use any raw pointers.
- AllowStubCallsScope allow_stub_calls(masm, true);
-
- switch (operands_type_) {
- case BinaryOpIC::UNINITIALIZED:
- GenerateTypeTransition(masm);
- break;
- case BinaryOpIC::SMI:
- GenerateSmiStub(masm);
- break;
- case BinaryOpIC::INT32:
- GenerateInt32Stub(masm);
- break;
- case BinaryOpIC::HEAP_NUMBER:
- GenerateHeapNumberStub(masm);
- break;
- case BinaryOpIC::ODDBALL:
- GenerateOddballStub(masm);
- break;
- case BinaryOpIC::BOTH_STRING:
- GenerateBothStringStub(masm);
- break;
- case BinaryOpIC::STRING:
- GenerateStringStub(masm);
- break;
- case BinaryOpIC::GENERIC:
- GenerateGeneric(masm);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void BinaryOpStub::PrintName(StringStream* stream) {
- const char* op_name = Token::Name(op_);
- const char* overwrite_name;
- switch (mode_) {
- case NO_OVERWRITE: overwrite_name = "Alloc"; break;
- case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
- case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
- default: overwrite_name = "UnknownOverwrite"; break;
- }
- stream->Add("BinaryOpStub_%s_%s_%s",
- op_name,
- overwrite_name,
- BinaryOpIC::GetName(operands_type_));
-}
-
-
-void BinaryOpStub::GenerateSmiCode(
+static void BinaryOpStub_GenerateSmiCode(
MacroAssembler* masm,
Label* slow,
- SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
+ BinaryOpStub::SmiCodeGenerateHeapNumberResults allow_heapnumber_results,
+ Token::Value op) {
// 1. Move arguments into edx, eax except for DIV and MOD, which need the
// dividend in eax and edx free for the division. Use eax, ebx for those.
Comment load_comment(masm, "-- Load arguments");
Register left = edx;
Register right = eax;
- if (op_ == Token::DIV || op_ == Token::MOD) {
+ if (op == Token::DIV || op == Token::MOD) {
left = eax;
right = ebx;
__ mov(ebx, eax);
@@ -1321,7 +1328,7 @@ void BinaryOpStub::GenerateSmiCode(
Label not_smis;
Register combined = ecx;
ASSERT(!left.is(combined) && !right.is(combined));
- switch (op_) {
+ switch (op) {
case Token::BIT_OR:
// Perform the operation into eax and smi check the result. Preserve
// eax in case the result is not a smi.
@@ -1365,7 +1372,7 @@ void BinaryOpStub::GenerateSmiCode(
// eax and check the result if necessary.
Comment perform_smi(masm, "-- Perform smi operation");
Label use_fp_on_smis;
- switch (op_) {
+ switch (op) {
case Token::BIT_OR:
// Nothing to do.
break;
@@ -1499,7 +1506,7 @@ void BinaryOpStub::GenerateSmiCode(
}
// 5. Emit return of result in eax. Some operations have registers pushed.
- switch (op_) {
+ switch (op) {
case Token::ADD:
case Token::SUB:
case Token::MUL:
@@ -1522,9 +1529,9 @@ void BinaryOpStub::GenerateSmiCode(
// 6. For some operations emit inline code to perform floating point
// operations on known smis (e.g., if the result of the operation
// overflowed the smi range).
- if (allow_heapnumber_results == NO_HEAPNUMBER_RESULTS) {
+ if (allow_heapnumber_results == BinaryOpStub::NO_HEAPNUMBER_RESULTS) {
__ bind(&use_fp_on_smis);
- switch (op_) {
+ switch (op) {
// Undo the effects of some operations, and some register moves.
case Token::SHL:
// The arguments are saved on the stack, and only used from there.
@@ -1552,8 +1559,8 @@ void BinaryOpStub::GenerateSmiCode(
}
__ jmp(&not_smis);
} else {
- ASSERT(allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS);
- switch (op_) {
+ ASSERT(allow_heapnumber_results == BinaryOpStub::ALLOW_HEAPNUMBER_RESULTS);
+ switch (op) {
case Token::SHL:
case Token::SHR: {
Comment perform_float(masm, "-- Perform float operation on smis");
@@ -1564,13 +1571,13 @@ void BinaryOpStub::GenerateSmiCode(
// Store the result in the HeapNumber and return.
// It's OK to overwrite the arguments on the stack because we
// are about to return.
- if (op_ == Token::SHR) {
+ if (op == Token::SHR) {
__ mov(Operand(esp, 1 * kPointerSize), left);
__ mov(Operand(esp, 2 * kPointerSize), Immediate(0));
__ fild_d(Operand(esp, 1 * kPointerSize));
__ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
} else {
- ASSERT_EQ(Token::SHL, op_);
+ ASSERT_EQ(Token::SHL, op);
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope use_sse2(SSE2);
__ cvtsi2sd(xmm0, left);
@@ -1592,7 +1599,7 @@ void BinaryOpStub::GenerateSmiCode(
Comment perform_float(masm, "-- Perform float operation on smis");
__ bind(&use_fp_on_smis);
// Restore arguments to edx, eax.
- switch (op_) {
+ switch (op) {
case Token::ADD:
// Revert right = right + left.
__ sub(right, left);
@@ -1618,7 +1625,7 @@ void BinaryOpStub::GenerateSmiCode(
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope use_sse2(SSE2);
FloatingPointHelper::LoadSSE2Smis(masm, ebx);
- switch (op_) {
+ switch (op) {
case Token::ADD: __ addsd(xmm0, xmm1); break;
case Token::SUB: __ subsd(xmm0, xmm1); break;
case Token::MUL: __ mulsd(xmm0, xmm1); break;
@@ -1628,7 +1635,7 @@ void BinaryOpStub::GenerateSmiCode(
__ movdbl(FieldOperand(ecx, HeapNumber::kValueOffset), xmm0);
} else { // SSE2 not available, use FPU.
FloatingPointHelper::LoadFloatSmis(masm, ebx);
- switch (op_) {
+ switch (op) {
case Token::ADD: __ faddp(1); break;
case Token::SUB: __ fsubp(1); break;
case Token::MUL: __ fmulp(1); break;
@@ -1651,7 +1658,7 @@ void BinaryOpStub::GenerateSmiCode(
// edx and eax.
Comment done_comment(masm, "-- Enter non-smi code");
__ bind(&not_smis);
- switch (op_) {
+ switch (op) {
case Token::BIT_OR:
case Token::SHL:
case Token::SAR:
@@ -1698,9 +1705,11 @@ void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
if (result_type_ == BinaryOpIC::UNINITIALIZED ||
result_type_ == BinaryOpIC::SMI) {
- GenerateSmiCode(masm, &call_runtime, NO_HEAPNUMBER_RESULTS);
+ BinaryOpStub_GenerateSmiCode(
+ masm, &call_runtime, NO_HEAPNUMBER_RESULTS, op_);
} else {
- GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
+ BinaryOpStub_GenerateSmiCode(
+ masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS, op_);
}
__ bind(&call_runtime);
switch (op_) {
@@ -1725,19 +1734,9 @@ void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
}
-void BinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
- ASSERT(operands_type_ == BinaryOpIC::STRING);
- ASSERT(op_ == Token::ADD);
- // Try to add arguments as strings, otherwise, transition to the generic
- // BinaryOpIC type.
- GenerateAddStrings(masm);
- GenerateTypeTransition(masm);
-}
-
-
void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
Label call_runtime;
- ASSERT(operands_type_ == BinaryOpIC::BOTH_STRING);
+ ASSERT(left_type_ == BinaryOpIC::STRING && right_type_ == BinaryOpIC::STRING);
ASSERT(op_ == Token::ADD);
// If both arguments are strings, call the string add stub.
// Otherwise, do a transition.
@@ -1765,6 +1764,11 @@ void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
}
+static void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
+ Label* alloc_failure,
+ OverwriteMode mode);
+
+
// Input:
// edx: left operand (tagged)
// eax: right operand (tagged)
@@ -1772,7 +1776,7 @@ void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
// eax: result (tagged)
void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
Label call_runtime;
- ASSERT(operands_type_ == BinaryOpIC::INT32);
+ ASSERT(Max(left_type_, right_type_) == BinaryOpIC::INT32);
// Floating point case.
switch (op_) {
@@ -1785,6 +1789,18 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
Label not_int32;
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope use_sse2(SSE2);
+ // It could be that only SMIs have been seen at either the left
+ // or the right operand. For precise type feedback, patch the IC
+ // again if this changes.
+ // In theory, we would need the same check in the non-SSE2 case,
+ // but since we don't support Crankshaft on such hardware we can
+ // afford not to care about precise type feedback.
+ if (left_type_ == BinaryOpIC::SMI) {
+ __ JumpIfNotSmi(edx, &not_int32);
+ }
+ if (right_type_ == BinaryOpIC::SMI) {
+ __ JumpIfNotSmi(eax, &not_int32);
+ }
FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
FloatingPointHelper::CheckSSE2OperandsAreInt32(masm, &not_int32, ecx);
if (op_ == Token::MOD) {
@@ -1800,14 +1816,10 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
}
// Check result type if it is currently Int32.
if (result_type_ <= BinaryOpIC::INT32) {
- __ cvttsd2si(ecx, Operand(xmm0));
- __ cvtsi2sd(xmm2, ecx);
- __ pcmpeqd(xmm2, xmm0);
- __ movmskpd(ecx, xmm2);
- __ test(ecx, Immediate(1));
- __ j(zero, &not_int32);
+ FloatingPointHelper::CheckSSE2OperandIsInt32(
+ masm, &not_int32, xmm0, ecx, ecx, xmm2);
}
- GenerateHeapResultAllocation(masm, &call_runtime);
+ BinaryOpStub_GenerateHeapResultAllocation(masm, &call_runtime, mode_);
__ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
__ ret(0);
}
@@ -1817,7 +1829,6 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
masm,
ecx,
FloatingPointHelper::ARGS_IN_REGISTERS);
- FloatingPointHelper::CheckFloatOperandsAreInt32(masm, &not_int32);
if (op_ == Token::MOD) {
// The operands are now on the FPU stack, but we don't need them.
__ fstp(0);
@@ -1833,7 +1844,8 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
default: UNREACHABLE();
}
Label after_alloc_failure;
- GenerateHeapResultAllocation(masm, &after_alloc_failure);
+ BinaryOpStub_GenerateHeapResultAllocation(
+ masm, &after_alloc_failure, mode_);
__ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
__ ret(0);
__ bind(&after_alloc_failure);
@@ -1858,11 +1870,9 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
Label not_floats;
Label not_int32;
Label non_smi_result;
- FloatingPointHelper::LoadUnknownsAsIntegers(masm,
- use_sse3_,
- &not_floats);
- FloatingPointHelper::CheckLoadedIntegersWereInt32(masm, use_sse3_,
- &not_int32);
+ bool use_sse3 = platform_specific_bit_;
+ FloatingPointHelper::LoadUnknownsAsIntegers(
+ masm, use_sse3, left_type_, right_type_, &not_floats);
switch (op_) {
case Token::BIT_OR: __ or_(eax, ecx); break;
case Token::BIT_AND: __ and_(eax, ecx); break;
@@ -1934,44 +1944,24 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
switch (op_) {
case Token::ADD:
- GenerateRegisterArgsPush(masm);
- __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
- break;
case Token::SUB:
- GenerateRegisterArgsPush(masm);
- __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
- break;
case Token::MUL:
- GenerateRegisterArgsPush(masm);
- __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
- break;
case Token::DIV:
GenerateRegisterArgsPush(masm);
- __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
break;
case Token::MOD:
- break;
+ return; // Handled above.
case Token::BIT_OR:
- __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
- break;
case Token::BIT_AND:
- __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
- break;
case Token::BIT_XOR:
- __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
- break;
case Token::SAR:
- __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
- break;
case Token::SHL:
- __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
- break;
case Token::SHR:
- __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
break;
default:
UNREACHABLE();
}
+ GenerateCallRuntime(masm);
}
@@ -2004,11 +1994,11 @@ void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
}
__ bind(&done);
- GenerateHeapNumberStub(masm);
+ GenerateNumberStub(masm);
}
-void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
+void BinaryOpStub::GenerateNumberStub(MacroAssembler* masm) {
Label call_runtime;
// Floating point case.
@@ -2020,7 +2010,28 @@ void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
Label not_floats;
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope use_sse2(SSE2);
+
+ // It could be that only SMIs have been seen at either the left
+ // or the right operand. For precise type feedback, patch the IC
+ // again if this changes.
+ // In theory, we would need the same check in the non-SSE2 case,
+ // but since we don't support Crankshaft on such hardware we can
+ // afford not to care about precise type feedback.
+ if (left_type_ == BinaryOpIC::SMI) {
+ __ JumpIfNotSmi(edx, &not_floats);
+ }
+ if (right_type_ == BinaryOpIC::SMI) {
+ __ JumpIfNotSmi(eax, &not_floats);
+ }
FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
+ if (left_type_ == BinaryOpIC::INT32) {
+ FloatingPointHelper::CheckSSE2OperandIsInt32(
+ masm, &not_floats, xmm0, ecx, ecx, xmm2);
+ }
+ if (right_type_ == BinaryOpIC::INT32) {
+ FloatingPointHelper::CheckSSE2OperandIsInt32(
+ masm, &not_floats, xmm1, ecx, ecx, xmm2);
+ }
switch (op_) {
case Token::ADD: __ addsd(xmm0, xmm1); break;
@@ -2029,7 +2040,7 @@ void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
case Token::DIV: __ divsd(xmm0, xmm1); break;
default: UNREACHABLE();
}
- GenerateHeapResultAllocation(masm, &call_runtime);
+ BinaryOpStub_GenerateHeapResultAllocation(masm, &call_runtime, mode_);
__ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
__ ret(0);
} else { // SSE2 not available, use FPU.
@@ -2046,7 +2057,8 @@ void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
default: UNREACHABLE();
}
Label after_alloc_failure;
- GenerateHeapResultAllocation(masm, &after_alloc_failure);
+ BinaryOpStub_GenerateHeapResultAllocation(
+ masm, &after_alloc_failure, mode_);
__ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
__ ret(0);
__ bind(&after_alloc_failure);
@@ -2072,9 +2084,12 @@ void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
GenerateRegisterArgsPush(masm);
Label not_floats;
Label non_smi_result;
- FloatingPointHelper::LoadUnknownsAsIntegers(masm,
- use_sse3_,
- &not_floats);
+ // We do not check the input arguments here, as any value is
+ // unconditionally truncated to an int32 anyway. To get the
+ // right optimized code, int32 type feedback is just right.
+ bool use_sse3 = platform_specific_bit_;
+ FloatingPointHelper::LoadUnknownsAsIntegers(
+ masm, use_sse3, left_type_, right_type_, &not_floats);
switch (op_) {
case Token::BIT_OR: __ or_(eax, ecx); break;
case Token::BIT_AND: __ and_(eax, ecx); break;
@@ -2145,46 +2160,23 @@ void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
switch (op_) {
case Token::ADD:
- GenerateRegisterArgsPush(masm);
- __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
- break;
case Token::SUB:
- GenerateRegisterArgsPush(masm);
- __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
- break;
case Token::MUL:
- GenerateRegisterArgsPush(masm);
- __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
- break;
case Token::DIV:
- GenerateRegisterArgsPush(masm);
- __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
- break;
case Token::MOD:
GenerateRegisterArgsPush(masm);
- __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
break;
case Token::BIT_OR:
- __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
- break;
case Token::BIT_AND:
- __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
- break;
case Token::BIT_XOR:
- __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
- break;
case Token::SAR:
- __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
- break;
case Token::SHL:
- __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
- break;
case Token::SHR:
- __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
break;
default:
UNREACHABLE();
}
+ GenerateCallRuntime(masm);
}
@@ -2213,7 +2205,8 @@ void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
UNREACHABLE();
}
- GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
+ BinaryOpStub_GenerateSmiCode(
+ masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS, op_);
// Floating point case.
switch (op_) {
@@ -2233,7 +2226,7 @@ void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
case Token::DIV: __ divsd(xmm0, xmm1); break;
default: UNREACHABLE();
}
- GenerateHeapResultAllocation(masm, &call_runtime);
+ BinaryOpStub_GenerateHeapResultAllocation(masm, &call_runtime, mode_);
__ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
__ ret(0);
} else { // SSE2 not available, use FPU.
@@ -2250,7 +2243,8 @@ void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
default: UNREACHABLE();
}
Label after_alloc_failure;
- GenerateHeapResultAllocation(masm, &after_alloc_failure);
+ BinaryOpStub_GenerateHeapResultAllocation(
+ masm, &after_alloc_failure, mode_);
__ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
__ ret(0);
__ bind(&after_alloc_failure);
@@ -2271,8 +2265,11 @@ void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
case Token::SHL:
case Token::SHR: {
Label non_smi_result;
+ bool use_sse3 = platform_specific_bit_;
FloatingPointHelper::LoadUnknownsAsIntegers(masm,
- use_sse3_,
+ use_sse3,
+ BinaryOpIC::GENERIC,
+ BinaryOpIC::GENERIC,
&call_runtime);
switch (op_) {
case Token::BIT_OR: __ or_(eax, ecx); break;
@@ -2339,48 +2336,26 @@ void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
// result.
__ bind(&call_runtime);
switch (op_) {
- case Token::ADD: {
+ case Token::ADD:
GenerateAddStrings(masm);
- GenerateRegisterArgsPush(masm);
- __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
- break;
- }
+ // Fall through.
case Token::SUB:
- GenerateRegisterArgsPush(masm);
- __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
- break;
case Token::MUL:
- GenerateRegisterArgsPush(masm);
- __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
- break;
case Token::DIV:
GenerateRegisterArgsPush(masm);
- __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
break;
case Token::MOD:
- __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
- break;
case Token::BIT_OR:
- __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
- break;
case Token::BIT_AND:
- __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
- break;
case Token::BIT_XOR:
- __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
- break;
case Token::SAR:
- __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
- break;
case Token::SHL:
- __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
- break;
case Token::SHR:
- __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
break;
default:
UNREACHABLE();
}
+ GenerateCallRuntime(masm);
}
@@ -2416,11 +2391,10 @@ void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
}
-void BinaryOpStub::GenerateHeapResultAllocation(
- MacroAssembler* masm,
- Label* alloc_failure) {
+static void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
+ Label* alloc_failure,
+ OverwriteMode mode) {
Label skip_allocation;
- OverwriteMode mode = mode_;
switch (mode) {
case OVERWRITE_LEFT: {
// If the argument in edx is already an object, we skip the
@@ -2513,6 +2487,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
__ bind(&loaded);
} else { // UNTAGGED.
+ CpuFeatures::Scope scope(SSE2);
if (CpuFeatures::IsSupported(SSE4_1)) {
CpuFeatures::Scope sse4_scope(SSE4_1);
__ pextrd(edx, xmm1, 0x1); // copy xmm1[63..32] to edx.
@@ -2585,6 +2560,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
__ fstp(0);
__ ret(kPointerSize);
} else { // UNTAGGED.
+ CpuFeatures::Scope scope(SSE2);
__ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
__ Ret();
}
@@ -2597,6 +2573,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
if (tagged) {
__ AllocateHeapNumber(eax, edi, no_reg, &runtime_call_clear_stack);
} else { // UNTAGGED.
+ CpuFeatures::Scope scope(SSE2);
__ AllocateHeapNumber(eax, edi, no_reg, &skip_cache);
__ sub(esp, Immediate(kDoubleSize));
__ movdbl(Operand(esp, 0), xmm1);
@@ -2611,6 +2588,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
if (tagged) {
__ ret(kPointerSize);
} else { // UNTAGGED.
+ CpuFeatures::Scope scope(SSE2);
__ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
__ Ret();
@@ -2643,6 +2621,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
ExternalReference(RuntimeFunction(), masm->isolate());
__ TailCallExternalReference(runtime, 1, 1);
} else { // UNTAGGED.
+ CpuFeatures::Scope scope(SSE2);
__ bind(&runtime_call_clear_stack);
__ bind(&runtime_call);
__ AllocateHeapNumber(eax, edi, no_reg, &skip_cache);
@@ -2771,16 +2750,24 @@ void TranscendentalCacheStub::GenerateOperation(
// Input: edx, eax are the left and right objects of a bit op.
// Output: eax, ecx are left and right integers for a bit op.
-void FloatingPointHelper::LoadUnknownsAsIntegers(MacroAssembler* masm,
- bool use_sse3,
- Label* conversion_failure) {
+// Warning: can clobber inputs even when it jumps to |conversion_failure|!
+void FloatingPointHelper::LoadUnknownsAsIntegers(
+ MacroAssembler* masm,
+ bool use_sse3,
+ BinaryOpIC::TypeInfo left_type,
+ BinaryOpIC::TypeInfo right_type,
+ Label* conversion_failure) {
// Check float operands.
Label arg1_is_object, check_undefined_arg1;
Label arg2_is_object, check_undefined_arg2;
Label load_arg2, done;
// Test if arg1 is a Smi.
- __ JumpIfNotSmi(edx, &arg1_is_object, Label::kNear);
+ if (left_type == BinaryOpIC::SMI) {
+ __ JumpIfNotSmi(edx, conversion_failure);
+ } else {
+ __ JumpIfNotSmi(edx, &arg1_is_object, Label::kNear);
+ }
__ SmiUntag(edx);
__ jmp(&load_arg2);
@@ -2799,14 +2786,23 @@ void FloatingPointHelper::LoadUnknownsAsIntegers(MacroAssembler* masm,
__ j(not_equal, &check_undefined_arg1);
// Get the untagged integer version of the edx heap number in ecx.
- IntegerConvert(masm, edx, use_sse3, conversion_failure);
+ if (left_type == BinaryOpIC::INT32 && CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope use_sse2(SSE2);
+ ConvertHeapNumberToInt32(masm, edx, conversion_failure);
+ } else {
+ IntegerConvert(masm, edx, use_sse3, conversion_failure);
+ }
__ mov(edx, ecx);
// Here edx has the untagged integer, eax has a Smi or a heap number.
__ bind(&load_arg2);
// Test if arg2 is a Smi.
- __ JumpIfNotSmi(eax, &arg2_is_object, Label::kNear);
+ if (right_type == BinaryOpIC::SMI) {
+ __ JumpIfNotSmi(eax, conversion_failure);
+ } else {
+ __ JumpIfNotSmi(eax, &arg2_is_object, Label::kNear);
+ }
__ SmiUntag(eax);
__ mov(ecx, eax);
@@ -2823,18 +2819,17 @@ void FloatingPointHelper::LoadUnknownsAsIntegers(MacroAssembler* masm,
__ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
__ cmp(ebx, factory->heap_number_map());
__ j(not_equal, &check_undefined_arg2);
-
// Get the untagged integer version of the eax heap number in ecx.
- IntegerConvert(masm, eax, use_sse3, conversion_failure);
- __ bind(&done);
- __ mov(eax, edx);
-}
+ if (right_type == BinaryOpIC::INT32 && CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope use_sse2(SSE2);
+ ConvertHeapNumberToInt32(masm, eax, conversion_failure);
+ } else {
+ IntegerConvert(masm, eax, use_sse3, conversion_failure);
+ }
-void FloatingPointHelper::CheckLoadedIntegersWereInt32(MacroAssembler* masm,
- bool use_sse3,
- Label* not_int32) {
- return;
+ __ bind(&done);
+ __ mov(eax, edx);
}
@@ -2932,16 +2927,25 @@ void FloatingPointHelper::LoadSSE2Smis(MacroAssembler* masm,
void FloatingPointHelper::CheckSSE2OperandsAreInt32(MacroAssembler* masm,
Label* non_int32,
Register scratch) {
- __ cvttsd2si(scratch, Operand(xmm0));
- __ cvtsi2sd(xmm2, scratch);
- __ ucomisd(xmm0, xmm2);
- __ j(not_zero, non_int32);
- __ j(carry, non_int32);
- __ cvttsd2si(scratch, Operand(xmm1));
- __ cvtsi2sd(xmm2, scratch);
- __ ucomisd(xmm1, xmm2);
+ CheckSSE2OperandIsInt32(masm, non_int32, xmm0, scratch, scratch, xmm2);
+ CheckSSE2OperandIsInt32(masm, non_int32, xmm1, scratch, scratch, xmm2);
+}
+
+
+void FloatingPointHelper::CheckSSE2OperandIsInt32(MacroAssembler* masm,
+ Label* non_int32,
+ XMMRegister operand,
+ Register int32_result,
+ Register scratch,
+ XMMRegister xmm_scratch) {
+ __ cvttsd2si(int32_result, Operand(operand));
+ __ cvtsi2sd(xmm_scratch, int32_result);
+ __ pcmpeqd(xmm_scratch, operand);
+ __ movmskps(scratch, xmm_scratch);
+ // Two least significant bits should be both set.
+ __ not_(scratch);
+ __ test(scratch, Immediate(3));
__ j(not_zero, non_int32);
- __ j(carry, non_int32);
}
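
The new CheckSSE2OperandIsInt32 replaces the ucomisd round-trip test with a
bitwise one: truncate to int32, convert back, and require the two doubles to
match bit for bit, which also rejects -0.0. A standalone sketch of that
check (the range guard stands in for cvttsd2si's out-of-range result, which
the stub catches through the same pattern mismatch):

#include <cstdint>
#include <cstring>

bool IsInt32Double(double d, int32_t* out) {
  if (!(d >= -2147483648.0 && d <= 2147483647.0)) return false;  // also NaN
  int32_t truncated = static_cast<int32_t>(d);        // cvttsd2si
  double roundtrip = static_cast<double>(truncated);  // cvtsi2sd
  uint64_t a, b;
  std::memcpy(&a, &d, sizeof a);
  std::memcpy(&b, &roundtrip, sizeof b);
  if (a != b) return false;  // pcmpeqd + movmskps: bit patterns must match,
  *out = truncated;          // so -0.0 is rejected here
  return true;
}
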
@@ -3025,12 +3029,6 @@ void FloatingPointHelper::CheckFloatOperands(MacroAssembler* masm,
}
-void FloatingPointHelper::CheckFloatOperandsAreInt32(MacroAssembler* masm,
- Label* non_int32) {
- return;
-}
-
-
void MathPowStub::Generate(MacroAssembler* masm) {
CpuFeatures::Scope use_sse2(SSE2);
Factory* factory = masm->isolate()->factory();
@@ -3196,10 +3194,10 @@ void MathPowStub::Generate(MacroAssembler* masm) {
// F2XM1 calculates 2^st(0) - 1 for -1 < st(0) < 1
__ f2xm1(); // 2^(X-rnd(X)) - 1, rnd(X)
__ fld1(); // 1, 2^(X-rnd(X)) - 1, rnd(X)
- __ faddp(1); // 1, 2^(X-rnd(X)), rnd(X)
+ __ faddp(1); // 2^(X-rnd(X)), rnd(X)
// FSCALE calculates st(0) * 2^st(1)
__ fscale(); // 2^X, rnd(X)
- __ fstp(1);
+ __ fstp(1); // 2^X
// Bail out to runtime in case of exceptions in the status word.
__ fnstsw_ax();
__ test_b(eax, 0x5F); // We check for all but precision exception.
@@ -3299,6 +3297,134 @@ void MathPowStub::Generate(MacroAssembler* masm) {
}
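
The two comment fixes above track the x87 stack precisely: faddp pops, so
only two values remain after it. For reference, the F2XM1/FSCALE pair
computes 2^x by splitting off the rounded integer part; a scalar C++ sketch
of the same decomposition (assuming rnd(x) fits in an int):

#include <cmath>

double TwoToThe(double x) {
  double r = std::nearbyint(x);  // rnd(X)
  double f = x - r;              // fractional part, in (-1, 1), F2XM1's domain
  return std::ldexp(std::exp2(f), static_cast<int>(r));  // the FSCALE step
}
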
+void ArrayLengthStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- ecx : name
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+ Label miss;
+
+ if (kind() == Code::KEYED_LOAD_IC) {
+ __ cmp(ecx, Immediate(masm->isolate()->factory()->length_string()));
+ __ j(not_equal, &miss);
+ }
+
+ StubCompiler::GenerateLoadArrayLength(masm, edx, eax, &miss);
+ __ bind(&miss);
+ StubCompiler::GenerateLoadMiss(masm, kind());
+}
+
+
+void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- ecx : name
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+ Label miss;
+
+ if (kind() == Code::KEYED_LOAD_IC) {
+ __ cmp(ecx, Immediate(masm->isolate()->factory()->prototype_string()));
+ __ j(not_equal, &miss);
+ }
+
+ StubCompiler::GenerateLoadFunctionPrototype(masm, edx, eax, ebx, &miss);
+ __ bind(&miss);
+ StubCompiler::GenerateLoadMiss(masm, kind());
+}
+
+
+void StringLengthStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- ecx : name
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+ Label miss;
+
+ if (kind() == Code::KEYED_LOAD_IC) {
+ __ cmp(ecx, Immediate(masm->isolate()->factory()->length_string()));
+ __ j(not_equal, &miss);
+ }
+
+ StubCompiler::GenerateLoadStringLength(masm, edx, eax, ebx, &miss,
+ support_wrapper_);
+ __ bind(&miss);
+ StubCompiler::GenerateLoadMiss(masm, kind());
+}
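
These three load stubs share one skeleton: keyed variants must first verify
that the incoming name really is "length" or "prototype", then emit the
specialized load, with every failure funneled into the generic miss handler.
A plain C++ sketch of that shape (names illustrative, not the V8 API):

#include <cstring>

enum IcKind { LOAD_IC, KEYED_LOAD_IC };

bool TryFastLoad(IcKind kind, const char* name, const char* expected) {
  // For keyed loads the property name arrives as data and must be compared
  // explicitly; named loads already guarantee it by construction.
  if (kind == KEYED_LOAD_IC && std::strcmp(name, expected) != 0)
    return false;  // miss: tail-call the generic miss handler
  return true;     // fast path: emit the specialized length/prototype load
}
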
+
+
+void StoreArrayLengthStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : value
+ // -- ecx : name
+ // -- edx : receiver
+ // -- esp[0] : return address
+ // -----------------------------------
+ //
+ // This accepts as a receiver anything JSArray::SetElementsLength accepts
+ // (currently anything except for external arrays which means anything with
+ // elements of FixedArray type). Value must be a number, but only smis are
+ // accepted, as that is the most common case.
+
+ Label miss;
+
+ Register receiver = edx;
+ Register value = eax;
+ Register scratch = ebx;
+
+ if (kind() == Code::KEYED_STORE_IC) {
+ __ cmp(ecx, Immediate(masm->isolate()->factory()->length_string()));
+ __ j(not_equal, &miss);
+ }
+
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(receiver, &miss);
+
+ // Check that the object is a JS array.
+ __ CmpObjectType(receiver, JS_ARRAY_TYPE, scratch);
+ __ j(not_equal, &miss);
+
+ // Check that elements are FixedArray.
+ // We rely on StoreIC_ArrayLength below to deal with all types of
+ // fast elements (including COW).
+ __ mov(scratch, FieldOperand(receiver, JSArray::kElementsOffset));
+ __ CmpObjectType(scratch, FIXED_ARRAY_TYPE, scratch);
+ __ j(not_equal, &miss);
+
+ // Check that the array has fast properties, otherwise the length
+ // property might have been redefined.
+ __ mov(scratch, FieldOperand(receiver, JSArray::kPropertiesOffset));
+ __ CompareRoot(FieldOperand(scratch, FixedArray::kMapOffset),
+ Heap::kHashTableMapRootIndex);
+ __ j(equal, &miss);
+
+ // Check that value is a smi.
+ __ JumpIfNotSmi(value, &miss);
+
+ // Prepare tail call to StoreIC_ArrayLength.
+ __ pop(scratch);
+ __ push(receiver);
+ __ push(value);
+ __ push(scratch); // return address
+
+ ExternalReference ref =
+ ExternalReference(IC_Utility(IC::kStoreIC_ArrayLength), masm->isolate());
+ __ TailCallExternalReference(ref, 2, 1);
+
+ __ bind(&miss);
+
+ StubCompiler::GenerateStoreMiss(masm, kind());
+}
+
+
+void LoadFieldStub::Generate(MacroAssembler* masm) {
+ StubCompiler::DoGenerateFastPropertyLoad(masm, eax, reg_, inobject_, index_);
+ __ ret(0);
+}
+
+
void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
// The key is in edx and the parameter count is in eax.
@@ -3723,7 +3849,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
static const int kSubjectOffset = 3 * kPointerSize;
static const int kJSRegExpOffset = 4 * kPointerSize;
- Label runtime, invoke_regexp;
+ Label runtime;
+ Factory* factory = masm->isolate()->factory();
// Ensure that a RegExp stack is allocated.
ExternalReference address_of_regexp_stack_memory_address =
@@ -3741,6 +3868,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ JumpIfSmi(eax, &runtime);
__ CmpObjectType(eax, JS_REGEXP_TYPE, ecx);
__ j(not_equal, &runtime);
+
// Check that the RegExp has been compiled (data contains a fixed array).
__ mov(ecx, FieldOperand(eax, JSRegExp::kDataOffset));
if (FLAG_debug_code) {
@@ -3759,156 +3887,124 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// ecx: RegExp data (FixedArray)
// Check that the number of captures fit in the static offsets vector buffer.
__ mov(edx, FieldOperand(ecx, JSRegExp::kIrregexpCaptureCountOffset));
- // Calculate number of capture registers (number_of_captures + 1) * 2. This
- // uses the asumption that smis are 2 * their untagged value.
+ // Check (number_of_captures + 1) * 2 <= offsets vector size
+ // Or number_of_captures * 2 <= offsets vector size - 2
+ // Multiplying by 2 comes for free since edx is smi-tagged.
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
- __ add(edx, Immediate(2)); // edx was a smi.
- // Check that the static offsets vector buffer is large enough.
- __ cmp(edx, Isolate::kJSRegexpStaticOffsetsVectorSize);
+ STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2);
+ __ cmp(edx, Isolate::kJSRegexpStaticOffsetsVectorSize - 2);
__ j(above, &runtime);
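
The rewritten bound check leans on smi tagging: a smi stores value * 2 in
the word, so the tagged capture count already equals number_of_captures * 2
and the algebraically rearranged bound needs no untagging. As arithmetic
(the STATIC_ASSERT above guarantees the vector size is at least 2):

#include <cstdint>

bool CapturesFit(uint32_t smi_capture_count, uint32_t vector_size) {
  // (captures + 1) * 2 <= size  <=>  captures * 2 <= size - 2, and the
  // smi-tagged count already *is* captures * 2.
  return smi_capture_count <= vector_size - 2;  // unsigned, like 'above'
}
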
- // ecx: RegExp data (FixedArray)
- // edx: Number of capture registers
- // Check that the second argument is a string.
- __ mov(eax, Operand(esp, kSubjectOffset));
- __ JumpIfSmi(eax, &runtime);
- Condition is_string = masm->IsObjectStringType(eax, ebx, ebx);
- __ j(NegateCondition(is_string), &runtime);
- // Get the length of the string to ebx.
- __ mov(ebx, FieldOperand(eax, String::kLengthOffset));
-
- // ebx: Length of subject string as a smi
- // ecx: RegExp data (FixedArray)
- // edx: Number of capture registers
- // Check that the third argument is a positive smi less than the subject
- // string length. A negative value will be greater (unsigned comparison).
- __ mov(eax, Operand(esp, kPreviousIndexOffset));
- __ JumpIfNotSmi(eax, &runtime);
- __ cmp(eax, ebx);
- __ j(above_equal, &runtime);
-
- // ecx: RegExp data (FixedArray)
- // edx: Number of capture registers
- // Check that the fourth object is a JSArray object.
- __ mov(eax, Operand(esp, kLastMatchInfoOffset));
- __ JumpIfSmi(eax, &runtime);
- __ CmpObjectType(eax, JS_ARRAY_TYPE, ebx);
- __ j(not_equal, &runtime);
- // Check that the JSArray is in fast case.
- __ mov(ebx, FieldOperand(eax, JSArray::kElementsOffset));
- __ mov(eax, FieldOperand(ebx, HeapObject::kMapOffset));
- Factory* factory = masm->isolate()->factory();
- __ cmp(eax, factory->fixed_array_map());
- __ j(not_equal, &runtime);
- // Check that the last match info has space for the capture registers and the
- // additional information.
- __ mov(eax, FieldOperand(ebx, FixedArray::kLengthOffset));
- __ SmiUntag(eax);
- __ add(edx, Immediate(RegExpImpl::kLastMatchOverhead));
- __ cmp(edx, eax);
- __ j(greater, &runtime);
-
// Reset offset for possibly sliced string.
__ Set(edi, Immediate(0));
- // ecx: RegExp data (FixedArray)
- // Check the representation and encoding of the subject string.
- Label seq_ascii_string, seq_two_byte_string, check_code;
__ mov(eax, Operand(esp, kSubjectOffset));
+ __ JumpIfSmi(eax, &runtime);
+ __ mov(edx, eax); // Make a copy of the original subject string.
__ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
__ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
- // First check for flat two byte string.
+
+ // eax: subject string
+ // edx: subject string
+ // ebx: subject string instance type
+ // ecx: RegExp data (FixedArray)
+ // Handle subject string according to its encoding and representation:
+ // (1) Sequential two byte? If yes, go to (9).
+ // (2) Sequential one byte? If yes, go to (6).
+ // (3) Anything but sequential or cons? If yes, go to (7).
+ // (4) Cons string. If the string is flat, replace subject with first string.
+ // Otherwise bailout.
+ // (5a) Is subject sequential two byte? If yes, go to (9).
+ // (5b) Is subject external? If yes, go to (8).
+ // (6) One byte sequential. Load regexp code for one byte.
+ // (E) Carry on.
+ /// [...]
+
+ // Deferred code at the end of the stub:
+ // (7) Not a long external string? If yes, go to (10).
+ // (8) External string. Make it, offset-wise, look like a sequential string.
+ // (8a) Is the external string one byte? If yes, go to (6).
+ // (9) Two byte sequential. Load regexp code for two byte. Go to (E).
+ // (10) Short external string or not a string? If yes, bail out to runtime.
+ // (11) Sliced string. Replace subject with parent. Go to (5a).
+
+ Label seq_one_byte_string /* 6 */, seq_two_byte_string /* 9 */,
+ external_string /* 8 */, check_underlying /* 5a */,
+ not_seq_nor_cons /* 7 */, check_code /* E */,
+ not_long_external /* 10 */;
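
Steps (1)-(3) classify the subject by masking its instance-type bits twice:
first with encoding included (zero means sequential two byte), then with
encoding ignored (zero means sequential one byte). A sketch of that dispatch
with hypothetical stand-in constants (the real values live in V8's
objects.h; only the shape of the two masked tests is meant to be accurate):

#include <cstdint>

const uint32_t kIsNotStringMask = 0x80, kStringRepresentationMask = 0x07,
               kStringEncodingMask = 0x08, kShortExternalStringMask = 0x10;

int Classify(uint32_t instance_type) {
  uint32_t bits = instance_type &
      (kIsNotStringMask | kStringRepresentationMask |
       kStringEncodingMask | kShortExternalStringMask);
  if (bits == 0) return 9;                           // (1) sequential two byte
  if ((bits & ~kStringEncodingMask) == 0) return 6;  // (2) sequential one byte
  return 0;  // not flat: continue with step (3)'s cons/external checks
}
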
+
+ // (1) Sequential two byte? If yes, go to (9).
__ and_(ebx, kIsNotStringMask |
kStringRepresentationMask |
kStringEncodingMask |
kShortExternalStringMask);
STATIC_ASSERT((kStringTag | kSeqStringTag | kTwoByteStringTag) == 0);
- __ j(zero, &seq_two_byte_string, Label::kNear);
- // Any other flat string must be a flat ASCII string. None of the following
- // string type tests will succeed if subject is not a string or a short
- // external string.
+ __ j(zero, &seq_two_byte_string); // Go to (9).
+
+ // (2) Sequential one byte? If yes, go to (6).
+ // Any other sequential string must be one byte.
__ and_(ebx, Immediate(kIsNotStringMask |
kStringRepresentationMask |
kShortExternalStringMask));
- __ j(zero, &seq_ascii_string, Label::kNear);
-
- // ebx: whether subject is a string and if yes, its string representation
- // Check for flat cons string or sliced string.
- // A flat cons string is a cons string where the second part is the empty
- // string. In that case the subject string is just the first part of the cons
- // string. Also in this case the first part of the cons string is known to be
- // a sequential string or an external string.
- // In the case of a sliced string its offset has to be taken into account.
- Label cons_string, external_string, check_encoding;
+ __ j(zero, &seq_one_byte_string, Label::kNear); // Go to (6).
+
+ // (3) Anything but sequential or cons? If yes, go to (7).
+ // We check whether the subject string is a cons, since sequential strings
+ // have already been covered.
STATIC_ASSERT(kConsStringTag < kExternalStringTag);
STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
__ cmp(ebx, Immediate(kExternalStringTag));
- __ j(less, &cons_string);
- __ j(equal, &external_string);
-
- // Catch non-string subject or short external string.
- STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag !=0);
- __ test(ebx, Immediate(kIsNotStringMask | kShortExternalStringTag));
- __ j(not_zero, &runtime);
+ __ j(greater_equal, &not_seq_nor_cons); // Go to (7).
- // String is sliced.
- __ mov(edi, FieldOperand(eax, SlicedString::kOffsetOffset));
- __ mov(eax, FieldOperand(eax, SlicedString::kParentOffset));
- // edi: offset of sliced string, smi-tagged.
- // eax: parent string.
- __ jmp(&check_encoding, Label::kNear);
- // String is a cons string, check whether it is flat.
- __ bind(&cons_string);
+ // (4) Cons string. Check that it's flat.
+ // Replace subject with first string and reload instance type.
__ cmp(FieldOperand(eax, ConsString::kSecondOffset), factory->empty_string());
__ j(not_equal, &runtime);
__ mov(eax, FieldOperand(eax, ConsString::kFirstOffset));
- __ bind(&check_encoding);
+ __ bind(&check_underlying);
__ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
- // eax: first part of cons string or parent of sliced string.
- // ebx: map of first part of cons string or map of parent of sliced string.
- // Is first part of cons or parent of slice a flat two byte string?
- __ test_b(FieldOperand(ebx, Map::kInstanceTypeOffset),
- kStringRepresentationMask | kStringEncodingMask);
+ __ mov(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
+
+ // (5a) Is subject sequential two byte? If yes, go to (9).
+ __ test_b(ebx, kStringRepresentationMask | kStringEncodingMask);
STATIC_ASSERT((kSeqStringTag | kTwoByteStringTag) == 0);
- __ j(zero, &seq_two_byte_string, Label::kNear);
- // Any other flat string must be sequential ASCII or external.
- __ test_b(FieldOperand(ebx, Map::kInstanceTypeOffset),
- kStringRepresentationMask);
- __ j(not_zero, &external_string);
-
- __ bind(&seq_ascii_string);
- // eax: subject string (flat ASCII)
+ __ j(zero, &seq_two_byte_string); // Go to (9).
+ // (5b) Is subject external? If yes, go to (8).
+ __ test_b(ebx, kStringRepresentationMask);
+ // The underlying external string is never a short external string.
+ STATIC_CHECK(ExternalString::kMaxShortLength < ConsString::kMinLength);
+ STATIC_CHECK(ExternalString::kMaxShortLength < SlicedString::kMinLength);
+ __ j(not_zero, &external_string); // Go to (8).
+
+ // eax: sequential subject string (or look-alike, external string)
+ // edx: original subject string
// ecx: RegExp data (FixedArray)
+ // (6) One byte sequential. Load regexp code for one byte.
+ __ bind(&seq_one_byte_string);
+ // Load previous index and check range before edx is overwritten. We have
+ // to use edx instead of eax here because it might have been only made to
+ // look like a sequential string when it actually is an external string.
+ __ mov(ebx, Operand(esp, kPreviousIndexOffset));
+ __ JumpIfNotSmi(ebx, &runtime);
+ __ cmp(ebx, FieldOperand(edx, String::kLengthOffset));
+ __ j(above_equal, &runtime);
__ mov(edx, FieldOperand(ecx, JSRegExp::kDataAsciiCodeOffset));
- __ Set(ecx, Immediate(1)); // Type is ASCII.
- __ jmp(&check_code, Label::kNear);
-
- __ bind(&seq_two_byte_string);
- // eax: subject string (flat two byte)
- // ecx: RegExp data (FixedArray)
- __ mov(edx, FieldOperand(ecx, JSRegExp::kDataUC16CodeOffset));
- __ Set(ecx, Immediate(0)); // Type is two byte.
+ __ Set(ecx, Immediate(1)); // Type is one byte.
+ // (E) Carry on. String handling is done.
__ bind(&check_code);
+ // edx: irregexp code
// Check that the irregexp code has been generated for the actual string
// encoding. If it has, the field contains a code object otherwise it contains
// a smi (code flushing support).
__ JumpIfSmi(edx, &runtime);
// eax: subject string
+ // ebx: previous index (smi)
// edx: code
// ecx: encoding of subject string (1 if ASCII, 0 if two_byte);
- // Load used arguments before starting to push arguments for call to native
- // RegExp code to avoid handling changing stack height.
- __ mov(ebx, Operand(esp, kPreviousIndexOffset));
- __ SmiUntag(ebx); // Previous index from smi.
-
- // eax: subject string
- // ebx: previous index
- // edx: code
- // ecx: encoding of subject string (1 if ASCII 0 if two_byte);
// All checks done. Now push arguments for native regexp code.
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->regexp_entry_native(), 1);
@@ -3939,6 +4035,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
masm->isolate())));
// Argument 2: Previous index.
+ __ SmiUntag(ebx);
__ mov(Operand(esp, 1 * kPointerSize), ebx);
// Argument 1: Original subject string.
@@ -3969,9 +4066,9 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ test(ecx, ecx);
__ j(zero, &setup_two_byte, Label::kNear);
__ SmiUntag(esi);
- __ lea(ecx, FieldOperand(eax, esi, times_1, SeqAsciiString::kHeaderSize));
+ __ lea(ecx, FieldOperand(eax, esi, times_1, SeqOneByteString::kHeaderSize));
__ mov(Operand(esp, 3 * kPointerSize), ecx); // Argument 4.
- __ lea(ecx, FieldOperand(eax, ebx, times_1, SeqAsciiString::kHeaderSize));
+ __ lea(ecx, FieldOperand(eax, ebx, times_1, SeqOneByteString::kHeaderSize));
__ mov(Operand(esp, 2 * kPointerSize), ecx); // Argument 3.
__ jmp(&setup_rest, Label::kNear);
@@ -4048,8 +4145,23 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// edx: Number of capture registers
// Load last_match_info which is still known to be a fast case JSArray.
+ // Check that the fourth object is a JSArray object.
__ mov(eax, Operand(esp, kLastMatchInfoOffset));
+ __ JumpIfSmi(eax, &runtime);
+ __ CmpObjectType(eax, JS_ARRAY_TYPE, ebx);
+ __ j(not_equal, &runtime);
+ // Check that the JSArray is in fast case.
__ mov(ebx, FieldOperand(eax, JSArray::kElementsOffset));
+ __ mov(eax, FieldOperand(ebx, HeapObject::kMapOffset));
+ __ cmp(eax, factory->fixed_array_map());
+ __ j(not_equal, &runtime);
+ // Check that the last match info has space for the capture registers and the
+ // additional information.
+ __ mov(eax, FieldOperand(ebx, FixedArray::kLengthOffset));
+ __ SmiUntag(eax);
+ __ sub(eax, Immediate(RegExpImpl::kLastMatchOverhead));
+ __ cmp(edx, eax);
+ __ j(greater, &runtime);
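
This block is the last-match-info validation moved from the stub's prologue
to after the native call, where the result is actually needed. The space
check itself is simple arithmetic; subtracting the overhead from the length,
rather than adding it to the capture count as before, leaves edx intact for
the stores that follow:

bool LastMatchInfoHasSpace(int length, int capture_registers, int overhead) {
  // RegExpImpl::kLastMatchOverhead bookkeeping slots must fit too.
  return capture_registers <= length - overhead;
}
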
// ebx: last_match_info backing store (FixedArray)
// edx: number of capture registers
@@ -4059,13 +4171,14 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ SmiUntag(edx); // Number of capture registers back from smi.
// Store last subject and last input.
__ mov(eax, Operand(esp, kSubjectOffset));
+ __ mov(ecx, eax);
__ mov(FieldOperand(ebx, RegExpImpl::kLastSubjectOffset), eax);
__ RecordWriteField(ebx,
RegExpImpl::kLastSubjectOffset,
eax,
edi,
kDontSaveFPRegs);
- __ mov(eax, Operand(esp, kSubjectOffset));
+ __ mov(eax, ecx);
__ mov(FieldOperand(ebx, RegExpImpl::kLastInputOffset), eax);
__ RecordWriteField(ebx,
RegExpImpl::kLastInputOffset,
@@ -4103,10 +4216,19 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ mov(eax, Operand(esp, kLastMatchInfoOffset));
__ ret(4 * kPointerSize);
- // External string. Short external strings have already been ruled out.
- // eax: subject string (expected to be external)
- // ebx: scratch
+ // Do the runtime call to execute the regexp.
+ __ bind(&runtime);
+ __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+
+ // Deferred code for string handling.
+ // (7) Not a long external string? If yes, go to (10).
+ __ bind(&not_seq_nor_cons);
+ // Compare flags are still set from (3).
+ __ j(greater, &not_long_external, Label::kNear); // Go to (10).
+
+ // (8) External string. Short external strings have been ruled out.
__ bind(&external_string);
+ // Reload instance type.
__ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
__ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
if (FLAG_debug_code) {
@@ -4117,16 +4239,41 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
}
__ mov(eax, FieldOperand(eax, ExternalString::kResourceDataOffset));
// Move the pointer so that offset-wise, it looks like a sequential string.
- STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
__ sub(eax, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
STATIC_ASSERT(kTwoByteStringTag == 0);
+ // (8a) Is the external string one byte? If yes, go to (6).
__ test_b(ebx, kStringEncodingMask);
- __ j(not_zero, &seq_ascii_string);
- __ jmp(&seq_two_byte_string);
+ __ j(not_zero, &seq_one_byte_string); // Go to (6).
- // Do the runtime call to execute the regexp.
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+ // eax: sequential subject string (or look-alike, external string)
+ // edx: original subject string
+ // ecx: RegExp data (FixedArray)
+ // (9) Two byte sequential. Load regexp code for two byte. Go to (E).
+ __ bind(&seq_two_byte_string);
+ // Load previous index and check range before edx is overwritten. We have
+ // to use edx instead of eax here because it might have been only made to
+ // look like a sequential string when it actually is an external string.
+ __ mov(ebx, Operand(esp, kPreviousIndexOffset));
+ __ JumpIfNotSmi(ebx, &runtime);
+ __ cmp(ebx, FieldOperand(edx, String::kLengthOffset));
+ __ j(above_equal, &runtime);
+ __ mov(edx, FieldOperand(ecx, JSRegExp::kDataUC16CodeOffset));
+ __ Set(ecx, Immediate(0)); // Type is two byte.
+ __ jmp(&check_code); // Go to (E).
+
+ // (10) Not a string or a short external string? If yes, bail out to runtime.
+ __ bind(&not_long_external);
+ // Catch non-string subject or short external string.
+ STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag != 0);
+ __ test(ebx, Immediate(kIsNotStringMask | kShortExternalStringTag));
+ __ j(not_zero, &runtime);
+
+ // (11) Sliced string. Replace subject with parent. Go to (5a).
+ // Load offset into edi and replace subject string with parent.
+ __ mov(edi, FieldOperand(eax, SlicedString::kOffsetOffset));
+ __ mov(eax, FieldOperand(eax, SlicedString::kParentOffset));
+ __ jmp(&check_underlying); // Go to (5a).
#endif // V8_INTERPRETED_REGEXP
}
@@ -4146,8 +4293,9 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
// JSArray: [Map][empty properties][Elements][Length-smi][index][input]
// Elements: [Map][Length][..elements..]
__ AllocateInNewSpace(JSRegExpResult::kSize + FixedArray::kHeaderSize,
- times_half_pointer_size,
- ebx, // In: Number of elements (times 2, being a smi)
+ times_pointer_size,
+ ebx, // In: Number of elements as a smi
+ REGISTER_VALUE_IS_SMI,
eax, // Out: Start of allocation (tagged).
ecx, // Out: End of allocation.
edx, // Scratch register
@@ -4330,30 +4478,59 @@ static int NegativeComparisonResult(Condition cc) {
return (cc == greater || cc == greater_equal) ? LESS : GREATER;
}
-void CompareStub::Generate(MacroAssembler* masm) {
- ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
+static void CheckInputType(MacroAssembler* masm,
+ Register input,
+ CompareIC::State expected,
+ Label* fail) {
+ Label ok;
+ if (expected == CompareIC::SMI) {
+ __ JumpIfNotSmi(input, fail);
+ } else if (expected == CompareIC::NUMBER) {
+ __ JumpIfSmi(input, &ok);
+ __ cmp(FieldOperand(input, HeapObject::kMapOffset),
+ Immediate(masm->isolate()->factory()->heap_number_map()));
+ __ j(not_equal, fail);
+ }
+ // We could be strict about internalized/non-internalized here, but as long as
+ // hydrogen doesn't care, the stub doesn't have to care either.
+ __ bind(&ok);
+}
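
CheckInputType admits exactly what the recorded CompareIC state promises:
SMI insists on a smi, NUMBER on a smi or heap number, and every other state
checks nothing. The same contract in plain C++ (enum values illustrative):

enum State { SMI, NUMBER, GENERIC };
enum ValueKind { kSmi, kHeapNumber, kOther };

bool InputMatches(State expected, ValueKind v) {
  if (expected == SMI) return v == kSmi;
  if (expected == NUMBER) return v == kSmi || v == kHeapNumber;
  return true;  // jumping to the miss label is the caller's job on false
}
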
+
+
+static void BranchIfNotInternalizedString(MacroAssembler* masm,
+ Label* label,
+ Register object,
+ Register scratch) {
+ __ JumpIfSmi(object, label);
+ __ mov(scratch, FieldOperand(object, HeapObject::kMapOffset));
+ __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
+ __ and_(scratch, kIsInternalizedMask | kIsNotStringMask);
+ __ cmp(scratch, kInternalizedTag | kStringTag);
+ __ j(not_equal, label);
+}
+
+
+void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
Label check_unequal_objects;
+ Condition cc = GetCondition();
- // Compare two smis if required.
- if (include_smi_compare_) {
- Label non_smi, smi_done;
- __ mov(ecx, edx);
- __ or_(ecx, eax);
- __ JumpIfNotSmi(ecx, &non_smi, Label::kNear);
- __ sub(edx, eax); // Return on the result of the subtraction.
- __ j(no_overflow, &smi_done, Label::kNear);
- __ not_(edx); // Correct sign in case of overflow. edx is never 0 here.
- __ bind(&smi_done);
- __ mov(eax, edx);
- __ ret(0);
- __ bind(&non_smi);
- } else if (FLAG_debug_code) {
- __ mov(ecx, edx);
- __ or_(ecx, eax);
- __ test(ecx, Immediate(kSmiTagMask));
- __ Assert(not_zero, "Unexpected smi operands.");
- }
+ Label miss;
+ CheckInputType(masm, edx, left_, &miss);
+ CheckInputType(masm, eax, right_, &miss);
+
+ // Compare two smis.
+ Label non_smi, smi_done;
+ __ mov(ecx, edx);
+ __ or_(ecx, eax);
+ __ JumpIfNotSmi(ecx, &non_smi, Label::kNear);
+ __ sub(edx, eax); // Return on the result of the subtraction.
+ __ j(no_overflow, &smi_done, Label::kNear);
+ __ not_(edx); // Correct sign in case of overflow. edx is never 0 here.
+ __ bind(&smi_done);
+ __ mov(eax, edx);
+ __ ret(0);
+ __ bind(&non_smi);
// NOTICE! This code is only reached after a smi-fast-case check, so
// it is certain that at least one operand isn't a smi.
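
The smi fast path returns edx - eax directly and repairs the sign on 32-bit
overflow with NOT. A sketch of why that is sound (operands are raw tagged
smi words, hence even; truncation is assumed two's-complement, as on ia32):

#include <cstdint>

int32_t SmiCompare(int32_t left, int32_t right) {
  int64_t wide = static_cast<int64_t>(left) - right;  // the true difference
  int32_t narrow = static_cast<int32_t>(wide);        // what 'sub' leaves
  // On overflow the truncated difference has the wrong sign; NOT flips it,
  // and because tagged smis are even, ~narrow is odd and so never zero.
  if (wide != narrow) narrow = ~narrow;
  return narrow;  // <0, 0 or >0, exactly what the stub returns in eax
}
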
@@ -4382,7 +4559,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
__ bind(&user_equal);
- __ pop(ebx); // Return address.
+ __ pop(ebx); // Return address.
__ push(eax);
__ push(edx);
__ push(ebx);
@@ -4398,67 +4575,61 @@ void CompareStub::Generate(MacroAssembler* masm) {
__ cmp(eax, edx);
__ j(not_equal, &not_identical);
- if (cc_ != equal) {
+ if (cc != equal) {
// Check for undefined. undefined OP undefined is false even though
// undefined == undefined.
Label check_for_nan;
__ cmp(edx, masm->isolate()->factory()->undefined_value());
__ j(not_equal, &check_for_nan, Label::kNear);
- __ Set(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc_))));
+ __ Set(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc))));
__ ret(0);
__ bind(&check_for_nan);
}
// Test for NaN. Sadly, we can't just compare to factory->nan_value(),
// so we do the second best thing - test it ourselves.
- // Note: if cc_ != equal, never_nan_nan_ is not used.
- if (never_nan_nan_ && (cc_ == equal)) {
- __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
+ Label heap_number;
+ __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
+ Immediate(masm->isolate()->factory()->heap_number_map()));
+ __ j(equal, &heap_number, Label::kNear);
+ if (cc != equal) {
+ // Call runtime on identical JSObjects. Otherwise return equal.
+ __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
+ __ j(above_equal, &not_identical);
+ }
+ __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
+ __ ret(0);
+
+ __ bind(&heap_number);
+ // It is a heap number, so return non-equal if it's NaN and equal if
+ // it's not NaN.
+ // The representation of NaN values has all exponent bits (52..62) set,
+ // and not all mantissa bits (0..51) clear.
+ // We only accept QNaNs, which have bit 51 set.
+ // Read top bits of double representation (second word of value).
+
+ // Value is a QNaN if value & kQuietNaNMask == kQuietNaNMask, i.e.,
+ // all bits in the mask are set. We only need to check the word
+ // that contains the exponent and high bit of the mantissa.
+ STATIC_ASSERT(((kQuietNaNHighBitsMask << 1) & 0x80000000u) != 0);
+ __ mov(edx, FieldOperand(edx, HeapNumber::kExponentOffset));
+ __ Set(eax, Immediate(0));
+ // Shift value and mask so kQuietNaNHighBitsMask applies to topmost
+ // bits.
+ __ add(edx, edx);
+ __ cmp(edx, kQuietNaNHighBitsMask << 1);
+ if (cc == equal) {
+ STATIC_ASSERT(EQUAL != 1);
+ __ setcc(above_equal, eax);
__ ret(0);
} else {
- Label heap_number;
- __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
- Immediate(masm->isolate()->factory()->heap_number_map()));
- __ j(equal, &heap_number, Label::kNear);
- if (cc_ != equal) {
- // Call runtime on identical JSObjects. Otherwise return equal.
- __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
- __ j(above_equal, &not_identical);
- }
+ Label nan;
+ __ j(above_equal, &nan, Label::kNear);
__ Set(eax, Immediate(Smi::FromInt(EQUAL)));
__ ret(0);
-
- __ bind(&heap_number);
- // It is a heap number, so return non-equal if it's NaN and equal if
- // it's not NaN.
- // The representation of NaN values has all exponent bits (52..62) set,
- // and not all mantissa bits (0..51) clear.
- // We only accept QNaNs, which have bit 51 set.
- // Read top bits of double representation (second word of value).
-
- // Value is a QNaN if value & kQuietNaNMask == kQuietNaNMask, i.e.,
- // all bits in the mask are set. We only need to check the word
- // that contains the exponent and high bit of the mantissa.
- STATIC_ASSERT(((kQuietNaNHighBitsMask << 1) & 0x80000000u) != 0);
- __ mov(edx, FieldOperand(edx, HeapNumber::kExponentOffset));
- __ Set(eax, Immediate(0));
- // Shift value and mask so kQuietNaNHighBitsMask applies to topmost
- // bits.
- __ add(edx, edx);
- __ cmp(edx, kQuietNaNHighBitsMask << 1);
- if (cc_ == equal) {
- STATIC_ASSERT(EQUAL != 1);
- __ setcc(above_equal, eax);
- __ ret(0);
- } else {
- Label nan;
- __ j(above_equal, &nan, Label::kNear);
- __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
- __ ret(0);
- __ bind(&nan);
- __ Set(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc_))));
- __ ret(0);
- }
+ __ bind(&nan);
+ __ Set(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc))));
+ __ ret(0);
}
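
The un-nested NaN test reads only the high word of the double: a quiet NaN
has all eleven exponent bits plus mantissa bit 51 set, and 'add edx, edx'
shifts out the sign bit so a single unsigned compare suffices. The same
predicate in standalone C++ (0x7FF80000 is the IEEE-754 value of the
exponent-plus-quiet-bit mask):

#include <cstdint>

bool IsQuietNaNHighWord(uint32_t high) {
  const uint32_t kQuietNaNHighBitsMask = 0x7FF80000;  // exponent + bit 51
  return (high << 1) >= (kQuietNaNHighBitsMask << 1);
}
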
__ bind(&not_identical);
@@ -4466,7 +4637,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
// Strict equality can quickly decide whether objects are equal.
// Non-strict object equality is slower, so it is handled later in the stub.
- if (cc_ == equal && strict_) {
+ if (cc == equal && strict()) {
Label slow; // Fallthrough label.
Label not_smis;
// If we're doing a strict equality comparison, we don't have to do
@@ -4537,75 +4708,73 @@ void CompareStub::Generate(MacroAssembler* masm) {
}
// Generate the number comparison code.
- if (include_number_compare_) {
- Label non_number_comparison;
- Label unordered;
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope use_sse2(SSE2);
- CpuFeatures::Scope use_cmov(CMOV);
-
- FloatingPointHelper::LoadSSE2Operands(masm, &non_number_comparison);
- __ ucomisd(xmm0, xmm1);
+ Label non_number_comparison;
+ Label unordered;
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope use_sse2(SSE2);
+ CpuFeatures::Scope use_cmov(CMOV);
- // Don't base result on EFLAGS when a NaN is involved.
- __ j(parity_even, &unordered, Label::kNear);
- // Return a result of -1, 0, or 1, based on EFLAGS.
- __ mov(eax, 0); // equal
- __ mov(ecx, Immediate(Smi::FromInt(1)));
- __ cmov(above, eax, ecx);
- __ mov(ecx, Immediate(Smi::FromInt(-1)));
- __ cmov(below, eax, ecx);
- __ ret(0);
- } else {
- FloatingPointHelper::CheckFloatOperands(
- masm, &non_number_comparison, ebx);
- FloatingPointHelper::LoadFloatOperand(masm, eax);
- FloatingPointHelper::LoadFloatOperand(masm, edx);
- __ FCmp();
+ FloatingPointHelper::LoadSSE2Operands(masm, &non_number_comparison);
+ __ ucomisd(xmm0, xmm1);
- // Don't base result on EFLAGS when a NaN is involved.
- __ j(parity_even, &unordered, Label::kNear);
+ // Don't base result on EFLAGS when a NaN is involved.
+ __ j(parity_even, &unordered, Label::kNear);
+ // Return a result of -1, 0, or 1, based on EFLAGS.
+ __ mov(eax, 0); // equal
+ __ mov(ecx, Immediate(Smi::FromInt(1)));
+ __ cmov(above, eax, ecx);
+ __ mov(ecx, Immediate(Smi::FromInt(-1)));
+ __ cmov(below, eax, ecx);
+ __ ret(0);
+ } else {
+ FloatingPointHelper::CheckFloatOperands(
+ masm, &non_number_comparison, ebx);
+ FloatingPointHelper::LoadFloatOperand(masm, eax);
+ FloatingPointHelper::LoadFloatOperand(masm, edx);
+ __ FCmp();
- Label below_label, above_label;
- // Return a result of -1, 0, or 1, based on EFLAGS.
- __ j(below, &below_label, Label::kNear);
- __ j(above, &above_label, Label::kNear);
+ // Don't base result on EFLAGS when a NaN is involved.
+ __ j(parity_even, &unordered, Label::kNear);
- __ Set(eax, Immediate(0));
- __ ret(0);
+ Label below_label, above_label;
+ // Return a result of -1, 0, or 1, based on EFLAGS.
+ __ j(below, &below_label, Label::kNear);
+ __ j(above, &above_label, Label::kNear);
- __ bind(&below_label);
- __ mov(eax, Immediate(Smi::FromInt(-1)));
- __ ret(0);
+ __ Set(eax, Immediate(0));
+ __ ret(0);
- __ bind(&above_label);
- __ mov(eax, Immediate(Smi::FromInt(1)));
- __ ret(0);
- }
+ __ bind(&below_label);
+ __ mov(eax, Immediate(Smi::FromInt(-1)));
+ __ ret(0);
- // If one of the numbers was NaN, then the result is always false.
- // The cc is never not-equal.
- __ bind(&unordered);
- ASSERT(cc_ != not_equal);
- if (cc_ == less || cc_ == less_equal) {
- __ mov(eax, Immediate(Smi::FromInt(1)));
- } else {
- __ mov(eax, Immediate(Smi::FromInt(-1)));
- }
+ __ bind(&above_label);
+ __ mov(eax, Immediate(Smi::FromInt(1)));
__ ret(0);
+ }
- // The number comparison code did not provide a valid result.
- __ bind(&non_number_comparison);
+ // If one of the numbers was NaN, then the result is always false.
+ // The cc is never not-equal.
+ __ bind(&unordered);
+ ASSERT(cc != not_equal);
+ if (cc == less || cc == less_equal) {
+ __ mov(eax, Immediate(Smi::FromInt(1)));
+ } else {
+ __ mov(eax, Immediate(Smi::FromInt(-1)));
}
+ __ ret(0);
+
+ // The number comparison code did not provide a valid result.
+ __ bind(&non_number_comparison);
- // Fast negative check for symbol-to-symbol equality.
+ // Fast negative check for internalized-to-internalized equality.
Label check_for_strings;
- if (cc_ == equal) {
- BranchIfNonSymbol(masm, &check_for_strings, eax, ecx);
- BranchIfNonSymbol(masm, &check_for_strings, edx, ecx);
+ if (cc == equal) {
+ BranchIfNotInternalizedString(masm, &check_for_strings, eax, ecx);
+ BranchIfNotInternalizedString(masm, &check_for_strings, edx, ecx);
// We've already checked for object identity, so if both operands
- // are symbols they aren't equal. Register eax already holds a
+ // are internalized they aren't equal. Register eax already holds a
// non-zero value, which indicates not equal, so just return.
__ ret(0);
}
@@ -4616,7 +4785,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
&check_unequal_objects);
// Inline comparison of ASCII strings.
- if (cc_ == equal) {
+ if (cc == equal) {
StringCompareStub::GenerateFlatAsciiStringEquals(masm,
edx,
eax,
@@ -4635,7 +4804,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
#endif
__ bind(&check_unequal_objects);
- if (cc_ == equal && !strict_) {
+ if (cc == equal && !strict()) {
// Non-strict equality. Objects are unequal if
// they are both JSObjects and not undetectable,
// and their pointers are different.
@@ -4679,11 +4848,11 @@ void CompareStub::Generate(MacroAssembler* masm) {
// Figure out which native to call and setup the arguments.
Builtins::JavaScript builtin;
- if (cc_ == equal) {
- builtin = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
+ if (cc == equal) {
+ builtin = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
} else {
builtin = Builtins::COMPARE;
- __ push(Immediate(Smi::FromInt(NegativeComparisonResult(cc_))));
+ __ push(Immediate(Smi::FromInt(NegativeComparisonResult(cc))));
}
// Restore return address on the stack.
@@ -4692,19 +4861,9 @@ void CompareStub::Generate(MacroAssembler* masm) {
// Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
__ InvokeBuiltin(builtin, JUMP_FUNCTION);
-}
-
-void CompareStub::BranchIfNonSymbol(MacroAssembler* masm,
- Label* label,
- Register object,
- Register scratch) {
- __ JumpIfSmi(object, label);
- __ mov(scratch, FieldOperand(object, HeapObject::kMapOffset));
- __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
- __ and_(scratch, kIsSymbolMask | kIsNotStringMask);
- __ cmp(scratch, kSymbolTag | kStringTag);
- __ j(not_equal, label);
+ __ bind(&miss);
+ GenerateMiss(masm);
}
@@ -4718,12 +4877,13 @@ void InterruptStub::Generate(MacroAssembler* masm) {
}
-static void GenerateRecordCallTarget(MacroAssembler* masm) {
+static void GenerateRecordCallTargetNoArray(MacroAssembler* masm) {
// Cache the called function in a global property cell. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
// megamorphic.
// ebx : cache cell for call target
// edi : the function to call
+ ASSERT(!FLAG_optimize_constructed_arrays);
Isolate* isolate = masm->isolate();
Label initialize, done;
@@ -4756,6 +4916,82 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
}
+static void GenerateRecordCallTarget(MacroAssembler* masm) {
+ // Cache the called function in a global property cell. Cache states
+ // are uninitialized, monomorphic (indicated by a JSFunction), and
+ // megamorphic.
+ // ebx : cache cell for call target
+ // edi : the function to call
+ ASSERT(FLAG_optimize_constructed_arrays);
+ Isolate* isolate = masm->isolate();
+ Label initialize, done, miss, megamorphic, not_array_function;
+
+ // Load the cache state into ecx.
+ __ mov(ecx, FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset));
+
+ // A monomorphic cache hit or an already megamorphic state: invoke the
+ // function without changing the state.
+ __ cmp(ecx, edi);
+ __ j(equal, &done);
+ __ cmp(ecx, Immediate(TypeFeedbackCells::MegamorphicSentinel(isolate)));
+ __ j(equal, &done);
+
+ // Special handling of the Array() function, which caches not only the
+ // monomorphic Array function but also the initial ElementsKind, using
+ // special sentinel values.
+ Handle<Object> terminal_kind_sentinel =
+ TypeFeedbackCells::MonomorphicArraySentinel(isolate,
+ LAST_FAST_ELEMENTS_KIND);
+ __ cmp(ecx, Immediate(terminal_kind_sentinel));
+ __ j(above, &miss);
+ // Load the global or builtins object from the current context
+ __ LoadGlobalContext(ecx);
+ // Make sure the function is the Array() function
+ __ cmp(edi, Operand(ecx,
+ Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
+ __ j(not_equal, &megamorphic);
+ __ jmp(&done);
+
+ __ bind(&miss);
+
+ // A monomorphic miss (i.e., here the cache is not uninitialized) goes
+ // megamorphic.
+ __ cmp(ecx, Immediate(TypeFeedbackCells::UninitializedSentinel(isolate)));
+ __ j(equal, &initialize);
+ // MegamorphicSentinel is an immortal immovable object (undefined) so no
+ // write-barrier is needed.
+ __ bind(&megamorphic);
+ __ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset),
+ Immediate(TypeFeedbackCells::MegamorphicSentinel(isolate)));
+ __ jmp(&done, Label::kNear);
+
+ // An uninitialized cache is patched with the function, or with a sentinel
+ // indicating the ElementsKind if the function is the Array constructor.
+ __ bind(&initialize);
+ __ LoadGlobalContext(ecx);
+ // Make sure the function is the Array() function
+ __ cmp(edi, Operand(ecx,
+ Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
+ __ j(not_equal, &not_array_function);
+
+ // The target function is the Array constructor; install a sentinel value in
+ // the constructor's type info cell that will track the initial ElementsKind
+ // that should be used for the array when it is constructed.
+ Handle<Object> initial_kind_sentinel =
+ TypeFeedbackCells::MonomorphicArraySentinel(isolate,
+ GetInitialFastElementsKind());
+ __ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset),
+ Immediate(initial_kind_sentinel));
+ __ jmp(&done);
+
+ __ bind(&not_array_function);
+ __ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset), edi);
+ // No need for a write barrier here - cells are rescanned.
+
+ __ bind(&done);
+}
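
GenerateRecordCallTarget implements a three-state inline cache over the
property cell, with the Array() function additionally encoded through the
ElementsKind sentinels. Setting that special case aside, the state machine
is small; a plain C++ sketch (names illustrative, not the V8 API):

enum class CacheState { kUninitialized, kMonomorphic, kMegamorphic };

void RecordCallTarget(CacheState* state, const void** cell,
                      const void* target) {
  switch (*state) {
    case CacheState::kUninitialized:  // first call seen: go monomorphic
      *state = CacheState::kMonomorphic;
      *cell = target;
      break;
    case CacheState::kMonomorphic:    // a second, different target degrades
      if (*cell != target) {          // the cache for good
        *state = CacheState::kMegamorphic;
        *cell = nullptr;              // MegamorphicSentinel stand-in
      }
      break;
    case CacheState::kMegamorphic:    // terminal state: record nothing
      break;
  }
}
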
+
+
void CallFunctionStub::Generate(MacroAssembler* masm) {
// ebx : cache cell for call target
// edi : the function to call
@@ -4787,7 +5023,11 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
__ j(not_equal, &slow);
if (RecordCallTarget()) {
- GenerateRecordCallTarget(masm);
+ if (FLAG_optimize_constructed_arrays) {
+ GenerateRecordCallTarget(masm);
+ } else {
+ GenerateRecordCallTargetNoArray(masm);
+ }
}
// Fast-case: Just invoke the function.
@@ -4860,14 +5100,20 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
__ j(not_equal, &slow);
if (RecordCallTarget()) {
- GenerateRecordCallTarget(masm);
+ if (FLAG_optimize_constructed_arrays) {
+ GenerateRecordCallTarget(masm);
+ } else {
+ GenerateRecordCallTargetNoArray(masm);
+ }
}
// Jump to the function-specific construct stub.
- __ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ mov(ebx, FieldOperand(ebx, SharedFunctionInfo::kConstructStubOffset));
- __ lea(ebx, FieldOperand(ebx, Code::kHeaderSize));
- __ jmp(ebx);
+ Register jmp_reg = FLAG_optimize_constructed_arrays ? ecx : ebx;
+ __ mov(jmp_reg, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(jmp_reg, FieldOperand(jmp_reg,
+ SharedFunctionInfo::kConstructStubOffset));
+ __ lea(jmp_reg, FieldOperand(jmp_reg, Code::kHeaderSize));
+ __ jmp(jmp_reg);
// edi: called object
// eax: number of arguments
@@ -4902,29 +5148,49 @@ bool CEntryStub::IsPregenerated() {
}
-void CodeStub::GenerateStubsAheadOfTime() {
- CEntryStub::GenerateAheadOfTime();
- StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime();
+void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
+ CEntryStub::GenerateAheadOfTime(isolate);
+ StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
// It is important that the store buffer overflow stubs are generated first.
- RecordWriteStub::GenerateFixedRegStubsAheadOfTime();
+ RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate);
}
-void CodeStub::GenerateFPStubs() {
- CEntryStub save_doubles(1, kSaveFPRegs);
- Handle<Code> code = save_doubles.GetCode();
- code->set_is_pregenerated(true);
- code->GetIsolate()->set_fp_stubs_generated(true);
+void CodeStub::GenerateFPStubs(Isolate* isolate) {
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CEntryStub save_doubles(1, kSaveFPRegs);
+ // Stubs might already be in the snapshot; detect that and don't regenerate,
+ // which would lead to code stub initialization state being messed up.
+ Code* save_doubles_code;
+ if (!save_doubles.FindCodeInCache(&save_doubles_code, isolate)) {
+ save_doubles_code = *(save_doubles.GetCode(isolate));
+ }
+ save_doubles_code->set_is_pregenerated(true);
+ isolate->set_fp_stubs_generated(true);
+ }
}
-void CEntryStub::GenerateAheadOfTime() {
+void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
CEntryStub stub(1, kDontSaveFPRegs);
- Handle<Code> code = stub.GetCode();
+ Handle<Code> code = stub.GetCode(isolate);
code->set_is_pregenerated(true);
}
+static void JumpIfOOM(MacroAssembler* masm,
+ Register value,
+ Register scratch,
+ Label* oom_label) {
+ __ mov(scratch, value);
+ STATIC_ASSERT(Failure::OUT_OF_MEMORY_EXCEPTION == 3);
+ STATIC_ASSERT(kFailureTag == 3);
+ __ and_(scratch, 0xf);
+ __ cmp(scratch, 0xf);
+ __ j(equal, oom_label);
+}
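
JumpIfOOM works because of the encoding pinned by the two STATIC_ASSERTs:
the failure tag occupies the low two bits (value 3) and the failure type
OUT_OF_MEMORY_EXCEPTION (also 3) the next two, so the low nibble of an OOM
failure is exactly 0xf:

#include <cstdint>

bool IsOutOfMemory(uintptr_t failure) {
  // low 2 bits: kFailureTag == 3; next 2: OUT_OF_MEMORY_EXCEPTION == 3
  return (failure & 0xf) == 0xf;
}
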
+
+
void CEntryStub::GenerateCore(MacroAssembler* masm,
Label* throw_normal_exception,
Label* throw_termination_exception,
@@ -5022,8 +5288,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
__ j(zero, &retry, Label::kNear);
// Special handling of out of memory exceptions.
- __ cmp(eax, reinterpret_cast<int32_t>(Failure::OutOfMemoryException()));
- __ j(equal, throw_out_of_memory_exception);
+ JumpIfOOM(masm, eax, ecx, throw_out_of_memory_exception);
// Retrieve the pending exception and clear the variable.
__ mov(eax, Operand::StaticVariable(pending_exception_address));
@@ -5105,7 +5370,10 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// Set pending exception and eax to out of memory exception.
ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
isolate);
- __ mov(eax, reinterpret_cast<int32_t>(Failure::OutOfMemoryException()));
+ Label already_have_failure;
+ JumpIfOOM(masm, eax, ecx, &already_have_failure);
+ __ mov(eax, reinterpret_cast<int32_t>(Failure::OutOfMemoryException(0x1)));
+ __ bind(&already_have_failure);
__ mov(Operand::StaticVariable(pending_exception), eax);
// Fall through to the next label.
@@ -5449,44 +5717,6 @@ Register InstanceofStub::left() { return eax; }
Register InstanceofStub::right() { return edx; }
-int CompareStub::MinorKey() {
- // Encode the three parameters in a unique 16 bit value. To avoid duplicate
- // stubs the never NaN NaN condition is only taken into account if the
- // condition is equals.
- ASSERT(static_cast<unsigned>(cc_) < (1 << 12));
- ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
- return ConditionField::encode(static_cast<unsigned>(cc_))
- | RegisterField::encode(false) // lhs_ and rhs_ are not used
- | StrictField::encode(strict_)
- | NeverNanNanField::encode(cc_ == equal ? never_nan_nan_ : false)
- | IncludeNumberCompareField::encode(include_number_compare_)
- | IncludeSmiCompareField::encode(include_smi_compare_);
-}
-
-
-// Unfortunately you have to run without snapshots to see most of these
-// names in the profile since most compare stubs end up in the snapshot.
-void CompareStub::PrintName(StringStream* stream) {
- ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
- const char* cc_name;
- switch (cc_) {
- case less: cc_name = "LT"; break;
- case greater: cc_name = "GT"; break;
- case less_equal: cc_name = "LE"; break;
- case greater_equal: cc_name = "GE"; break;
- case equal: cc_name = "EQ"; break;
- case not_equal: cc_name = "NE"; break;
- default: cc_name = "UnknownCondition"; break;
- }
- bool is_equality = cc_ == equal || cc_ == not_equal;
- stream->Add("CompareStub_%s", cc_name);
- if (strict_ && is_equality) stream->Add("_STRICT");
- if (never_nan_nan_ && is_equality) stream->Add("_NO_NAN");
- if (!include_number_compare_) stream->Add("_NO_NUMBER");
- if (!include_smi_compare_) stream->Add("_NO_SMI");
-}
-
-
// -------------------------------------------------------------------------
// StringCharCodeAtGenerator
@@ -5586,10 +5816,10 @@ void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
// Fast case of Heap::LookupSingleCharacterStringFromCode.
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiShiftSize == 0);
- ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1));
+ ASSERT(IsPowerOf2(String::kMaxOneByteCharCode + 1));
__ test(code_,
Immediate(kSmiTagMask |
- ((~String::kMaxAsciiCharCode) << kSmiTagSize)));
+ ((~String::kMaxOneByteCharCode) << kSmiTagSize)));
__ j(not_zero, &slow_case_);
Factory* factory = masm->isolate()->factory();
@@ -5626,23 +5856,6 @@ void StringCharFromCodeGenerator::GenerateSlow(
}
-// -------------------------------------------------------------------------
-// StringCharAtGenerator
-
-void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) {
- char_code_at_generator_.GenerateFast(masm);
- char_from_code_generator_.GenerateFast(masm);
-}
-
-
-void StringCharAtGenerator::GenerateSlow(
- MacroAssembler* masm,
- const RuntimeCallHelper& call_helper) {
- char_code_at_generator_.GenerateSlow(masm, call_helper);
- char_from_code_generator_.GenerateSlow(masm, call_helper);
-}
-
-
void StringAddStub::Generate(MacroAssembler* masm) {
Label call_runtime, call_builtin;
Builtins::JavaScript builtin_id = Builtins::ADD;
@@ -5712,8 +5925,8 @@ void StringAddStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT(Smi::kMaxValue == String::kMaxLength);
// Handle exceptionally long strings in the runtime system.
__ j(overflow, &call_runtime);
- // Use the symbol table when adding two one character strings, as it
- // helps later optimizations to return a symbol here.
+ // Use the string table when adding two one character strings, as it
+ // helps later optimizations to return an internalized string here.
__ cmp(ebx, Immediate(Smi::FromInt(2)));
__ j(not_equal, &longer_than_two);
@@ -5721,13 +5934,13 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ JumpIfNotBothSequentialAsciiStrings(eax, edx, ebx, ecx, &call_runtime);
// Get the two characters forming the new string.
- __ movzx_b(ebx, FieldOperand(eax, SeqAsciiString::kHeaderSize));
- __ movzx_b(ecx, FieldOperand(edx, SeqAsciiString::kHeaderSize));
+ __ movzx_b(ebx, FieldOperand(eax, SeqOneByteString::kHeaderSize));
+ __ movzx_b(ecx, FieldOperand(edx, SeqOneByteString::kHeaderSize));
- // Try to lookup two character string in symbol table. If it is not found
+ // Try to lookup two character string in string table. If it is not found
// just allocate a new one.
Label make_two_character_string, make_two_character_string_no_reload;
- StringHelper::GenerateTwoCharacterSymbolTableProbe(
+ StringHelper::GenerateTwoCharacterStringTableProbe(
masm, ebx, ecx, eax, edx, edi,
&make_two_character_string_no_reload, &make_two_character_string);
__ IncrementCounter(counters->string_add_native(), 1);
@@ -5739,8 +5952,8 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ mov(eax, Operand(esp, 2 * kPointerSize)); // First argument.
__ mov(edx, Operand(esp, 1 * kPointerSize)); // Second argument.
// Get the two characters forming the new string.
- __ movzx_b(ebx, FieldOperand(eax, SeqAsciiString::kHeaderSize));
- __ movzx_b(ecx, FieldOperand(edx, SeqAsciiString::kHeaderSize));
+ __ movzx_b(ebx, FieldOperand(eax, SeqOneByteString::kHeaderSize));
+ __ movzx_b(ecx, FieldOperand(edx, SeqOneByteString::kHeaderSize));
__ bind(&make_two_character_string_no_reload);
__ IncrementCounter(counters->string_add_make_two_char(), 1);
__ AllocateAsciiString(eax, 2, edi, edx, &call_runtime);
@@ -5748,7 +5961,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ shl(ecx, kBitsPerByte);
__ or_(ebx, ecx);
// Set the characters in the new string.
- __ mov_w(FieldOperand(eax, SeqAsciiString::kHeaderSize), ebx);
+ __ mov_w(FieldOperand(eax, SeqOneByteString::kHeaderSize), ebx);
__ IncrementCounter(counters->string_add_native(), 1);
__ ret(2 * kPointerSize);
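
The two-character path packs both one-byte characters into a single 16-bit
store ('shl ecx, 8; or ebx, ecx; mov_w'), with char 1 in the low byte as
little-endian layout requires:

#include <cstdint>

uint16_t PackTwoChars(uint8_t c1, uint8_t c2) {
  return static_cast<uint16_t>(c1 | (c2 << 8));  // char 1 in the low byte
}
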
@@ -5822,10 +6035,10 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ test_b(ecx, kShortExternalStringMask);
__ j(not_zero, &call_runtime);
__ mov(eax, FieldOperand(eax, ExternalString::kResourceDataOffset));
- STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
+ STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
__ jmp(&first_prepared, Label::kNear);
__ bind(&first_is_sequential);
- __ add(eax, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ add(eax, Immediate(SeqOneByteString::kHeaderSize - kHeapObjectTag));
__ bind(&first_prepared);
__ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
@@ -5843,10 +6056,10 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ test_b(edi, kShortExternalStringMask);
__ j(not_zero, &call_runtime);
__ mov(edx, FieldOperand(edx, ExternalString::kResourceDataOffset));
- STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
+ STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
__ jmp(&second_prepared, Label::kNear);
__ bind(&second_is_sequential);
- __ add(edx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ add(edx, Immediate(SeqOneByteString::kHeaderSize - kHeapObjectTag));
__ bind(&second_prepared);
// Push the addresses of both strings' first characters onto the stack.
@@ -5867,7 +6080,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// eax: result string
__ mov(ecx, eax);
// Locate first character of result.
- __ add(ecx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ add(ecx, Immediate(SeqOneByteString::kHeaderSize - kHeapObjectTag));
// Load first argument's length and first character location. Account for
// values currently on the stack when fetching arguments from it.
__ mov(edx, Operand(esp, 4 * kPointerSize));
@@ -6073,7 +6286,7 @@ void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm,
}
-void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
+void StringHelper::GenerateTwoCharacterStringTableProbe(MacroAssembler* masm,
Register c1,
Register c2,
Register scratch1,
@@ -6085,7 +6298,7 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
Register scratch = scratch3;
// Make sure that both characters are not digits as such strings have a
- // different hash algorithm. Don't try to look for these in the symbol table.
+ // different hash algorithm. Don't try to look for these in the string table.
Label not_array_index;
__ mov(scratch, c1);
__ sub(scratch, Immediate(static_cast<int>('0')));
@@ -6111,47 +6324,47 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
// chars: two character string, char 1 in byte 0 and char 2 in byte 1.
// hash: hash of two character string.
- // Load the symbol table.
- Register symbol_table = c2;
+ // Load the string table.
+ Register string_table = c2;
ExternalReference roots_array_start =
ExternalReference::roots_array_start(masm->isolate());
- __ mov(scratch, Immediate(Heap::kSymbolTableRootIndex));
- __ mov(symbol_table,
+ __ mov(scratch, Immediate(Heap::kStringTableRootIndex));
+ __ mov(string_table,
Operand::StaticArray(scratch, times_pointer_size, roots_array_start));
- // Calculate capacity mask from the symbol table capacity.
+ // Calculate capacity mask from the string table capacity.
Register mask = scratch2;
- __ mov(mask, FieldOperand(symbol_table, SymbolTable::kCapacityOffset));
+ __ mov(mask, FieldOperand(string_table, StringTable::kCapacityOffset));
__ SmiUntag(mask);
__ sub(mask, Immediate(1));
// Registers
// chars: two character string, char 1 in byte 0 and char 2 in byte 1.
// hash: hash of two character string
- // symbol_table: symbol table
+ // string_table: string table
// mask: capacity mask
// scratch: -
- // Perform a number of probes in the symbol table.
+ // Perform a number of probes in the string table.
static const int kProbes = 4;
- Label found_in_symbol_table;
+ Label found_in_string_table;
Label next_probe[kProbes], next_probe_pop_mask[kProbes];
Register candidate = scratch; // Scratch register contains candidate.
for (int i = 0; i < kProbes; i++) {
- // Calculate entry in symbol table.
+ // Calculate entry in string table.
__ mov(scratch, hash);
if (i > 0) {
- __ add(scratch, Immediate(SymbolTable::GetProbeOffset(i)));
+ __ add(scratch, Immediate(StringTable::GetProbeOffset(i)));
}
__ and_(scratch, mask);
- // Load the entry from the symbol table.
- STATIC_ASSERT(SymbolTable::kEntrySize == 1);
+ // Load the entry from the string table.
+ STATIC_ASSERT(StringTable::kEntrySize == 1);
__ mov(candidate,
- FieldOperand(symbol_table,
+ FieldOperand(string_table,
scratch,
times_pointer_size,
- SymbolTable::kElementsStartOffset));
+ StringTable::kElementsStartOffset));
// If entry is undefined no string with this hash can be found.
Factory* factory = masm->isolate()->factory();
@@ -6177,10 +6390,10 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
temp, temp, &next_probe_pop_mask[i]);
// Check if the two characters match.
- __ mov(temp, FieldOperand(candidate, SeqAsciiString::kHeaderSize));
+ __ mov(temp, FieldOperand(candidate, SeqOneByteString::kHeaderSize));
__ and_(temp, 0x0000ffff);
__ cmp(chars, temp);
- __ j(equal, &found_in_symbol_table);
+ __ j(equal, &found_in_string_table);
__ bind(&next_probe_pop_mask[i]);
__ pop(mask);
__ bind(&next_probe[i]);
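The loop above is a bounded open-addressing probe over a power-of-two
table. A hedged C++ model of its control flow (the slot layout and the
probe schedule are simplified; the real offsets come from
StringTable::GetProbeOffset):

    #include <cstdint>
    #include <vector>

    // Toy model: mask is capacity - 1. An undefined (here: null) slot
    // proves the string is absent; only kProbes slots are tried, so a
    // "not found" exit is not a guarantee and the caller must still fall
    // back to the runtime.
    const uint16_t* ProbeTwoCharTable(
        const std::vector<const uint16_t*>& table,
        uint32_t hash, uint16_t chars) {
      const int kProbes = 4;
      uint32_t mask = static_cast<uint32_t>(table.size()) - 1;
      for (int i = 0; i < kProbes; i++) {
        uint32_t probe_offset = (i + i * i) / 2;  // placeholder schedule
        const uint16_t* candidate = table[(hash + probe_offset) & mask];
        if (candidate == nullptr) return nullptr;   // undefined: absent
        if (*candidate == chars) return candidate;  // found_in_string_table
      }
      return nullptr;  // probe budget exhausted
    }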
@@ -6191,7 +6404,7 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
// Scratch register contains result when we fall through to here.
Register result = candidate;
- __ bind(&found_in_symbol_table);
+ __ bind(&found_in_string_table);
__ pop(mask); // Pop saved mask from the stack.
if (!result.is(eax)) {
__ mov(eax, result);
@@ -6309,6 +6522,10 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ ret(3 * kPointerSize);
__ bind(&not_original_string);
+ Label single_char;
+ __ cmp(ecx, Immediate(Smi::FromInt(1)));
+ __ j(equal, &single_char);
+
// eax: string
// ebx: instance type
// ecx: sub string length (smi)
@@ -6405,7 +6622,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ j(not_zero, &runtime);
__ mov(edi, FieldOperand(edi, ExternalString::kResourceDataOffset));
// Move the pointer so that offset-wise, it looks like a sequential string.
- STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
__ sub(edi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
__ bind(&sequential_string);
@@ -6425,12 +6642,12 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ mov(edx, esi); // esi used by following code.
// Locate first character of result.
__ mov(edi, eax);
- __ add(edi, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ add(edi, Immediate(SeqOneByteString::kHeaderSize - kHeapObjectTag));
// Load string argument and locate character of sub string start.
__ pop(esi);
__ pop(ebx);
__ SmiUntag(ebx);
- __ lea(esi, FieldOperand(esi, ebx, times_1, SeqAsciiString::kHeaderSize));
+ __ lea(esi, FieldOperand(esi, ebx, times_1, SeqOneByteString::kHeaderSize));
// eax: result string
// ecx: result length
@@ -6479,6 +6696,17 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// Just jump to runtime to create the sub string.
__ bind(&runtime);
__ TailCallRuntime(Runtime::kSubString, 3, 1);
+
+ __ bind(&single_char);
+ // eax: string
+ // ebx: instance type
+ // ecx: sub string length (smi)
+ // edx: from index (smi)
+ StringCharAtGenerator generator(
+ eax, edx, ecx, eax, &runtime, &runtime, &runtime, STRING_INDEX_IS_NUMBER);
+ generator.GenerateFast(masm);
+ __ ret(3 * kPointerSize);
+ generator.SkipSlow(masm, &runtime);
}
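The new single_char block short-circuits length-1 substrings through the
char-at machinery. In plain C++ terms (illustrative only; the real path
returns a cached single-character string instead of allocating):

    #include <string>

    std::string SubStringFastPath(const std::string& s,
                                  size_t from, size_t to) {
      if (to - from == 1) {
        // Stands in for StringCharAtGenerator: one character, no copy loop.
        return std::string(1, s[from]);
      }
      return s.substr(from, to - from);  // longer cases take the old paths
    }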
@@ -6555,7 +6783,12 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
// Compare lengths - strings up to min-length are equal.
__ bind(&compare_lengths);
__ test(length_delta, length_delta);
+#ifndef ENABLE_LATIN_1
__ j(not_zero, &result_not_equal, Label::kNear);
+#else
+ Label length_not_equal;
+ __ j(not_zero, &length_not_equal, Label::kNear);
+#endif
// Result is EQUAL.
STATIC_ASSERT(EQUAL == 0);
@@ -6564,8 +6797,19 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
__ ret(0);
Label result_greater;
+#ifdef ENABLE_LATIN_1
+ Label result_less;
+ __ bind(&length_not_equal);
+ __ j(greater, &result_greater, Label::kNear);
+ __ jmp(&result_less, Label::kNear);
+#endif
__ bind(&result_not_equal);
+#ifndef ENABLE_LATIN_1
__ j(greater, &result_greater, Label::kNear);
+#else
+ __ j(above, &result_greater, Label::kNear);
+ __ bind(&result_less);
+#endif
// Result is LESS.
__ Set(eax, Immediate(Smi::FromInt(LESS)));
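The reason for the label split under ENABLE_LATIN_1: the length delta is a
signed quantity, but once characters can span the full 0x00..0xFF range the
byte comparison must be ordered unsigned, hence "above" where the old code
used "greater". A scalar model of the whole comparison:

    // Returns -1 / 0 / 1 like LESS / EQUAL / GREATER above.
    int CompareFlatOneByte(const unsigned char* a, const unsigned char* b,
                           int len_a, int len_b) {
      int min_len = len_a < len_b ? len_a : len_b;
      for (int i = 0; i < min_len; i++) {
        if (a[i] != b[i]) return a[i] < b[i] ? -1 : 1;  // unsigned order
      }
      // Only lengths remain; a signed comparison is correct here.
      return (len_a == len_b) ? 0 : (len_a < len_b ? -1 : 1);
    }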
@@ -6591,9 +6835,9 @@ void StringCompareStub::GenerateAsciiCharsCompareLoop(
// doesn't need an additional compare.
__ SmiUntag(length);
__ lea(left,
- FieldOperand(left, length, times_1, SeqAsciiString::kHeaderSize));
+ FieldOperand(left, length, times_1, SeqOneByteString::kHeaderSize));
__ lea(right,
- FieldOperand(right, length, times_1, SeqAsciiString::kHeaderSize));
+ FieldOperand(right, length, times_1, SeqOneByteString::kHeaderSize));
__ neg(length);
Register index = length; // index = -length;
@@ -6648,7 +6892,7 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::SMIS);
+ ASSERT(state_ == CompareIC::SMI);
Label miss;
__ mov(ecx, edx);
__ or_(ecx, eax);
@@ -6673,32 +6917,53 @@ void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
}
-void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::HEAP_NUMBERS);
+void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
+ ASSERT(state_ == CompareIC::NUMBER);
Label generic_stub;
Label unordered, maybe_undefined1, maybe_undefined2;
Label miss;
- __ mov(ecx, edx);
- __ and_(ecx, eax);
- __ JumpIfSmi(ecx, &generic_stub, Label::kNear);
- __ CmpObjectType(eax, HEAP_NUMBER_TYPE, ecx);
- __ j(not_equal, &maybe_undefined1, Label::kNear);
- __ CmpObjectType(edx, HEAP_NUMBER_TYPE, ecx);
- __ j(not_equal, &maybe_undefined2, Label::kNear);
+ if (left_ == CompareIC::SMI) {
+ __ JumpIfNotSmi(edx, &miss);
+ }
+ if (right_ == CompareIC::SMI) {
+ __ JumpIfNotSmi(eax, &miss);
+ }
// Inlining the double comparison and falling back to the general compare
- // stub if NaN is involved or SS2 or CMOV is unsupported.
+ // stub if NaN is involved or SSE2 or CMOV is unsupported.
if (CpuFeatures::IsSupported(SSE2) && CpuFeatures::IsSupported(CMOV)) {
CpuFeatures::Scope scope1(SSE2);
CpuFeatures::Scope scope2(CMOV);
- // Load left and right operand
- __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
+ // Load left and right operand.
+ Label done, left, left_smi, right_smi;
+ __ JumpIfSmi(eax, &right_smi, Label::kNear);
+ __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
+ masm->isolate()->factory()->heap_number_map());
+ __ j(not_equal, &maybe_undefined1, Label::kNear);
__ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
+ __ jmp(&left, Label::kNear);
+ __ bind(&right_smi);
+ __ mov(ecx, eax); // Can't clobber eax because we can still jump away.
+ __ SmiUntag(ecx);
+ __ cvtsi2sd(xmm1, ecx);
+
+ __ bind(&left);
+ __ JumpIfSmi(edx, &left_smi, Label::kNear);
+ __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
+ masm->isolate()->factory()->heap_number_map());
+ __ j(not_equal, &maybe_undefined2, Label::kNear);
+ __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
+ __ jmp(&done);
+ __ bind(&left_smi);
+ __ mov(ecx, edx); // Can't clobber edx because we can still jump away.
+ __ SmiUntag(ecx);
+ __ cvtsi2sd(xmm0, ecx);
- // Compare operands
+ __ bind(&done);
+ // Compare operands.
__ ucomisd(xmm0, xmm1);
// Don't base result on EFLAGS when a NaN is involved.
@@ -6712,17 +6977,30 @@ void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
__ mov(ecx, Immediate(Smi::FromInt(-1)));
__ cmov(below, eax, ecx);
__ ret(0);
+ } else {
+ __ mov(ecx, edx);
+ __ and_(ecx, eax);
+ __ JumpIfSmi(ecx, &generic_stub, Label::kNear);
+
+ __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
+ masm->isolate()->factory()->heap_number_map());
+ __ j(not_equal, &maybe_undefined1, Label::kNear);
+ __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
+ masm->isolate()->factory()->heap_number_map());
+ __ j(not_equal, &maybe_undefined2, Label::kNear);
}
__ bind(&unordered);
- CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS);
__ bind(&generic_stub);
- __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
+ ICCompareStub stub(op_, CompareIC::GENERIC, CompareIC::GENERIC,
+ CompareIC::GENERIC);
+ __ jmp(stub.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
__ bind(&maybe_undefined1);
if (Token::IsOrderedRelationalCompareOp(op_)) {
__ cmp(eax, Immediate(masm->isolate()->factory()->undefined_value()));
__ j(not_equal, &miss);
+ __ JumpIfSmi(edx, &unordered);
__ CmpObjectType(edx, HEAP_NUMBER_TYPE, ecx);
__ j(not_equal, &maybe_undefined2, Label::kNear);
__ jmp(&unordered);
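Once each operand (smi or heap number) has been brought to a double, via
movdbl for heap numbers or SmiUntag plus cvtsi2sd for smis, the ucomisd
reduces to this sketch; UNORDERED is the NaN case that escapes to the
generic stub:

    #include <cmath>

    enum CompareResult { LESS = -1, EQUAL = 0, GREATER = 1, UNORDERED = 2 };

    CompareResult CompareAsDoubles(double left, double right) {
      if (std::isnan(left) || std::isnan(right)) return UNORDERED;
      if (left < right) return LESS;
      if (left > right) return GREATER;
      return EQUAL;
    }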
@@ -6739,8 +7017,8 @@ void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
}
-void ICCompareStub::GenerateSymbols(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::SYMBOLS);
+void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) {
+ ASSERT(state_ == CompareIC::INTERNALIZED_STRING);
ASSERT(GetCondition() == equal);
// Registers containing left and right operands respectively.
@@ -6756,17 +7034,74 @@ void ICCompareStub::GenerateSymbols(MacroAssembler* masm) {
__ and_(tmp1, right);
__ JumpIfSmi(tmp1, &miss, Label::kNear);
- // Check that both operands are symbols.
+ // Check that both operands are internalized strings.
__ mov(tmp1, FieldOperand(left, HeapObject::kMapOffset));
__ mov(tmp2, FieldOperand(right, HeapObject::kMapOffset));
__ movzx_b(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
__ movzx_b(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
- STATIC_ASSERT(kSymbolTag != 0);
+ STATIC_ASSERT(kInternalizedTag != 0);
__ and_(tmp1, tmp2);
- __ test(tmp1, Immediate(kIsSymbolMask));
+ __ test(tmp1, Immediate(kIsInternalizedMask));
__ j(zero, &miss, Label::kNear);
- // Symbols are compared by identity.
+ // Internalized strings are compared by identity.
+ Label done;
+ __ cmp(left, right);
+ // Make sure eax is non-zero. At this point input operands are
+ // guaranteed to be non-zero.
+ ASSERT(right.is(eax));
+ __ j(not_equal, &done, Label::kNear);
+ STATIC_ASSERT(EQUAL == 0);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
+ __ bind(&done);
+ __ ret(0);
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+}
+
+
+void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) {
+ ASSERT(state_ == CompareIC::UNIQUE_NAME);
+ ASSERT(GetCondition() == equal);
+
+ // Registers containing left and right operands respectively.
+ Register left = edx;
+ Register right = eax;
+ Register tmp1 = ecx;
+ Register tmp2 = ebx;
+
+ // Check that both operands are heap objects.
+ Label miss;
+ __ mov(tmp1, left);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ and_(tmp1, right);
+ __ JumpIfSmi(tmp1, &miss, Label::kNear);
+
+ // Check that both operands are unique names. This leaves the instance
+ // types loaded in tmp1 and tmp2.
+ STATIC_ASSERT(kInternalizedTag != 0);
+ __ mov(tmp1, FieldOperand(left, HeapObject::kMapOffset));
+ __ mov(tmp2, FieldOperand(right, HeapObject::kMapOffset));
+ __ movzx_b(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
+ __ movzx_b(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
+
+ Label succeed1;
+ __ test(tmp1, Immediate(kIsInternalizedMask));
+ __ j(not_zero, &succeed1);
+ __ cmpb(tmp1, static_cast<uint8_t>(SYMBOL_TYPE));
+ __ j(not_equal, &miss);
+ __ bind(&succeed1);
+
+ Label succeed2;
+ __ test(tmp2, Immediate(kIsInternalizedMask));
+ __ j(not_zero, &succeed2);
+ __ cmpb(tmp2, static_cast<uint8_t>(SYMBOL_TYPE));
+ __ j(not_equal, &miss);
+ __ bind(&succeed2);
+
+ // Unique names are compared by identity.
Label done;
__ cmp(left, right);
// Make sure eax is non-zero. At this point input operands are
@@ -6785,7 +7120,7 @@ void ICCompareStub::GenerateSymbols(MacroAssembler* masm) {
void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::STRINGS);
+ ASSERT(state_ == CompareIC::STRING);
Label miss;
bool equality = Token::IsEqualityOp(op_);
@@ -6827,14 +7162,14 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
// Handle not identical strings.
__ bind(&not_same);
- // Check that both strings are symbols. If they are, we're done
+ // Check that both strings are internalized. If they are, we're done
// because we already know they are not identical. But in the case of
// non-equality compare, we still need to determine the order.
if (equality) {
Label do_compare;
- STATIC_ASSERT(kSymbolTag != 0);
+ STATIC_ASSERT(kInternalizedTag != 0);
__ and_(tmp1, tmp2);
- __ test(tmp1, Immediate(kIsSymbolMask));
+ __ test(tmp1, Immediate(kIsInternalizedMask));
__ j(zero, &do_compare, Label::kNear);
// Make sure eax is non-zero. At this point input operands are
// guaranteed to be non-zero.
@@ -6874,7 +7209,7 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::OBJECTS);
+ ASSERT(state_ == CompareIC::OBJECT);
Label miss;
__ mov(ecx, edx);
__ and_(ecx, eax);
@@ -6954,14 +7289,14 @@ void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
// the property. This function may return false negatives, so miss_label
// must always call a backup property check that is complete.
// This function is safe to call if the receiver has fast properties.
-// Name must be a symbol and receiver must be a heap object.
+// Name must be an internalized string and receiver must be a heap object.
void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
Label* miss,
Label* done,
Register properties,
Handle<String> name,
Register r0) {
- ASSERT(name->IsSymbol());
+ ASSERT(name->IsInternalizedString());
// If names of slots in range from 1 to kProbes - 1 for the hash value are
// not equal to the name and kProbes-th slot is not used (its name is the
@@ -6998,10 +7333,10 @@ void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
__ cmp(entity_name, masm->isolate()->factory()->the_hole_value());
__ j(equal, &the_hole, Label::kNear);
- // Check if the entry name is not a symbol.
+ // Check if the entry name is not an internalized string.
__ mov(entity_name, FieldOperand(entity_name, HeapObject::kMapOffset));
__ test_b(FieldOperand(entity_name, Map::kInstanceTypeOffset),
- kIsSymbolMask);
+ kIsInternalizedMask);
__ j(zero, miss);
__ bind(&the_hole);
}
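The contract of the probe sequence, as a hedged C++ model (this is not V8's
dictionary layout and the probe schedule is simplified): an empty slot
proves absence, while hitting the name itself or any key that is not an
internalized string forces the complete backup lookup the comment above
requires.

    #include <cstdint>
    #include <vector>

    struct Key { const char* id; bool internalized; };
    enum Outcome { PROVEN_ABSENT, NEED_FULL_LOOKUP };

    Outcome NegativeLookup(const std::vector<const Key*>& slots,
                           const Key* name, uint32_t hash) {
      const int kProbes = 4;
      uint32_t mask = static_cast<uint32_t>(slots.size()) - 1;
      for (int i = 0; i < kProbes; i++) {
        const Key* entry = slots[(hash + i) & mask];  // simplified schedule
        if (entry == nullptr) return PROVEN_ABSENT;   // empty: done
        if (entry == name) return NEED_FULL_LOOKUP;   // possible hit
        if (!entry->internalized) return NEED_FULL_LOOKUP;  // can't rule out
      }
      return NEED_FULL_LOOKUP;  // probe budget exhausted
    }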
@@ -7136,14 +7471,14 @@ void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
__ j(equal, &in_dictionary);
if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
- // If we hit a non symbol key during negative lookup
- // we have to bailout as this key might be equal to the
+ // If we hit a key that is not an internalized string during negative
+ // lookup we have to bail out as this key might be equal to the
// key we are looking for.
- // Check if the entry name is not a symbol.
+ // Check if the entry name is not an internalized string.
__ mov(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
__ test_b(FieldOperand(scratch, Map::kInstanceTypeOffset),
- kIsSymbolMask);
+ kIsInternalizedMask);
__ j(zero, &maybe_in_dictionary);
}
}
@@ -7233,19 +7568,20 @@ bool RecordWriteStub::IsPregenerated() {
}
-void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime() {
+void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
+ Isolate* isolate) {
StoreBufferOverflowStub stub1(kDontSaveFPRegs);
- stub1.GetCode()->set_is_pregenerated(true);
+ stub1.GetCode(isolate)->set_is_pregenerated(true);
CpuFeatures::TryForceFeatureScope scope(SSE2);
if (CpuFeatures::IsSupported(SSE2)) {
StoreBufferOverflowStub stub2(kSaveFPRegs);
- stub2.GetCode()->set_is_pregenerated(true);
+ stub2.GetCode(isolate)->set_is_pregenerated(true);
}
}
-void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() {
+void RecordWriteStub::GenerateFixedRegStubsAheadOfTime(Isolate* isolate) {
for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
!entry->object.is(no_reg);
entry++) {
@@ -7254,7 +7590,7 @@ void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() {
entry->address,
entry->action,
kDontSaveFPRegs);
- stub.GetCode()->set_is_pregenerated(true);
+ stub.GetCode(isolate)->set_is_pregenerated(true);
}
}
@@ -7351,13 +7687,7 @@ void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
int argument_count = 3;
__ PrepareCallCFunction(argument_count, regs_.scratch0());
__ mov(Operand(esp, 0 * kPointerSize), regs_.object());
- if (mode == INCREMENTAL_COMPACTION) {
- __ mov(Operand(esp, 1 * kPointerSize), regs_.address()); // Slot.
- } else {
- ASSERT(mode == INCREMENTAL);
- __ mov(regs_.scratch0(), Operand(regs_.address(), 0));
- __ mov(Operand(esp, 1 * kPointerSize), regs_.scratch0()); // Value.
- }
+ __ mov(Operand(esp, 1 * kPointerSize), regs_.address()); // Slot.
__ mov(Operand(esp, 2 * kPointerSize),
Immediate(ExternalReference::isolate_address()));
@@ -7552,6 +7882,22 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
}
+void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
+ ASSERT(!Serializer::enabled());
+ bool save_fp_regs = CpuFeatures::IsSupported(SSE2);
+ CEntryStub ces(1, save_fp_regs ? kSaveFPRegs : kDontSaveFPRegs);
+ __ call(ces.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
+ int parameter_count_offset =
+ StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
+ __ mov(ebx, MemOperand(ebp, parameter_count_offset));
+ masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
+ __ pop(ecx);
+ __ lea(esp, MemOperand(esp, ebx, times_pointer_size,
+ extra_expression_stack_count_ * kPointerSize));
+ __ jmp(ecx); // Return to IC Miss stub, continuation still on stack.
+}
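The exit sequence's pointer arithmetic, spelled out as a worked sketch
(kPointerSize is 4 on ia32): pop(ecx) removes the return address, then the
lea drops the caller's stack parameters (count in ebx) plus the stub's
extra expression-stack slots before jumping back through ecx.

    #include <cstdint>

    uint32_t EspAfterTrampoline(uint32_t esp_after_pop,
                                uint32_t parameter_count,
                                uint32_t extra_expression_stack_count) {
      const uint32_t kPointerSize = 4;
      return esp_after_pop +
             (parameter_count + extra_expression_stack_count) * kPointerSize;
    }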
+
+
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
if (entry_hook_ != NULL) {
ProfileEntryHookStub stub;
@@ -7575,7 +7921,7 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
// Call the entry hook.
int32_t hook_location = reinterpret_cast<int32_t>(&entry_hook_);
- __ call(Operand(hook_location, RelocInfo::NONE));
+ __ call(Operand(hook_location, RelocInfo::NONE32));
__ add(esp, Immediate(2 * kPointerSize));
// Restore ecx.
diff --git a/src/3rdparty/v8/src/ia32/code-stubs-ia32.h b/src/3rdparty/v8/src/ia32/code-stubs-ia32.h
index 803a711..e6bb38a 100644
--- a/src/3rdparty/v8/src/ia32/code-stubs-ia32.h
+++ b/src/3rdparty/v8/src/ia32/code-stubs-ia32.h
@@ -38,7 +38,7 @@ namespace internal {
// Compute a transcendental math function natively, or call the
// TranscendentalCache runtime function.
-class TranscendentalCacheStub: public CodeStub {
+class TranscendentalCacheStub: public PlatformCodeStub {
public:
enum ArgumentType {
TAGGED = 0,
@@ -61,7 +61,7 @@ class TranscendentalCacheStub: public CodeStub {
};
-class StoreBufferOverflowStub: public CodeStub {
+class StoreBufferOverflowStub: public PlatformCodeStub {
public:
explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
: save_doubles_(save_fp) { }
@@ -69,7 +69,7 @@ class StoreBufferOverflowStub: public CodeStub {
void Generate(MacroAssembler* masm);
virtual bool IsPregenerated() { return true; }
- static void GenerateFixedRegStubsAheadOfTime();
+ static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
virtual bool SometimesSetsUpAFrame() { return false; }
private:
@@ -80,7 +80,7 @@ class StoreBufferOverflowStub: public CodeStub {
};
-class UnaryOpStub: public CodeStub {
+class UnaryOpStub: public PlatformCodeStub {
public:
UnaryOpStub(Token::Value op,
UnaryOverwriteMode mode,
@@ -131,9 +131,9 @@ class UnaryOpStub: public CodeStub {
Label::Distance non_smi_near = Label::kFar);
void GenerateSmiCodeUndo(MacroAssembler* masm);
- void GenerateHeapNumberStub(MacroAssembler* masm);
- void GenerateHeapNumberStubSub(MacroAssembler* masm);
- void GenerateHeapNumberStubBitNot(MacroAssembler* masm);
+ void GenerateNumberStub(MacroAssembler* masm);
+ void GenerateNumberStubSub(MacroAssembler* masm);
+ void GenerateNumberStubBitNot(MacroAssembler* masm);
void GenerateHeapNumberCodeSub(MacroAssembler* masm, Label* slow);
void GenerateHeapNumberCodeBitNot(MacroAssembler* masm, Label* slow);
@@ -154,96 +154,6 @@ class UnaryOpStub: public CodeStub {
};
-class BinaryOpStub: public CodeStub {
- public:
- BinaryOpStub(Token::Value op, OverwriteMode mode)
- : op_(op),
- mode_(mode),
- operands_type_(BinaryOpIC::UNINITIALIZED),
- result_type_(BinaryOpIC::UNINITIALIZED) {
- use_sse3_ = CpuFeatures::IsSupported(SSE3);
- ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
- }
-
- BinaryOpStub(
- int key,
- BinaryOpIC::TypeInfo operands_type,
- BinaryOpIC::TypeInfo result_type = BinaryOpIC::UNINITIALIZED)
- : op_(OpBits::decode(key)),
- mode_(ModeBits::decode(key)),
- use_sse3_(SSE3Bits::decode(key)),
- operands_type_(operands_type),
- result_type_(result_type) { }
-
- private:
- enum SmiCodeGenerateHeapNumberResults {
- ALLOW_HEAPNUMBER_RESULTS,
- NO_HEAPNUMBER_RESULTS
- };
-
- Token::Value op_;
- OverwriteMode mode_;
- bool use_sse3_;
-
- // Operand type information determined at runtime.
- BinaryOpIC::TypeInfo operands_type_;
- BinaryOpIC::TypeInfo result_type_;
-
- virtual void PrintName(StringStream* stream);
-
- // Minor key encoding in 16 bits RRRTTTSOOOOOOOMM.
- class ModeBits: public BitField<OverwriteMode, 0, 2> {};
- class OpBits: public BitField<Token::Value, 2, 7> {};
- class SSE3Bits: public BitField<bool, 9, 1> {};
- class OperandTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 10, 3> {};
- class ResultTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 13, 3> {};
-
- Major MajorKey() { return BinaryOp; }
- int MinorKey() {
- return OpBits::encode(op_)
- | ModeBits::encode(mode_)
- | SSE3Bits::encode(use_sse3_)
- | OperandTypeInfoBits::encode(operands_type_)
- | ResultTypeInfoBits::encode(result_type_);
- }
-
- void Generate(MacroAssembler* masm);
- void GenerateGeneric(MacroAssembler* masm);
- void GenerateSmiCode(MacroAssembler* masm,
- Label* slow,
- SmiCodeGenerateHeapNumberResults heapnumber_results);
- void GenerateLoadArguments(MacroAssembler* masm);
- void GenerateReturn(MacroAssembler* masm);
- void GenerateUninitializedStub(MacroAssembler* masm);
- void GenerateSmiStub(MacroAssembler* masm);
- void GenerateInt32Stub(MacroAssembler* masm);
- void GenerateHeapNumberStub(MacroAssembler* masm);
- void GenerateOddballStub(MacroAssembler* masm);
- void GenerateStringStub(MacroAssembler* masm);
- void GenerateBothStringStub(MacroAssembler* masm);
- void GenerateGenericStub(MacroAssembler* masm);
- void GenerateAddStrings(MacroAssembler* masm);
-
- void GenerateHeapResultAllocation(MacroAssembler* masm, Label* alloc_failure);
- void GenerateRegisterArgsPush(MacroAssembler* masm);
- void GenerateTypeTransition(MacroAssembler* masm);
- void GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm);
-
- virtual int GetCodeKind() { return Code::BINARY_OP_IC; }
-
- virtual InlineCacheState GetICState() {
- return BinaryOpIC::ToState(operands_type_);
- }
-
- virtual void FinishCode(Handle<Code> code) {
- code->set_binary_op_type(operands_type_);
- code->set_binary_op_result_type(result_type_);
- }
-
- friend class CodeGenerator;
-};
-
-
class StringHelper : public AllStatic {
public:
// Generate code for copying characters using a simple loop. This should only
@@ -267,15 +177,15 @@ class StringHelper : public AllStatic {
Register scratch, // Neither of above.
bool ascii);
- // Probe the symbol table for a two character string. If the string
+ // Probe the string table for a two character string. If the string
// requires non-standard hashing, a jump to the label not_probed is
// performed and registers c1 and c2 are preserved. In all other
// cases they are clobbered. If the string is not found by probing a
// jump to the label not_found is performed. This jump does not
- // guarantee that the string is not in the symbol table. If the
+ // guarantee that the string is not in the string table. If the
// string is found the code falls through with the string in
// register eax.
- static void GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
+ static void GenerateTwoCharacterStringTableProbe(MacroAssembler* masm,
Register c1,
Register c2,
Register scratch1,
@@ -315,7 +225,7 @@ enum StringAddFlags {
};
-class StringAddStub: public CodeStub {
+class StringAddStub: public PlatformCodeStub {
public:
explicit StringAddStub(StringAddFlags flags) : flags_(flags) {}
@@ -337,7 +247,7 @@ class StringAddStub: public CodeStub {
};
-class SubStringStub: public CodeStub {
+class SubStringStub: public PlatformCodeStub {
public:
SubStringStub() {}
@@ -349,7 +259,7 @@ class SubStringStub: public CodeStub {
};
-class StringCompareStub: public CodeStub {
+class StringCompareStub: public PlatformCodeStub {
public:
StringCompareStub() { }
@@ -385,7 +295,7 @@ class StringCompareStub: public CodeStub {
};
-class NumberToStringStub: public CodeStub {
+class NumberToStringStub: public PlatformCodeStub {
public:
NumberToStringStub() { }
@@ -410,7 +320,7 @@ class NumberToStringStub: public CodeStub {
};
-class StringDictionaryLookupStub: public CodeStub {
+class StringDictionaryLookupStub: public PlatformCodeStub {
public:
enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
@@ -472,7 +382,7 @@ class StringDictionaryLookupStub: public CodeStub {
};
-class RecordWriteStub: public CodeStub {
+class RecordWriteStub: public PlatformCodeStub {
public:
RecordWriteStub(Register object,
Register value,
@@ -496,7 +406,7 @@ class RecordWriteStub: public CodeStub {
};
virtual bool IsPregenerated();
- static void GenerateFixedRegStubsAheadOfTime();
+ static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
virtual bool SometimesSetsUpAFrame() { return false; }
static const byte kTwoByteNopInstruction = 0x3c; // Cmpb al, #imm8.
@@ -675,7 +585,7 @@ class RecordWriteStub: public CodeStub {
Register GetRegThatIsNotEcxOr(Register r1,
Register r2,
Register r3) {
- for (int i = 0; i < Register::kNumAllocatableRegisters; i++) {
+ for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
Register candidate = Register::FromAllocationIndex(i);
if (candidate.is(ecx)) continue;
if (candidate.is(r1)) continue;
diff --git a/src/3rdparty/v8/src/ia32/codegen-ia32.cc b/src/3rdparty/v8/src/ia32/codegen-ia32.cc
index 4c79519..5368811 100644
--- a/src/3rdparty/v8/src/ia32/codegen-ia32.cc
+++ b/src/3rdparty/v8/src/ia32/codegen-ia32.cc
@@ -94,7 +94,45 @@ UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) {
CodeDesc desc;
masm.GetCode(&desc);
- ASSERT(desc.reloc_size == 0);
+ ASSERT(!RelocInfo::RequiresRelocation(desc));
+
+ CPU::FlushICache(buffer, actual_size);
+ OS::ProtectCode(buffer, actual_size);
+ return FUNCTION_CAST<UnaryMathFunction>(buffer);
+}
+
+
+UnaryMathFunction CreateExpFunction() {
+ if (!CpuFeatures::IsSupported(SSE2)) return &exp;
+ if (!FLAG_fast_math) return &exp;
+ size_t actual_size;
+ byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
+ if (buffer == NULL) return &exp;
+ ExternalReference::InitializeMathExpData();
+
+ MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
+ // esp[1 * kPointerSize]: raw double input
+ // esp[0 * kPointerSize]: return address
+ {
+ CpuFeatures::Scope use_sse2(SSE2);
+ XMMRegister input = xmm1;
+ XMMRegister result = xmm2;
+ __ movdbl(input, Operand(esp, 1 * kPointerSize));
+ __ push(eax);
+ __ push(ebx);
+
+ MathExpGenerator::EmitMathExp(&masm, input, result, xmm0, eax, ebx);
+
+ __ pop(ebx);
+ __ pop(eax);
+ __ movdbl(Operand(esp, 1 * kPointerSize), result);
+ __ fld_d(Operand(esp, 1 * kPointerSize));
+ __ Ret();
+ }
+
+ CodeDesc desc;
+ masm.GetCode(&desc);
+ ASSERT(!RelocInfo::RequiresRelocation(desc));
CPU::FlushICache(buffer, actual_size);
OS::ProtectCode(buffer, actual_size);
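A hypothetical call site for the generated routine (the typedef is assumed
to match codegen-ia32.h; the fallback behavior is taken straight from the
guards at the top of CreateExpFunction):

    typedef double (*UnaryMathFunction)(double x);
    UnaryMathFunction CreateExpFunction();  // defined above

    double FastExp(double x) {
      // Resolved once: &exp when SSE2 or --fast-math is unavailable,
      // otherwise the icache-flushed, write-protected generated buffer.
      static UnaryMathFunction fn = CreateExpFunction();
      return fn(x);
    }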
@@ -127,7 +165,7 @@ UnaryMathFunction CreateSqrtFunction() {
CodeDesc desc;
masm.GetCode(&desc);
- ASSERT(desc.reloc_size == 0);
+ ASSERT(!RelocInfo::RequiresRelocation(desc));
CPU::FlushICache(buffer, actual_size);
OS::ProtectCode(buffer, actual_size);
@@ -337,7 +375,7 @@ OS::MemCopyFunction CreateMemCopyFunction() {
CodeDesc desc;
masm.GetCode(&desc);
- ASSERT(desc.reloc_size == 0);
+ ASSERT(!RelocInfo::RequiresRelocation(desc));
CPU::FlushICache(buffer, actual_size);
OS::ProtectCode(buffer, actual_size);
@@ -351,8 +389,10 @@ OS::MemCopyFunction CreateMemCopyFunction() {
#define __ ACCESS_MASM(masm)
+
void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
- MacroAssembler* masm) {
+ MacroAssembler* masm, AllocationSiteMode mode,
+ Label* allocation_site_info_found) {
// ----------- S t a t e -------------
// -- eax : value
// -- ebx : target map
@@ -360,6 +400,12 @@ void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
+ if (mode == TRACK_ALLOCATION_SITE) {
+ ASSERT(allocation_site_info_found != NULL);
+ __ TestJSArrayForAllocationSiteInfo(edx, edi);
+ __ j(equal, allocation_site_info_found);
+ }
+
// Set transitioned map.
__ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
__ RecordWriteField(edx,
@@ -373,7 +419,7 @@ void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
void ElementsTransitionGenerator::GenerateSmiToDouble(
- MacroAssembler* masm, Label* fail) {
+ MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
// ----------- S t a t e -------------
// -- eax : value
// -- ebx : target map
@@ -383,6 +429,11 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
// -----------------------------------
Label loop, entry, convert_hole, gc_required, only_change_map;
+ if (mode == TRACK_ALLOCATION_SITE) {
+ __ TestJSArrayForAllocationSiteInfo(edx, edi);
+ __ j(equal, fail);
+ }
+
// Check for empty arrays, which only require a map transition and no changes
// to the backing store.
__ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
@@ -397,24 +448,11 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
// Allocate new FixedDoubleArray.
// edx: receiver
// edi: length of source FixedArray (smi-tagged)
- __ lea(esi, Operand(edi,
- times_4,
- FixedDoubleArray::kHeaderSize + kPointerSize));
- __ AllocateInNewSpace(esi, eax, ebx, no_reg, &gc_required, TAG_OBJECT);
-
- Label aligned, aligned_done;
- __ test(eax, Immediate(kDoubleAlignmentMask - kHeapObjectTag));
- __ j(zero, &aligned, Label::kNear);
- __ mov(FieldOperand(eax, 0),
- Immediate(masm->isolate()->factory()->one_pointer_filler_map()));
- __ add(eax, Immediate(kPointerSize));
- __ jmp(&aligned_done);
-
- __ bind(&aligned);
- __ mov(Operand(eax, esi, times_1, -kPointerSize-1),
- Immediate(masm->isolate()->factory()->one_pointer_filler_map()));
-
- __ bind(&aligned_done);
+ AllocationFlags flags =
+ static_cast<AllocationFlags>(TAG_OBJECT | DOUBLE_ALIGNMENT);
+ __ AllocateInNewSpace(FixedDoubleArray::kHeaderSize, times_8,
+ edi, REGISTER_VALUE_IS_SMI,
+ eax, ebx, no_reg, &gc_required, flags);
// eax: destination FixedDoubleArray
// edi: number of elements
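The hand-written filler sequence that was deleted and the new
DOUBLE_ALIGNMENT flag express the same idea; a conceptual sketch,
simplified from the macro assembler:

    #include <cstdint>

    // If the allocation does not start on an 8-byte boundary, plant a
    // one-word filler (keeping the heap iterable) and bump past it, so
    // the doubles in the FixedDoubleArray land naturally aligned.
    uintptr_t AlignNewSpaceAllocation(uintptr_t top, bool* filler_needed) {
      const uintptr_t kDoubleAlignmentMask = 8 - 1;
      const uintptr_t kPointerSize = 4;  // ia32
      *filler_needed = (top & kDoubleAlignmentMask) != 0;
      return *filler_needed ? top + kPointerSize : top;
    }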
@@ -521,7 +559,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
void ElementsTransitionGenerator::GenerateDoubleToObject(
- MacroAssembler* masm, Label* fail) {
+ MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
// ----------- S t a t e -------------
// -- eax : value
// -- ebx : target map
@@ -531,6 +569,11 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
// -----------------------------------
Label loop, entry, convert_hole, gc_required, only_change_map, success;
+ if (mode == TRACK_ALLOCATION_SITE) {
+ __ TestJSArrayForAllocationSiteInfo(edx, edi);
+ __ j(equal, fail);
+ }
+
// Check for empty arrays, which only require a map transition and no changes
// to the backing store.
__ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
@@ -751,7 +794,108 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
__ movzx_b(result, FieldOperand(string,
index,
times_1,
- SeqAsciiString::kHeaderSize));
+ SeqOneByteString::kHeaderSize));
+ __ bind(&done);
+}
+
+
+void SeqStringSetCharGenerator::Generate(MacroAssembler* masm,
+ String::Encoding encoding,
+ Register string,
+ Register index,
+ Register value) {
+ if (FLAG_debug_code) {
+ __ test(index, Immediate(kSmiTagMask));
+ __ Check(zero, "Non-smi index");
+ __ test(value, Immediate(kSmiTagMask));
+ __ Check(zero, "Non-smi value");
+
+ __ cmp(index, FieldOperand(string, String::kLengthOffset));
+ __ Check(less, "Index is too large");
+
+ __ cmp(index, Immediate(Smi::FromInt(0)));
+ __ Check(greater_equal, "Index is negative");
+
+ __ push(value);
+ __ mov(value, FieldOperand(string, HeapObject::kMapOffset));
+ __ movzx_b(value, FieldOperand(value, Map::kInstanceTypeOffset));
+
+ __ and_(value, Immediate(kStringRepresentationMask | kStringEncodingMask));
+ static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
+ static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
+ __ cmp(value, Immediate(encoding == String::ONE_BYTE_ENCODING
+ ? one_byte_seq_type : two_byte_seq_type));
+ __ Check(equal, "Unexpected string type");
+ __ pop(value);
+ }
+
+ __ SmiUntag(value);
+ STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
+ if (encoding == String::ONE_BYTE_ENCODING) {
+ __ SmiUntag(index);
+ __ mov_b(FieldOperand(string, index, times_1, SeqString::kHeaderSize),
+ value);
+ } else {
+ // No need to untag a smi for two-byte addressing.
+ __ mov_w(FieldOperand(string, index, times_1, SeqString::kHeaderSize),
+ value);
+ }
+}
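The asymmetry between the two branches is deliberate: the STATIC_ASSERT
pins kSmiTagSize to 1, so an ia32 smi stores its value shifted left by one
and a smi index already equals index * 2, which is exactly the scaling a
two-byte element needs. The offset math as a sketch:

    inline int ElementByteOffset(int smi_index, bool one_byte_encoding) {
      int index = smi_index >> 1;       // SmiUntag
      return one_byte_encoding
          ? index        // times_1 after untagging
          : index * 2;   // equals smi_index, hence no untag in that branch
    }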
+
+
+static Operand ExpConstant(int index) {
+ return Operand::StaticVariable(ExternalReference::math_exp_constants(index));
+}
+
+
+void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
+ XMMRegister input,
+ XMMRegister result,
+ XMMRegister double_scratch,
+ Register temp1,
+ Register temp2) {
+ ASSERT(!input.is(double_scratch));
+ ASSERT(!input.is(result));
+ ASSERT(!result.is(double_scratch));
+ ASSERT(!temp1.is(temp2));
+ ASSERT(ExternalReference::math_exp_constants(0).address() != NULL);
+
+ Label done;
+
+ __ movdbl(double_scratch, ExpConstant(0));
+ __ xorpd(result, result);
+ __ ucomisd(double_scratch, input);
+ __ j(above_equal, &done);
+ __ ucomisd(input, ExpConstant(1));
+ __ movdbl(result, ExpConstant(2));
+ __ j(above_equal, &done);
+ __ movdbl(double_scratch, ExpConstant(3));
+ __ movdbl(result, ExpConstant(4));
+ __ mulsd(double_scratch, input);
+ __ addsd(double_scratch, result);
+ __ movd(temp2, double_scratch);
+ __ subsd(double_scratch, result);
+ __ movdbl(result, ExpConstant(6));
+ __ mulsd(double_scratch, ExpConstant(5));
+ __ subsd(double_scratch, input);
+ __ subsd(result, double_scratch);
+ __ movsd(input, double_scratch);
+ __ mulsd(input, double_scratch);
+ __ mulsd(result, input);
+ __ mov(temp1, temp2);
+ __ mulsd(result, ExpConstant(7));
+ __ subsd(result, double_scratch);
+ __ add(temp1, Immediate(0x1ff800));
+ __ addsd(result, ExpConstant(8));
+ __ and_(temp2, Immediate(0x7ff));
+ __ shr(temp1, 11);
+ __ shl(temp1, 20);
+ __ movd(input, temp1);
+ __ pshufd(input, input, static_cast<uint8_t>(0xe1)); // Order: 11 10 00 01
+ __ movdbl(double_scratch, Operand::StaticArray(
+ temp2, times_8, ExternalReference::math_exp_log_table()));
+ __ por(input, double_scratch);
+ __ mulsd(result, input);
__ bind(&done);
}
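A scalar model of the technique EmitMathExp encodes (the table size is
implied by the 0x7ff mask and the 11-bit shift above; the polynomial and
initialization here are illustrative, not V8's exact ExpConstant values):
split x so that exp(x) = 2^(k/2048) * exp(r), take the fractional power of
two from a table, and correct with a short polynomial.

    #include <cmath>

    double ModelExp(double x) {
      static const int kN = 2048;  // matches the 0x7ff mask / shr 11
      static double table[kN];
      static bool initialized = false;
      if (!initialized) {
        for (int j = 0; j < kN; j++) {
          table[j] = std::pow(2.0, static_cast<double>(j) / kN);
        }
        initialized = true;
      }
      const double ln2_over_n = std::log(2.0) / kN;
      long k = std::lround(x / ln2_over_n);  // x ~= k * ln(2)/2048
      double r = x - k * ln2_over_n;         // |r| <= ln(2)/4096
      double exp_r = 1.0 + r + r * r / 2.0;  // short series for exp(r)
      // 2^(k/2048) = 2^(k >> 11) * table[k & 2047].
      return std::ldexp(table[k & (kN - 1)],
                        static_cast<int>(k >> 11)) * exp_r;
    }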
@@ -778,42 +922,6 @@ static byte* GetNoCodeAgeSequence(uint32_t* length) {
}
-byte* Code::FindPlatformCodeAgeSequence() {
- byte* start = instruction_start();
- uint32_t young_length;
- byte* young_sequence = GetNoCodeAgeSequence(&young_length);
- if (!memcmp(start, young_sequence, young_length) ||
- *start == kCallOpcode) {
- return start;
- } else {
- if (kind() == FUNCTION) {
- byte* start_after_strict =
- start + kSizeOfFullCodegenStrictModePrologue;
- ASSERT(!memcmp(start_after_strict, young_sequence, young_length) ||
- start[kSizeOfFullCodegenStrictModePrologue] == kCallOpcode);
- return start_after_strict;
- } else {
- ASSERT(kind() == OPTIMIZED_FUNCTION);
- start = instruction_start() + kSizeOfOptimizedStrictModePrologue;
- if (!memcmp(start, young_sequence, young_length) ||
- *start == kCallOpcode) {
- return start;
- }
- start = instruction_start() + kSizeOfOptimizedAlignStackPrologue;
- if (!memcmp(start, young_sequence, young_length) ||
- *start == kCallOpcode) {
- return start;
- }
- start = instruction_start() + kSizeOfOptimizedAlignStackPrologue +
- kSizeOfOptimizedStrictModePrologue;
- ASSERT(!memcmp(start, young_sequence, young_length) ||
- *start == kCallOpcode);
- return start;
- }
- }
-}
-
-
bool Code::IsYoungSequence(byte* sequence) {
uint32_t young_length;
byte* young_sequence = GetNoCodeAgeSequence(&young_length);
@@ -849,7 +957,7 @@ void Code::PatchPlatformCodeAge(byte* sequence,
} else {
Code* stub = GetCodeAgeStub(age, parity);
CodePatcher patcher(sequence, young_length);
- patcher.masm()->call(stub->instruction_start(), RelocInfo::NONE);
+ patcher.masm()->call(stub->instruction_start(), RelocInfo::NONE32);
}
}
diff --git a/src/3rdparty/v8/src/ia32/codegen-ia32.h b/src/3rdparty/v8/src/ia32/codegen-ia32.h
index a783e9a..5137274 100644
--- a/src/3rdparty/v8/src/ia32/codegen-ia32.h
+++ b/src/3rdparty/v8/src/ia32/codegen-ia32.h
@@ -37,10 +37,6 @@ namespace internal {
// Forward declarations
class CompilationInfo;
-static const int kSizeOfFullCodegenStrictModePrologue = 34;
-static const int kSizeOfOptimizedStrictModePrologue = 12;
-static const int kSizeOfOptimizedAlignStackPrologue = 44;
-
// -------------------------------------------------------------------------
// CodeGenerator
@@ -92,6 +88,20 @@ class StringCharLoadGenerator : public AllStatic {
DISALLOW_COPY_AND_ASSIGN(StringCharLoadGenerator);
};
+
+class MathExpGenerator : public AllStatic {
+ public:
+ static void EmitMathExp(MacroAssembler* masm,
+ XMMRegister input,
+ XMMRegister result,
+ XMMRegister double_scratch,
+ Register temp1,
+ Register temp2);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(MathExpGenerator);
+};
+
} } // namespace v8::internal
#endif // V8_IA32_CODEGEN_IA32_H_
diff --git a/src/3rdparty/v8/src/ia32/deoptimizer-ia32.cc b/src/3rdparty/v8/src/ia32/deoptimizer-ia32.cc
index 99ad522..e27ea4c 100644
--- a/src/3rdparty/v8/src/ia32/deoptimizer-ia32.cc
+++ b/src/3rdparty/v8/src/ia32/deoptimizer-ia32.cc
@@ -114,17 +114,19 @@ void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) {
}
-void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
- if (!function->IsOptimized()) return;
+void Deoptimizer::DeoptimizeFunctionWithPreparedFunctionList(
+ JSFunction* function) {
+ Isolate* isolate = function->GetIsolate();
+ HandleScope scope(isolate);
+ AssertNoAllocation no_allocation;
+
+ ASSERT(function->IsOptimized());
+ ASSERT(function->FunctionsInFunctionListShareSameCode());
// The optimized code is going to be patched, so we cannot use it
// any more. Play safe and reset the whole cache.
function->shared()->ClearOptimizedCodeMap();
- Isolate* isolate = function->GetIsolate();
- HandleScope scope(isolate);
- AssertNoAllocation no_allocation;
-
// Get the optimized code.
Code* code = function->code();
Address code_start_address = code->instruction_start();
@@ -155,8 +157,8 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
// Patch lazy deoptimization entry.
Address call_address = code_start_address + deopt_data->Pc(i)->value();
CodePatcher patcher(call_address, patch_size());
- Address deopt_entry = GetDeoptimizationEntry(i, LAZY);
- patcher.masm()->call(deopt_entry, RelocInfo::NONE);
+ Address deopt_entry = GetDeoptimizationEntry(isolate, i, LAZY);
+ patcher.masm()->call(deopt_entry, RelocInfo::NONE32);
// We use RUNTIME_ENTRY for deoptimization bailouts.
RelocInfo rinfo(call_address + 1, // 1 after the call opcode.
RelocInfo::RUNTIME_ENTRY,
@@ -210,8 +212,6 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
static const byte kJnsInstruction = 0x79;
static const byte kJnsOffset = 0x13;
-static const byte kJaeInstruction = 0x73;
-static const byte kJaeOffset = 0x07;
static const byte kCallInstruction = 0xe8;
static const byte kNopByteOne = 0x66;
static const byte kNopByteTwo = 0x90;
@@ -224,31 +224,26 @@ void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
Address call_target_address = pc_after - kIntSize;
ASSERT_EQ(check_code->entry(),
Assembler::target_address_at(call_target_address));
- // The stack check code matches the pattern:
+ // The back edge bookkeeping code matches the pattern:
//
- // cmp esp, <limit>
- // jae ok
+ // sub <profiling_counter>, <delta>
+ // jns ok
// call <stack guard>
// test eax, <loop nesting depth>
// ok: ...
//
// We will patch away the branch so the code is:
//
- // cmp esp, <limit> ;; Not changed
+ // sub <profiling_counter>, <delta> ;; Not changed
// nop
// nop
// call <on-stack replacement>
// test eax, <loop nesting depth>
// ok:
- if (FLAG_count_based_interrupts) {
- ASSERT_EQ(kJnsInstruction, *(call_target_address - 3));
- ASSERT_EQ(kJnsOffset, *(call_target_address - 2));
- } else {
- ASSERT_EQ(kJaeInstruction, *(call_target_address - 3));
- ASSERT_EQ(kJaeOffset, *(call_target_address - 2));
- }
- ASSERT_EQ(kCallInstruction, *(call_target_address - 1));
+ ASSERT_EQ(kJnsInstruction, *(call_target_address - 3));
+ ASSERT_EQ(kJnsOffset, *(call_target_address - 2));
+ ASSERT_EQ(kCallInstruction, *(call_target_address - 1));
*(call_target_address - 3) = kNopByteOne;
*(call_target_address - 2) = kNopByteTwo;
Assembler::set_target_address_at(call_target_address,
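In byte terms the patch is two stores plus a call-target rewrite, using the
constants defined above. A sketch:

    // Overwrite the 2-byte "jns ok" so execution falls through into the
    // call; RevertStackCheckCodeAt restores the jns the same way.
    void PatchOutBackEdgeBranch(unsigned char* call_target_address) {
      call_target_address[-3] = 0x66;  // kNopByteOne (prefixed nop)
      call_target_address[-2] = 0x90;  // kNopByteTwo (nop)
      // The call target itself is redirected via set_target_address_at().
    }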
@@ -272,13 +267,8 @@ void Deoptimizer::RevertStackCheckCodeAt(Code* unoptimized_code,
ASSERT_EQ(kNopByteOne, *(call_target_address - 3));
ASSERT_EQ(kNopByteTwo, *(call_target_address - 2));
ASSERT_EQ(kCallInstruction, *(call_target_address - 1));
- if (FLAG_count_based_interrupts) {
- *(call_target_address - 3) = kJnsInstruction;
- *(call_target_address - 2) = kJnsOffset;
- } else {
- *(call_target_address - 3) = kJaeInstruction;
- *(call_target_address - 2) = kJaeOffset;
- }
+ *(call_target_address - 3) = kJnsInstruction;
+ *(call_target_address - 2) = kJnsOffset;
Assembler::set_target_address_at(call_target_address,
check_code->entry());
@@ -307,7 +297,7 @@ static int LookupBailoutId(DeoptimizationInputData* data, BailoutId ast_id) {
void Deoptimizer::DoComputeOsrOutputFrame() {
DeoptimizationInputData* data = DeoptimizationInputData::cast(
- optimized_code_->deoptimization_data());
+ compiled_code_->deoptimization_data());
unsigned ast_id = data->OsrAstId()->value();
// TODO(kasperl): This should not be the bailout_id_. It should be
// the ast id. Confusing.
@@ -344,7 +334,7 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
unsigned input_frame_size = input_->GetFrameSize();
ASSERT(fixed_size + height_in_bytes == input_frame_size);
- unsigned stack_slot_size = optimized_code_->stack_slots() * kPointerSize;
+ unsigned stack_slot_size = compiled_code_->stack_slots() * kPointerSize;
unsigned outgoing_height = data->ArgumentsStackHeight(bailout_id)->value();
unsigned outgoing_size = outgoing_height * kPointerSize;
unsigned output_frame_size = fixed_size + stack_slot_size + outgoing_size;
@@ -455,7 +445,7 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
unsigned pc_offset = data->OsrPcOffset()->value();
uint32_t pc = reinterpret_cast<uint32_t>(
- optimized_code_->entry() + pc_offset);
+ compiled_code_->entry() + pc_offset);
output_[0]->SetPc(pc);
}
Code* continuation =
@@ -473,99 +463,183 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
}
-void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
- int frame_index) {
- JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
- unsigned height = iterator->Next();
- unsigned height_in_bytes = height * kPointerSize;
- if (FLAG_trace_deopt) {
- PrintF(" translating arguments adaptor => height=%d\n", height_in_bytes);
- }
-
- unsigned fixed_frame_size = ArgumentsAdaptorFrameConstants::kFrameSize;
- unsigned output_frame_size = height_in_bytes + fixed_frame_size;
+void Deoptimizer::DoComputeCompiledStubFrame(TranslationIterator* iterator,
+ int frame_index) {
+ //
+ // FROM TO
+ // | .... | | .... |
+ // +-------------------------+ +-------------------------+
+ // | JSFunction continuation | | JSFunction continuation |
+ // +-------------------------+ +-------------------------+
+ // | | saved frame (ebp) | | saved frame (ebp) |
+ // | +=========================+<-ebp +=========================+<-ebp
+ // | | JSFunction context | | JSFunction context |
+ // v          +-------------------------+          +-------------------------+
+ // | COMPILED_STUB marker | | STUB_FAILURE marker |
+ // +-------------------------+ +-------------------------+
+ // | | | caller args.arguments_ |
+ // | ... | +-------------------------+
+ // | | | caller args.length_ |
+ // |-------------------------|<-esp +-------------------------+
+ // | caller args pointer |
+ // +-------------------------+
+ // | caller stack param 1 |
+ // parameters in registers +-------------------------+
+ // and spilled to stack | .... |
+ // +-------------------------+
+ // | caller stack param n |
+ // +-------------------------+<-esp
+ // eax = number of parameters
+ // ebx = failure handler address
+ // ebp = saved frame
+ // esi = JSFunction context
+ //
- // Allocate and store the output frame description.
+ ASSERT(compiled_code_->kind() == Code::COMPILED_STUB);
+ int major_key = compiled_code_->major_key();
+ CodeStubInterfaceDescriptor* descriptor =
+ isolate_->code_stub_interface_descriptor(major_key);
+
+ // The output frame must have room for all pushed register parameters
+ // and the standard stack frame slots. Include space for an argument
+ // object to the callee and optionally the space to pass the argument
+ // object to the stub failure handler.
+ int height_in_bytes = kPointerSize * descriptor->register_param_count_ +
+ sizeof(Arguments) + kPointerSize;
+ int fixed_frame_size = StandardFrameConstants::kFixedFrameSize;
+ int input_frame_size = input_->GetFrameSize();
+ int output_frame_size = height_in_bytes + fixed_frame_size;
+ if (trace_) {
+ PrintF(" translating %s => StubFailureTrampolineStub, height=%d\n",
+ CodeStub::MajorName(static_cast<CodeStub::Major>(major_key), false),
+ height_in_bytes);
+ }
+
+ // The stub failure trampoline is a single frame.
FrameDescription* output_frame =
- new(output_frame_size) FrameDescription(output_frame_size, function);
- output_frame->SetFrameType(StackFrame::ARGUMENTS_ADAPTOR);
-
- // Arguments adaptor can not be topmost or bottommost.
- ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
- ASSERT(output_[frame_index] == NULL);
+ new(output_frame_size) FrameDescription(output_frame_size, NULL);
+ output_frame->SetFrameType(StackFrame::STUB_FAILURE_TRAMPOLINE);
+ ASSERT(frame_index == 0);
output_[frame_index] = output_frame;
- // The top address of the frame is computed from the previous
- // frame's top and this frame's size.
- uint32_t top_address;
- top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
+ // The top address for the output frame can be computed from the input
+ // frame pointer and the output frame's height. Subtract space for the
+ // context and function slots.
+ intptr_t top_address = input_->GetRegister(ebp.code()) - (2 * kPointerSize) -
+ height_in_bytes;
output_frame->SetTop(top_address);
- // Compute the incoming parameter translation.
- int parameter_count = height;
- unsigned output_offset = output_frame_size;
- for (int i = 0; i < parameter_count; ++i) {
- output_offset -= kPointerSize;
- DoTranslateCommand(iterator, frame_index, output_offset);
+ // Read caller's PC (JSFunction continuation) from the input frame.
+ intptr_t input_frame_offset = input_frame_size - kPointerSize;
+ intptr_t output_frame_offset = output_frame_size - kPointerSize;
+ intptr_t value = input_->GetFrameSlot(input_frame_offset);
+ output_frame->SetFrameSlot(output_frame_offset, value);
+ if (trace_) {
+ PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's pc\n",
+ top_address + output_frame_offset, output_frame_offset, value);
+ }
+
+ // Read caller's FP from the input frame, and set this frame's FP.
+ input_frame_offset -= kPointerSize;
+ value = input_->GetFrameSlot(input_frame_offset);
+ output_frame_offset -= kPointerSize;
+ output_frame->SetFrameSlot(output_frame_offset, value);
+ intptr_t frame_ptr = input_->GetRegister(ebp.code());
+ output_frame->SetRegister(ebp.code(), frame_ptr);
+ output_frame->SetFp(frame_ptr);
+ if (trace_) {
+ PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
+ top_address + output_frame_offset, output_frame_offset, value);
}
- // Read caller's PC from the previous frame.
- output_offset -= kPointerSize;
- intptr_t callers_pc = output_[frame_index - 1]->GetPc();
- output_frame->SetFrameSlot(output_offset, callers_pc);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's pc\n",
- top_address + output_offset, output_offset, callers_pc);
+ // The context can be gotten from the input frame.
+ input_frame_offset -= kPointerSize;
+ value = input_->GetFrameSlot(input_frame_offset);
+ output_frame->SetRegister(esi.code(), value);
+ output_frame_offset -= kPointerSize;
+ output_frame->SetFrameSlot(output_frame_offset, value);
+ if (trace_) {
+ PrintF(" 0x%08x: [top + %d] <- 0x%08x ; context\n",
+ top_address + output_frame_offset, output_frame_offset, value);
}
- // Read caller's FP from the previous frame, and set this frame's FP.
- output_offset -= kPointerSize;
- intptr_t value = output_[frame_index - 1]->GetFp();
- output_frame->SetFrameSlot(output_offset, value);
- intptr_t fp_value = top_address + output_offset;
- output_frame->SetFp(fp_value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
- fp_value, output_offset, value);
+ // A marker value is used in place of the function.
+ output_frame_offset -= kPointerSize;
+ value = reinterpret_cast<intptr_t>(
+ Smi::FromInt(StackFrame::STUB_FAILURE_TRAMPOLINE));
+ output_frame->SetFrameSlot(output_frame_offset, value);
+ if (trace_) {
+ PrintF(" 0x%08x: [top + %d] <- 0x%08x ; function (stub fail sentinel)\n",
+ top_address + output_frame_offset, output_frame_offset, value);
}
- // A marker value is used in place of the context.
- output_offset -= kPointerSize;
- intptr_t context = reinterpret_cast<intptr_t>(
- Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- output_frame->SetFrameSlot(output_offset, context);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; context (adaptor sentinel)\n",
- top_address + output_offset, output_offset, context);
+ int caller_arg_count = 0;
+ if (descriptor->stack_parameter_count_ != NULL) {
+ caller_arg_count =
+ input_->GetRegister(descriptor->stack_parameter_count_->code());
}
- // The function was mentioned explicitly in the ARGUMENTS_ADAPTOR_FRAME.
- output_offset -= kPointerSize;
- value = reinterpret_cast<intptr_t>(function);
- output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; function\n",
- top_address + output_offset, output_offset, value);
+ // Build the Arguments object for the caller's parameters and a pointer to it.
+ output_frame_offset -= kPointerSize;
+ value = frame_ptr + StandardFrameConstants::kCallerSPOffset +
+ (caller_arg_count - 1) * kPointerSize;
+ output_frame->SetFrameSlot(output_frame_offset, value);
+ if (trace_) {
+ PrintF(" 0x%08x: [top + %d] <- 0x%08x ; args.arguments\n",
+ top_address + output_frame_offset, output_frame_offset, value);
}
- // Number of incoming arguments.
- output_offset -= kPointerSize;
- value = reinterpret_cast<uint32_t>(Smi::FromInt(height - 1));
- output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; argc (%d)\n",
- top_address + output_offset, output_offset, value, height - 1);
+ output_frame_offset -= kPointerSize;
+ value = caller_arg_count;
+ output_frame->SetFrameSlot(output_frame_offset, value);
+ if (trace_) {
+ PrintF(" 0x%08x: [top + %d] <- 0x%08x ; args.length\n",
+ top_address + output_frame_offset, output_frame_offset, value);
}
- ASSERT(0 == output_offset);
+ output_frame_offset -= kPointerSize;
+ value = frame_ptr - (output_frame_size - output_frame_offset) -
+ StandardFrameConstants::kMarkerOffset + kPointerSize;
+ output_frame->SetFrameSlot(output_frame_offset, value);
+ if (trace_) {
+ PrintF(" 0x%08x: [top + %d] <- 0x%08x ; args*\n",
+ top_address + output_frame_offset, output_frame_offset, value);
+ }
- Builtins* builtins = isolate_->builtins();
- Code* adaptor_trampoline =
- builtins->builtin(Builtins::kArgumentsAdaptorTrampoline);
- uint32_t pc = reinterpret_cast<uint32_t>(
- adaptor_trampoline->instruction_start() +
- isolate_->heap()->arguments_adaptor_deopt_pc_offset()->value());
- output_frame->SetPc(pc);
+ // Copy the register parameters to the failure frame.
+ for (int i = 0; i < descriptor->register_param_count_; ++i) {
+ output_frame_offset -= kPointerSize;
+ DoTranslateCommand(iterator, 0, output_frame_offset);
+ }
+
+ ASSERT(0 == output_frame_offset);
+
+ for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
+ double double_value = input_->GetDoubleRegister(i);
+ output_frame->SetDoubleRegister(i, double_value);
+ }
+
+ intptr_t handler =
+ reinterpret_cast<intptr_t>(descriptor->deoptimization_handler_);
+ int params = descriptor->register_param_count_;
+ if (descriptor->stack_parameter_count_ != NULL) {
+ params++;
+ }
+ output_frame->SetRegister(eax.code(), params);
+ output_frame->SetRegister(ebx.code(), handler);
+
+ // Compute this frame's PC, state, and continuation.
+ Code* trampoline = NULL;
+ int extra = descriptor->extra_expression_stack_count_;
+ StubFailureTrampolineStub(extra).FindCodeInCache(&trampoline, isolate_);
+ ASSERT(trampoline != NULL);
+ output_frame->SetPc(reinterpret_cast<intptr_t>(
+ trampoline->instruction_start()));
+ output_frame->SetState(Smi::FromInt(FullCodeGenerator::NO_REGISTERS));
+ Code* notify_failure =
+ isolate_->builtins()->builtin(Builtins::kNotifyStubFailure);
+ output_frame->SetContinuation(
+ reinterpret_cast<intptr_t>(notify_failure->entry()));
}
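
The new stub-failure path above derives the caller's argument count from the stub descriptor, then fills successive frame slots with args.arguments, args.length, and a pointer to the args object. A minimal sketch of that slot arithmetic, assuming ia32's 4-byte pointers and a caller-SP offset of two words (both values illustrative, taken from the StandardFrameConstants block removed later in this patch):

    #include <cstdint>
    #include <cstdio>

    // Illustrative ia32 values: 4-byte pointers, caller SP two words above fp.
    const int kPointerSize = 4;
    const int kCallerSPOffset = 2 * kPointerSize;

    int main() {
      uint32_t frame_ptr = 0x00001000;  // hypothetical ebp of the failing frame
      int caller_arg_count = 3;         // read from the stub descriptor at runtime

      // Mirrors the args.arguments expression above: a pointer to the last
      // caller-pushed parameter, counted from the caller's stack pointer.
      uint32_t args_arguments =
          frame_ptr + kCallerSPOffset + (caller_arg_count - 1) * kPointerSize;

      printf("args.arguments = 0x%08x\n", args_arguments);  // 0x00001010
      printf("args.length    = %d\n", caller_arg_count);
      return 0;
    }
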
@@ -576,7 +650,7 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
unsigned height = iterator->Next();
unsigned height_in_bytes = height * kPointerSize;
- if (FLAG_trace_deopt) {
+ if (trace_) {
PrintF(" translating construct stub => height=%d\n", height_in_bytes);
}
@@ -611,7 +685,7 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
output_offset -= kPointerSize;
intptr_t callers_pc = output_[frame_index - 1]->GetPc();
output_frame->SetFrameSlot(output_offset, callers_pc);
- if (FLAG_trace_deopt) {
+ if (trace_) {
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's pc\n",
top_address + output_offset, output_offset, callers_pc);
}
@@ -622,7 +696,7 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
output_frame->SetFrameSlot(output_offset, value);
intptr_t fp_value = top_address + output_offset;
output_frame->SetFp(fp_value);
- if (FLAG_trace_deopt) {
+ if (trace_) {
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
fp_value, output_offset, value);
}
@@ -631,7 +705,7 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
output_offset -= kPointerSize;
value = output_[frame_index - 1]->GetContext();
output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
+ if (trace_) {
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; context\n",
top_address + output_offset, output_offset, value);
}
@@ -640,7 +714,7 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
output_offset -= kPointerSize;
value = reinterpret_cast<intptr_t>(Smi::FromInt(StackFrame::CONSTRUCT));
output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
+ if (trace_) {
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; function (construct sentinel)\n",
top_address + output_offset, output_offset, value);
}
@@ -649,7 +723,7 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
output_offset -= kPointerSize;
value = reinterpret_cast<intptr_t>(construct_stub);
output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
+ if (trace_) {
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; code object\n",
top_address + output_offset, output_offset, value);
}
@@ -658,7 +732,7 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
output_offset -= kPointerSize;
value = reinterpret_cast<uint32_t>(Smi::FromInt(height - 1));
output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
+ if (trace_) {
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; argc (%d)\n",
top_address + output_offset, output_offset, value, height - 1);
}
@@ -668,7 +742,7 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
output_offset -= kPointerSize;
value = output_frame->GetFrameSlot(output_frame_size - kPointerSize);
output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
+ if (trace_) {
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; allocated receiver\n",
top_address + output_offset, output_offset, value);
}
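
Both the removed and the surviving code store the argument count as a Smi reinterpreted as a raw machine word. A minimal sketch of the 32-bit Smi encoding this relies on, assuming the usual one-bit tagging scheme (payload shifted left by one, tag bit zero); the encoding is an assumption about this V8 vintage, not something the hunk itself shows:

    #include <cstdint>
    #include <cassert>

    // Assumed 32-bit Smi encoding: 31-bit payload, low tag bit is 0.
    static inline uint32_t SmiFromInt(int32_t value) {
      return static_cast<uint32_t>(value) << 1;  // tag bit stays clear
    }
    static inline int32_t SmiToInt(uint32_t smi) {
      return static_cast<int32_t>(smi) >> 1;
    }

    int main() {
      int height = 4;
      uint32_t slot = SmiFromInt(height - 1);  // what lands in the argc slot
      assert(SmiToInt(slot) == 3);
      return 0;
    }
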
@@ -682,125 +756,6 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
}
-void Deoptimizer::DoComputeAccessorStubFrame(TranslationIterator* iterator,
- int frame_index,
- bool is_setter_stub_frame) {
- JSFunction* accessor = JSFunction::cast(ComputeLiteral(iterator->Next()));
- // The receiver (and the implicit return value, if any) are expected in
- // registers by the LoadIC/StoreIC, so they don't belong to the output stack
- // frame. This means that we have to use a height of 0.
- unsigned height = 0;
- unsigned height_in_bytes = height * kPointerSize;
- const char* kind = is_setter_stub_frame ? "setter" : "getter";
- if (FLAG_trace_deopt) {
- PrintF(" translating %s stub => height=%u\n", kind, height_in_bytes);
- }
-
- // We need 1 stack entry for the return address + 4 stack entries from
- // StackFrame::INTERNAL (FP, context, frame type, code object, see
- // MacroAssembler::EnterFrame). For a setter stub frame we need one additional
- // entry for the implicit return value, see
- // StoreStubCompiler::CompileStoreViaSetter.
- unsigned fixed_frame_entries = 1 + 4 + (is_setter_stub_frame ? 1 : 0);
- unsigned fixed_frame_size = fixed_frame_entries * kPointerSize;
- unsigned output_frame_size = height_in_bytes + fixed_frame_size;
-
- // Allocate and store the output frame description.
- FrameDescription* output_frame =
- new(output_frame_size) FrameDescription(output_frame_size, accessor);
- output_frame->SetFrameType(StackFrame::INTERNAL);
-
- // A frame for an accessor stub can not be the topmost or bottommost one.
- ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
- ASSERT(output_[frame_index] == NULL);
- output_[frame_index] = output_frame;
-
- // The top address of the frame is computed from the previous frame's top and
- // this frame's size.
- intptr_t top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
- output_frame->SetTop(top_address);
-
- unsigned output_offset = output_frame_size;
-
- // Read caller's PC from the previous frame.
- output_offset -= kPointerSize;
- intptr_t callers_pc = output_[frame_index - 1]->GetPc();
- output_frame->SetFrameSlot(output_offset, callers_pc);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
- " ; caller's pc\n",
- top_address + output_offset, output_offset, callers_pc);
- }
-
- // Read caller's FP from the previous frame, and set this frame's FP.
- output_offset -= kPointerSize;
- intptr_t value = output_[frame_index - 1]->GetFp();
- output_frame->SetFrameSlot(output_offset, value);
- intptr_t fp_value = top_address + output_offset;
- output_frame->SetFp(fp_value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
- " ; caller's fp\n",
- fp_value, output_offset, value);
- }
-
- // The context can be gotten from the previous frame.
- output_offset -= kPointerSize;
- value = output_[frame_index - 1]->GetContext();
- output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
- " ; context\n",
- top_address + output_offset, output_offset, value);
- }
-
- // A marker value is used in place of the function.
- output_offset -= kPointerSize;
- value = reinterpret_cast<intptr_t>(Smi::FromInt(StackFrame::INTERNAL));
- output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
- " ; function (%s sentinel)\n",
- top_address + output_offset, output_offset, value, kind);
- }
-
- // Get Code object from accessor stub.
- output_offset -= kPointerSize;
- Builtins::Name name = is_setter_stub_frame ?
- Builtins::kStoreIC_Setter_ForDeopt :
- Builtins::kLoadIC_Getter_ForDeopt;
- Code* accessor_stub = isolate_->builtins()->builtin(name);
- value = reinterpret_cast<intptr_t>(accessor_stub);
- output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
- " ; code object\n",
- top_address + output_offset, output_offset, value);
- }
-
- // Skip receiver.
- Translation::Opcode opcode =
- static_cast<Translation::Opcode>(iterator->Next());
- iterator->Skip(Translation::NumberOfOperandsFor(opcode));
-
- if (is_setter_stub_frame) {
- // The implicit return value was part of the artificial setter stub
- // environment.
- output_offset -= kPointerSize;
- DoTranslateCommand(iterator, frame_index, output_offset);
- }
-
- ASSERT(0 == output_offset);
-
- Smi* offset = is_setter_stub_frame ?
- isolate_->heap()->setter_stub_deopt_pc_offset() :
- isolate_->heap()->getter_stub_deopt_pc_offset();
- intptr_t pc = reinterpret_cast<intptr_t>(
- accessor_stub->instruction_start() + offset->value());
- output_frame->SetPc(pc);
-}
-
-
void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
int frame_index) {
BailoutId node_id = BailoutId(iterator->Next());
@@ -815,7 +770,7 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
}
unsigned height = iterator->Next();
unsigned height_in_bytes = height * kPointerSize;
- if (FLAG_trace_deopt) {
+ if (trace_) {
PrintF(" translating ");
function->PrintName();
PrintF(" => node=%d, height=%d\n", node_id.ToInt(), height_in_bytes);
@@ -893,7 +848,7 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
value = output_[frame_index - 1]->GetPc();
}
output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
+ if (trace_) {
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's pc\n",
top_address + output_offset, output_offset, value);
}
@@ -916,7 +871,7 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
fp_value);
output_frame->SetFp(fp_value);
if (is_topmost) output_frame->SetRegister(ebp.code(), fp_value);
- if (FLAG_trace_deopt) {
+ if (trace_) {
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
fp_value, output_offset, value);
}
@@ -936,7 +891,7 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
output_frame->SetFrameSlot(output_offset, value);
output_frame->SetContext(value);
if (is_topmost) output_frame->SetRegister(esi.code(), value);
- if (FLAG_trace_deopt) {
+ if (trace_) {
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; context\n",
top_address + output_offset, output_offset, value);
}
@@ -949,7 +904,7 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
// input frame.
ASSERT(!is_bottommost || input_->GetFrameSlot(input_offset) == value);
output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
+ if (trace_) {
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; function\n",
top_address + output_offset, output_offset, value);
}
@@ -997,7 +952,7 @@ void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
}
input_->SetRegister(esp.code(), reinterpret_cast<intptr_t>(frame->sp()));
input_->SetRegister(ebp.code(), reinterpret_cast<intptr_t>(frame->fp()));
- for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; i++) {
+ for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); i++) {
input_->SetDoubleRegister(i, 0.0);
}
@@ -1012,7 +967,6 @@ void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
void Deoptimizer::EntryGenerator::Generate() {
GeneratePrologue();
- CpuFeatures::Scope scope(SSE2);
Isolate* isolate = masm()->isolate();
@@ -1022,10 +976,13 @@ void Deoptimizer::EntryGenerator::Generate() {
const int kDoubleRegsSize = kDoubleSize *
XMMRegister::kNumAllocatableRegisters;
__ sub(esp, Immediate(kDoubleRegsSize));
- for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
- XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
- int offset = i * kDoubleSize;
- __ movdbl(Operand(esp, offset), xmm_reg);
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope scope(SSE2);
+ for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
+ XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
+ int offset = i * kDoubleSize;
+ __ movdbl(Operand(esp, offset), xmm_reg);
+ }
}
__ pushad();
@@ -1073,15 +1030,23 @@ void Deoptimizer::EntryGenerator::Generate() {
__ pop(Operand(ebx, offset));
}
- // Fill in the double input registers.
int double_regs_offset = FrameDescription::double_registers_offset();
- for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
- int dst_offset = i * kDoubleSize + double_regs_offset;
- int src_offset = i * kDoubleSize;
- __ movdbl(xmm0, Operand(esp, src_offset));
- __ movdbl(Operand(ebx, dst_offset), xmm0);
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope scope(SSE2);
+ // Fill in the double input registers.
+ for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
+ int dst_offset = i * kDoubleSize + double_regs_offset;
+ int src_offset = i * kDoubleSize;
+ __ movdbl(xmm0, Operand(esp, src_offset));
+ __ movdbl(Operand(ebx, dst_offset), xmm0);
+ }
}
+ // Clear all FPU exceptions.
+ // TODO(ulan): Find out why the TOP register is not zero here in some cases,
+ // and check that the generated code never deoptimizes with unbalanced stack.
+ __ fnclex();
+
// Remove the bailout id and the double registers from the stack.
if (type() == EAGER) {
__ add(esp, Immediate(kDoubleRegsSize + kPointerSize));
@@ -1098,10 +1063,13 @@ void Deoptimizer::EntryGenerator::Generate() {
// limit and copy the contents of the activation frame to the input
// frame description.
__ lea(edx, Operand(ebx, FrameDescription::frame_content_offset()));
+ Label pop_loop_header;
+ __ jmp(&pop_loop_header);
Label pop_loop;
__ bind(&pop_loop);
__ pop(Operand(edx, 0));
__ add(edx, Immediate(sizeof(uint32_t)));
+ __ bind(&pop_loop_header);
__ cmp(ecx, esp);
__ j(not_equal, &pop_loop);
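
The added pop_loop_header label rotates the loop: control jumps straight to the condition, and the body falls through into it, so each iteration executes one branch instead of two. Roughly the same shape in C++, with gotos standing in for the jmp/bind pairs (an illustration, not V8 code):

    #include <vector>
    #include <cstdio>

    // Rotated loop: enter at the condition, body falls through into it.
    int sum_rotated(const std::vector<int>& v) {
      int total = 0;
      size_t i = 0;
      goto loop_header;                     // __ jmp(&pop_loop_header);
    loop_body:                              // __ bind(&pop_loop);
      total += v[i];
      ++i;
    loop_header:                            // __ bind(&pop_loop_header);
      if (i != v.size()) goto loop_body;    // __ cmp / __ j(not_equal, ...);
      return total;
    }

    int main() {
      std::vector<int> v = {1, 2, 3};
      printf("%d\n", sum_rotated(v));  // 6
      return 0;
    }
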
@@ -1139,27 +1107,33 @@ void Deoptimizer::EntryGenerator::Generate() {
}
// Replace the current frame with the output frames.
- Label outer_push_loop, inner_push_loop;
+ Label outer_push_loop, inner_push_loop,
+ outer_loop_header, inner_loop_header;
// Outer loop state: eax = current FrameDescription**, edx = one past the
// last FrameDescription**.
__ mov(edx, Operand(eax, Deoptimizer::output_count_offset()));
__ mov(eax, Operand(eax, Deoptimizer::output_offset()));
__ lea(edx, Operand(eax, edx, times_4, 0));
+ __ jmp(&outer_loop_header);
__ bind(&outer_push_loop);
// Inner loop state: ebx = current FrameDescription*, ecx = loop index.
__ mov(ebx, Operand(eax, 0));
__ mov(ecx, Operand(ebx, FrameDescription::frame_size_offset()));
+ __ jmp(&inner_loop_header);
__ bind(&inner_push_loop);
__ sub(ecx, Immediate(sizeof(uint32_t)));
__ push(Operand(ebx, ecx, times_1, FrameDescription::frame_content_offset()));
+ __ bind(&inner_loop_header);
__ test(ecx, ecx);
__ j(not_zero, &inner_push_loop);
__ add(eax, Immediate(kPointerSize));
+ __ bind(&outer_loop_header);
__ cmp(eax, edx);
__ j(below, &outer_push_loop);
- // In case of OSR, we have to restore the XMM registers.
- if (type() == OSR) {
+ // In case of OSR or a failed STUB, we have to restore the XMM registers.
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope scope(SSE2);
for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
int src_offset = i * kDoubleSize + double_regs_offset;
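
All three XMM save/restore sites are now guarded by CpuFeatures::IsSupported(SSE2), so deoptimization also works on pre-SSE2 hardware, where doubles live on the x87 stack instead. A minimal sketch of the same runtime-dispatch idea, using GCC/Clang's __builtin_cpu_supports; the builtin is a toolchain assumption for this sketch, not something V8 itself uses:

    #include <cstdio>

    int main() {
      // Mirrors the CpuFeatures::IsSupported(SSE2) guard: only touch XMM
      // state when the CPU actually has it; otherwise take the fallback.
      if (__builtin_cpu_supports("sse2")) {
        printf("SSE2: would movdbl each XMM register into the frame\n");
      } else {
        printf("no SSE2: doubles stay on the x87 stack\n");
      }
      return 0;
    }
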
diff --git a/src/3rdparty/v8/src/ia32/disasm-ia32.cc b/src/3rdparty/v8/src/ia32/disasm-ia32.cc
index dd07deb..1193f2a 100644
--- a/src/3rdparty/v8/src/ia32/disasm-ia32.cc
+++ b/src/3rdparty/v8/src/ia32/disasm-ia32.cc
@@ -871,6 +871,7 @@ static const char* F0Mnem(byte f0byte) {
case 0xAF: return "imul";
case 0xA5: return "shld";
case 0xAD: return "shrd";
+ case 0xAC: return "shrd"; // 3-operand version.
case 0xAB: return "bts";
default: return NULL;
}
@@ -1041,6 +1042,14 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
NameOfXMMRegister(regop),
NameOfXMMRegister(rm));
data++;
+ } else if (f0byte == 0x50) {
+ data += 2;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("movmskps %s,%s",
+ NameOfCPURegister(regop),
+ NameOfXMMRegister(rm));
+ data++;
} else if ((f0byte & 0xF0) == 0x80) {
data += JumpConditional(data, branch_hint);
} else if (f0byte == 0xBE || f0byte == 0xBF || f0byte == 0xB6 ||
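
The newly decoded 0F 50 opcode, movmskps, packs the sign bit of each of the four single-precision lanes into the low four bits of a general-purpose register. Its effect, shown through the standard SSE intrinsic (a sketch of what the instruction does, not of the disassembler):

    #include <xmmintrin.h>
    #include <cstdio>

    int main() {
      // Lanes, low to high: -1.0, 2.0, -3.0, 4.0 (_mm_set_ps lists high->low).
      __m128 v = _mm_set_ps(4.0f, -3.0f, 2.0f, -1.0f);
      int mask = _mm_movemask_ps(v);   // compiles to movmskps
      printf("mask = 0x%x\n", mask);   // 0x5: sign bits of lanes 0 and 2
      return 0;
    }
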
diff --git a/src/3rdparty/v8/src/ia32/frames-ia32.h b/src/3rdparty/v8/src/ia32/frames-ia32.h
index 18915e2..5bd102a 100644
--- a/src/3rdparty/v8/src/ia32/frames-ia32.h
+++ b/src/3rdparty/v8/src/ia32/frames-ia32.h
@@ -97,22 +97,6 @@ class ExitFrameConstants : public AllStatic {
};
-class StandardFrameConstants : public AllStatic {
- public:
- // Fixed part of the frame consists of return address, caller fp,
- // context and function.
- // StandardFrame::IterateExpressions assumes that kContextOffset is the last
- // object pointer.
- static const int kFixedFrameSize = 4 * kPointerSize;
- static const int kExpressionsOffset = -3 * kPointerSize;
- static const int kMarkerOffset = -2 * kPointerSize;
- static const int kContextOffset = -1 * kPointerSize;
- static const int kCallerFPOffset = 0 * kPointerSize;
- static const int kCallerPCOffset = +1 * kPointerSize;
- static const int kCallerSPOffset = +2 * kPointerSize;
-};
-
-
class JavaScriptFrameConstants : public AllStatic {
public:
// FP-relative.
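
StandardFrameConstants is dropped from the ia32 header, presumably consolidated into shared code now that platform-independent deoptimizer code uses it; the diff only shows the removal, so the destination is an inference. The removed layout, restated as compilable constants for reference:

    #include <cstdio>

    // Restatement of the removed ia32 layout; offsets are relative to ebp.
    const int kPointerSize       = 4;
    const int kExpressionsOffset = -3 * kPointerSize;  // expression stack slots
    const int kMarkerOffset      = -2 * kPointerSize;  // frame-type Smi
    const int kContextOffset     = -1 * kPointerSize;  // last object pointer slot
    const int kCallerFPOffset    =  0 * kPointerSize;  // saved ebp
    const int kCallerPCOffset    = +1 * kPointerSize;  // return address
    const int kCallerSPOffset    = +2 * kPointerSize;  // first incoming argument

    int main() {
      printf("caller sp is at ebp%+d\n", kCallerSPOffset);  // ebp+8
      return 0;
    }
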
diff --git a/src/3rdparty/v8/src/ia32/full-codegen-ia32.cc b/src/3rdparty/v8/src/ia32/full-codegen-ia32.cc
index c58f242..733d977 100644
--- a/src/3rdparty/v8/src/ia32/full-codegen-ia32.cc
+++ b/src/3rdparty/v8/src/ia32/full-codegen-ia32.cc
@@ -119,7 +119,7 @@ void FullCodeGenerator::Generate() {
handler_table_ =
isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
profiling_counter_ = isolate()->factory()->NewJSGlobalPropertyCell(
- Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget)));
+ Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
SetFunctionPosition(function());
Comment cmnt(masm_, "[ function compiled by full code generator");
@@ -127,7 +127,7 @@ void FullCodeGenerator::Generate() {
#ifdef DEBUG
if (strlen(FLAG_stop_at) > 0 &&
- info->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
+ info->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
__ int3();
}
#endif
@@ -138,8 +138,6 @@ void FullCodeGenerator::Generate() {
// function calls.
if (!info->is_classic_mode() || info->is_native()) {
Label ok;
- Label start;
- __ bind(&start);
__ test(ecx, ecx);
__ j(zero, &ok, Label::kNear);
// +1 for return address.
@@ -151,8 +149,6 @@ void FullCodeGenerator::Generate() {
__ mov(Operand(esp, receiver_offset),
Immediate(isolate()->factory()->undefined_value()));
__ bind(&ok);
- ASSERT(!FLAG_age_code ||
- (kSizeOfFullCodegenStrictModePrologue == ok.pos() - start.pos()));
}
// Open a frame scope to indicate that there is a frame on the stack. The
@@ -160,6 +156,7 @@ void FullCodeGenerator::Generate() {
// the frame (that is done below).
FrameScope frame_scope(masm_, StackFrame::MANUAL);
+ info->set_prologue_offset(masm_->pc_offset());
__ push(ebp); // Caller's frame pointer.
__ mov(ebp, esp);
__ push(esi); // Callee's context.
@@ -333,39 +330,27 @@ void FullCodeGenerator::EmitProfilingCounterReset() {
}
-void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt,
- Label* back_edge_target) {
- Comment cmnt(masm_, "[ Stack check");
+void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
+ Label* back_edge_target) {
+ Comment cmnt(masm_, "[ Back edge bookkeeping");
Label ok;
- if (FLAG_count_based_interrupts) {
- int weight = 1;
- if (FLAG_weighted_back_edges) {
- ASSERT(back_edge_target->is_bound());
- int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
- weight = Min(kMaxBackEdgeWeight,
- Max(1, distance / kBackEdgeDistanceUnit));
- }
- EmitProfilingCounterDecrement(weight);
- __ j(positive, &ok, Label::kNear);
- InterruptStub stub;
- __ CallStub(&stub);
- } else {
- // Count based interrupts happen often enough when they are enabled
- // that the additional stack checks are not necessary (they would
- // only check for interrupts).
- ExternalReference stack_limit =
- ExternalReference::address_of_stack_limit(isolate());
- __ cmp(esp, Operand::StaticVariable(stack_limit));
- __ j(above_equal, &ok, Label::kNear);
- StackCheckStub stub;
- __ CallStub(&stub);
+ int weight = 1;
+ if (FLAG_weighted_back_edges) {
+ ASSERT(back_edge_target->is_bound());
+ int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
+ weight = Min(kMaxBackEdgeWeight,
+ Max(1, distance / kBackEdgeDistanceUnit));
}
+ EmitProfilingCounterDecrement(weight);
+ __ j(positive, &ok, Label::kNear);
+ InterruptStub stub;
+ __ CallStub(&stub);
// Record a mapping of this PC offset to the OSR id. This is used to find
// the AST id from the unoptimized code in order to use it as a key into
// the deoptimization input data found in the optimized code.
- RecordStackCheck(stmt->OsrEntryId());
+ RecordBackEdge(stmt->OsrEntryId());
// Loop stack checks can be patched to perform on-stack replacement. In
// order to decide whether or not to perform OSR we embed the loop depth
@@ -374,9 +359,7 @@ void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt,
ASSERT(loop_depth() > 0);
__ test(eax, Immediate(Min(loop_depth(), Code::kMaxLoopNestingMarker)));
- if (FLAG_count_based_interrupts) {
- EmitProfilingCounterReset();
- }
+ EmitProfilingCounterReset();
__ bind(&ok);
PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
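
EmitBackEdgeBookkeeping now unconditionally uses the counter-based scheme: the profiling counter is decremented by a weight proportional to the back edge's code distance, clamped to [1, kMaxBackEdgeWeight], so larger loop bodies drain the interrupt budget faster. The clamp as a standalone sketch; the two constants are illustrative stand-ins, not the values this tree defines:

    #include <algorithm>
    #include <cstdio>

    // Illustrative stand-ins for kMaxBackEdgeWeight / kBackEdgeDistanceUnit.
    const int kMaxBackEdgeWeight = 127;
    const int kBackEdgeDistanceUnit = 100;

    int BackEdgeWeight(int distance_in_bytes) {
      return std::min(kMaxBackEdgeWeight,
                      std::max(1, distance_in_bytes / kBackEdgeDistanceUnit));
    }

    int main() {
      printf("%d %d %d\n",
             BackEdgeWeight(10),       // 1: tiny loop, minimum weight
             BackEdgeWeight(2500),     // 25: scales with code size
             BackEdgeWeight(100000));  // 127: clamped at the maximum
      return 0;
    }
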
@@ -759,8 +742,7 @@ void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr,
void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) {
- // The variable in the declaration always resides in the current function
- // context.
+ // The variable in the declaration always resides in the current context.
ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
if (generate_debug_code_) {
// Check that we're not inside a with or catch context.
@@ -788,7 +770,8 @@ void FullCodeGenerator::VisitVariableDeclaration(
globals_->Add(variable->binding_needs_init()
? isolate()->factory()->the_hole_value()
: isolate()->factory()->undefined_value(), zone());
- globals_->Add(isolate()->factory()->ToBoolean(variable->is_qml_global()), zone());
+ globals_->Add(isolate()->factory()->ToBoolean(variable->is_qml_global()),
+ zone());
break;
case Variable::PARAMETER:
@@ -848,7 +831,8 @@ void FullCodeGenerator::VisitFunctionDeclaration(
// Check for stack-overflow exception.
if (function.is_null()) return SetStackOverflow();
globals_->Add(function, zone());
- globals_->Add(isolate()->factory()->ToBoolean(variable->is_qml_global()), zone());
+ globals_->Add(isolate()->factory()->ToBoolean(variable->is_qml_global()),
+ zone());
break;
}
@@ -891,34 +875,32 @@ void FullCodeGenerator::VisitFunctionDeclaration(
void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* declaration) {
- VariableProxy* proxy = declaration->proxy();
- Variable* variable = proxy->var();
- Handle<JSModule> instance = declaration->module()->interface()->Instance();
- ASSERT(!instance.is_null());
+ Variable* variable = declaration->proxy()->var();
+ ASSERT(variable->location() == Variable::CONTEXT);
+ ASSERT(variable->interface()->IsFrozen());
- switch (variable->location()) {
- case Variable::UNALLOCATED: {
- Comment cmnt(masm_, "[ ModuleDeclaration");
- globals_->Add(variable->name(), zone());
- globals_->Add(instance, zone());
- globals_->Add(isolate()->factory()->ToBoolean(variable->is_qml_global()), zone());
- Visit(declaration->module());
- break;
- }
+ Comment cmnt(masm_, "[ ModuleDeclaration");
+ EmitDebugCheckDeclarationContext(variable);
- case Variable::CONTEXT: {
- Comment cmnt(masm_, "[ ModuleDeclaration");
- EmitDebugCheckDeclarationContext(variable);
- __ mov(ContextOperand(esi, variable->index()), Immediate(instance));
- Visit(declaration->module());
- break;
- }
+ // Load instance object.
+ __ LoadContext(eax, scope_->ContextChainLength(scope_->GlobalScope()));
+ __ mov(eax, ContextOperand(eax, variable->interface()->Index()));
+ __ mov(eax, ContextOperand(eax, Context::EXTENSION_INDEX));
- case Variable::PARAMETER:
- case Variable::LOCAL:
- case Variable::LOOKUP:
- UNREACHABLE();
- }
+ // Assign it.
+ __ mov(ContextOperand(esi, variable->index()), eax);
+ // We know that we have written a module, which is not a smi.
+ __ RecordWriteContextSlot(esi,
+ Context::SlotOffset(variable->index()),
+ eax,
+ ecx,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ PrepareForBailoutForId(declaration->proxy()->id(), NO_REGISTERS);
+
+ // Traverse into body.
+ Visit(declaration->module());
}
@@ -953,13 +935,21 @@ void FullCodeGenerator::VisitExportDeclaration(ExportDeclaration* declaration) {
void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
// Call the runtime to declare the globals.
__ push(esi); // The context is the first argument.
- __ push(Immediate(pairs));
- __ push(Immediate(Smi::FromInt(DeclareGlobalsFlags())));
+ __ Push(pairs);
+ __ Push(Smi::FromInt(DeclareGlobalsFlags()));
__ CallRuntime(Runtime::kDeclareGlobals, 3);
// Return value is ignored.
}
+void FullCodeGenerator::DeclareModules(Handle<FixedArray> descriptions) {
+ // Call the runtime to declare the modules.
+ __ Push(descriptions);
+ __ CallRuntime(Runtime::kDeclareModules, 1);
+ // Return value is ignored.
+}
+
+
void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
Comment cmnt(masm_, "[ SwitchStatement");
Breakable nested_statement(this, stmt);
@@ -1010,7 +1000,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Record position before stub call for type feedback.
SetSourcePosition(clause->position());
- Handle<Code> ic = CompareIC::GetUninitialized(Token::EQ_STRICT);
+ Handle<Code> ic = CompareIC::GetUninitialized(isolate(), Token::EQ_STRICT);
CallIC(ic, RelocInfo::CODE_TARGET, clause->CompareId());
patch_site.EmitPatchInfo();
__ test(eax, eax);
@@ -1127,7 +1117,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
Handle<JSGlobalPropertyCell> cell =
isolate()->factory()->NewJSGlobalPropertyCell(
Handle<Object>(
- Smi::FromInt(TypeFeedbackCells::kForInFastCaseMarker)));
+ Smi::FromInt(TypeFeedbackCells::kForInFastCaseMarker),
+ isolate()));
RecordTypeFeedbackCell(stmt->ForInFeedbackId(), cell);
__ LoadHeapObject(ebx, cell);
__ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset),
@@ -1201,7 +1192,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ bind(loop_statement.continue_label());
__ add(Operand(esp, 0 * kPointerSize), Immediate(Smi::FromInt(1)));
- EmitStackCheck(stmt, &loop);
+ EmitBackEdgeBookkeeping(stmt, &loop);
__ jmp(&loop);
// Remove the pointers stored on the stack.
@@ -1299,7 +1290,9 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
// All extension objects were empty and it is safe to use a global
// load IC call.
- __ mov(edx, var->is_qml_global()?QmlGlobalObjectOperand():GlobalObjectOperand());
+ __ mov(edx, var->is_qml_global()
+ ? QmlGlobalObjectOperand()
+ : GlobalObjectOperand());
__ mov(ecx, var->name());
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
@@ -1354,9 +1347,9 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(Variable* var,
} else if (var->mode() == DYNAMIC_LOCAL) {
Variable* local = var->local_if_not_shadowed();
__ mov(eax, ContextSlotOperandCheckExtensions(local, slow));
- if (local->mode() == CONST ||
- local->mode() == CONST_HARMONY ||
- local->mode() == LET) {
+ if (local->mode() == LET ||
+ local->mode() == CONST ||
+ local->mode() == CONST_HARMONY) {
__ cmp(eax, isolate()->factory()->the_hole_value());
__ j(not_equal, done);
if (local->mode() == CONST) {
@@ -1383,7 +1376,9 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
Comment cmnt(masm_, "Global variable");
// Use inline caching. Variable name is passed in ecx and the global
// object in eax.
- __ mov(edx, var->is_qml_global()?QmlGlobalObjectOperand():GlobalObjectOperand());
+ __ mov(edx, var->is_qml_global()
+ ? QmlGlobalObjectOperand()
+ : GlobalObjectOperand());
__ mov(ecx, var->name());
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
CallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
@@ -1541,24 +1536,34 @@ void FullCodeGenerator::EmitAccessor(Expression* expression) {
void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Comment cmnt(masm_, "[ ObjectLiteral");
Handle<FixedArray> constant_properties = expr->constant_properties();
- __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- __ push(FieldOperand(edi, JSFunction::kLiteralsOffset));
- __ push(Immediate(Smi::FromInt(expr->literal_index())));
- __ push(Immediate(constant_properties));
int flags = expr->fast_elements()
? ObjectLiteral::kFastElements
: ObjectLiteral::kNoFlags;
flags |= expr->has_function()
? ObjectLiteral::kHasFunction
: ObjectLiteral::kNoFlags;
- __ push(Immediate(Smi::FromInt(flags)));
int properties_count = constant_properties->length() / 2;
if (expr->depth() > 1) {
+ __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+ __ push(FieldOperand(edi, JSFunction::kLiteralsOffset));
+ __ push(Immediate(Smi::FromInt(expr->literal_index())));
+ __ push(Immediate(constant_properties));
+ __ push(Immediate(Smi::FromInt(flags)));
__ CallRuntime(Runtime::kCreateObjectLiteral, 4);
- } else if (flags != ObjectLiteral::kFastElements ||
+ } else if (Serializer::enabled() || flags != ObjectLiteral::kFastElements ||
properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
+ __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+ __ push(FieldOperand(edi, JSFunction::kLiteralsOffset));
+ __ push(Immediate(Smi::FromInt(expr->literal_index())));
+ __ push(Immediate(constant_properties));
+ __ push(Immediate(Smi::FromInt(flags)));
__ CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
} else {
+ __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+ __ mov(eax, FieldOperand(edi, JSFunction::kLiteralsOffset));
+ __ mov(ebx, Immediate(Smi::FromInt(expr->literal_index())));
+ __ mov(ecx, Immediate(constant_properties));
+ __ mov(edx, Immediate(Smi::FromInt(flags)));
FastCloneShallowObjectStub stub(properties_count);
__ CallStub(&stub);
}
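
The object-literal hunk restructures the three paths so each sets up its own inputs: deep literals call the full runtime, literals the shallow-clone stub cannot handle (serializer enabled, non-fast elements, or too many properties) call the shallow runtime entry, and the remaining case passes register arguments to FastCloneShallowObjectStub. The selection logic as a sketch; names are illustrative and the flags test is simplified:

    #include <cstdio>

    enum LiteralPath { kRuntimeDeep, kRuntimeShallow, kStub };

    // Mirrors the branch order in VisitObjectLiteral; the flags test is
    // simplified here (the real code compares the whole flags word, which
    // also folds in ObjectLiteral::kHasFunction).
    LiteralPath ChoosePath(int depth, bool serializer_enabled,
                           bool plain_fast_elements, int properties_count,
                           int max_cloned) {
      if (depth > 1) return kRuntimeDeep;     // Runtime::kCreateObjectLiteral
      if (serializer_enabled || !plain_fast_elements ||
          properties_count > max_cloned) {
        return kRuntimeShallow;               // ...ObjectLiteralShallow
      }
      return kStub;                           // FastCloneShallowObjectStub
    }

    int main() {
      printf("%d\n", ChoosePath(1, false, true, 4, 8));  // 2: stub path
      return 0;
    }
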
@@ -1590,7 +1595,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
ASSERT(!CompileTimeValue::IsCompileTimeValue(value));
// Fall through.
case ObjectLiteral::Property::COMPUTED:
- if (key->handle()->IsSymbol()) {
+ if (key->handle()->IsInternalizedString()) {
if (property->emit_store()) {
VisitForAccumulatorValue(value);
__ mov(ecx, Immediate(key->handle()));
@@ -1679,6 +1684,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
__ IncrementCounter(isolate()->counters()->cow_arrays_created_stub(), 1);
FastCloneShallowArrayStub stub(
FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS,
+ DONT_TRACK_ALLOCATION_SITE,
length);
__ CallStub(&stub);
} else if (expr->depth() > 1) {
@@ -1688,12 +1694,19 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
} else {
ASSERT(IsFastSmiOrObjectElementsKind(constant_elements_kind) ||
FLAG_smi_only_arrays);
+ FastCloneShallowArrayStub::Mode mode =
+ FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS;
+ AllocationSiteMode allocation_site_mode = FLAG_track_allocation_sites
+ ? TRACK_ALLOCATION_SITE : DONT_TRACK_ALLOCATION_SITE;
+
// If the elements are already FAST_*_ELEMENTS, the boilerplate cannot
// change, so it's possible to specialize the stub in advance.
- FastCloneShallowArrayStub::Mode mode = has_constant_fast_elements
- ? FastCloneShallowArrayStub::CLONE_ELEMENTS
- : FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS;
- FastCloneShallowArrayStub stub(mode, length);
+ if (has_constant_fast_elements) {
+ mode = FastCloneShallowArrayStub::CLONE_ELEMENTS;
+ allocation_site_mode = DONT_TRACK_ALLOCATION_SITE;
+ }
+
+ FastCloneShallowArrayStub stub(mode, allocation_site_mode, length);
__ CallStub(&stub);
}
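
For array literals, allocation-site tracking is requested only when FLAG_track_allocation_sites is set and the elements are not already constant fast elements, since a boilerplate whose elements kind cannot change has nothing to track. A sketch of the mode selection:

    #include <cstdio>

    enum AllocationSiteMode { DONT_TRACK_ALLOCATION_SITE, TRACK_ALLOCATION_SITE };

    AllocationSiteMode ModeForArrayLiteral(bool flag_track_allocation_sites,
                                           bool has_constant_fast_elements) {
      if (has_constant_fast_elements) return DONT_TRACK_ALLOCATION_SITE;
      return flag_track_allocation_sites ? TRACK_ALLOCATION_SITE
                                         : DONT_TRACK_ALLOCATION_SITE;
    }

    int main() {
      printf("%d\n", ModeForArrayLiteral(true, false));  // 1: track the site
      return 0;
    }
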
@@ -1898,7 +1911,7 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
__ bind(&stub_call);
__ mov(eax, ecx);
BinaryOpStub stub(op, mode);
- CallIC(stub.GetCode(), RelocInfo::CODE_TARGET,
+ CallIC(stub.GetCode(isolate()), RelocInfo::CODE_TARGET,
expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
__ jmp(&done, Label::kNear);
@@ -1984,7 +1997,7 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
__ pop(edx);
BinaryOpStub stub(op, mode);
JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
- CallIC(stub.GetCode(), RelocInfo::CODE_TARGET,
+ CallIC(stub.GetCode(isolate()), RelocInfo::CODE_TARGET,
expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
context()->Plug(eax);
@@ -1992,7 +2005,7 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
void FullCodeGenerator::EmitAssignment(Expression* expr) {
- // Invalid left-hand sides are rewritten to have a 'throw
+ // Invalid left-hand sides are rewritten by the parser to have a 'throw
// ReferenceError' on the left-hand side.
if (!expr->IsValidLeftHandSide()) {
VisitForEffect(expr);
@@ -2052,7 +2065,9 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
if (var->IsUnallocated()) {
// Global var, const, or let.
__ mov(ecx, var->name());
- __ mov(edx, var->is_qml_global()?QmlGlobalObjectOperand():GlobalObjectOperand());
+ __ mov(edx, var->is_qml_global()
+ ? QmlGlobalObjectOperand()
+ : GlobalObjectOperand());
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->StoreIC_Initialize()
: isolate()->builtins()->StoreIC_Initialize_Strict();
@@ -2336,7 +2351,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
VariableProxy* proxy = callee->AsVariableProxy();
Property* property = callee->AsProperty();
- if (proxy != NULL && proxy->var()->is_possibly_eval()) {
+ if (proxy != NULL && proxy->var()->is_possibly_eval(isolate())) {
// In a call to eval, we first call %ResolvePossiblyDirectEval to
// resolve the function we need to call and the receiver of the call.
// Then we call the resolved function using the given arguments.
@@ -2373,7 +2388,9 @@ void FullCodeGenerator::VisitCall(Call* expr) {
} else if (proxy != NULL && proxy->var()->IsUnallocated()) {
// Push global object as receiver for the call IC.
- __ push(proxy->var()->is_qml_global()?QmlGlobalObjectOperand():GlobalObjectOperand());
+ __ push(proxy->var()->is_qml_global()
+ ? QmlGlobalObjectOperand()
+ : GlobalObjectOperand());
EmitCallWithIC(expr, proxy->name(), RelocInfo::CODE_TARGET_CONTEXT);
} else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
@@ -2478,7 +2495,7 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
__ mov(ebx, cell);
CallConstructStub stub(RECORD_CALL_TARGET);
- __ call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
+ __ call(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL);
PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
context()->Plug(eax);
}
@@ -2559,6 +2576,7 @@ void FullCodeGenerator::EmitIsObject(CallRuntime* expr) {
void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
+ // TODO(rossberg): incorporate symbols.
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
@@ -2633,7 +2651,7 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
__ cmp(ecx, FACTORY->hash_table_map());
__ j(equal, if_false);
- // Look for valueOf symbol in the descriptor array, and indicate false if
+ // Look for valueOf string in the descriptor array, and indicate false if
// found. Since we omit an enumeration index check, if it is added via a
// transition that shares its descriptor array, this is a false positive.
Label entry, loop, done;
@@ -2655,11 +2673,11 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
// Calculate location of the first key name.
__ add(ebx, Immediate(DescriptorArray::kFirstOffset));
// Loop through all the keys in the descriptor array. If one of these is the
- // symbol valueOf the result is false.
+ // internalized string "valueOf" the result is false.
__ jmp(&entry);
__ bind(&loop);
__ mov(edx, FieldOperand(ebx, 0));
- __ cmp(edx, FACTORY->value_of_symbol());
+ __ cmp(edx, FACTORY->value_of_string());
__ j(equal, if_false);
__ add(ebx, Immediate(DescriptorArray::kDescriptorSize * kPointerSize));
__ bind(&entry);
@@ -2694,6 +2712,28 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
}
+void FullCodeGenerator::EmitIsSymbol(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ __ JumpIfSmi(eax, if_false);
+ __ CmpObjectType(eax, SYMBOL_TYPE, ebx);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(equal, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
@@ -2896,12 +2936,12 @@ void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
// Functions have class 'Function'.
__ bind(&function);
- __ mov(eax, isolate()->factory()->function_class_symbol());
+ __ mov(eax, isolate()->factory()->function_class_string());
__ jmp(&done);
// Objects with a non-function constructor have class 'Object'.
__ bind(&non_function_constructor);
- __ mov(eax, isolate()->factory()->Object_symbol());
+ __ mov(eax, isolate()->factory()->Object_string());
__ jmp(&done);
// Non-JS objects have class null.
@@ -3078,6 +3118,38 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT_EQ(3, args->length());
+
+ VisitForStackValue(args->at(1)); // index
+ VisitForStackValue(args->at(2)); // value
+ __ pop(ecx);
+ __ pop(ebx);
+ VisitForAccumulatorValue(args->at(0)); // string
+
+ static const String::Encoding encoding = String::ONE_BYTE_ENCODING;
+ SeqStringSetCharGenerator::Generate(masm_, encoding, eax, ebx, ecx);
+ context()->Plug(eax);
+}
+
+
+void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT_EQ(3, args->length());
+
+ VisitForStackValue(args->at(1)); // index
+ VisitForStackValue(args->at(2)); // value
+ __ pop(ecx);
+ __ pop(ebx);
+ VisitForAccumulatorValue(args->at(0)); // string
+
+ static const String::Encoding encoding = String::TWO_BYTE_ENCODING;
+ SeqStringSetCharGenerator::Generate(masm_, encoding, eax, ebx, ecx);
+ context()->Plug(eax);
+}
+
+
void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
// Load the arguments on the stack and call the runtime function.
ZoneList<Expression*>* args = expr->arguments();
@@ -3587,7 +3659,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ cmp(scratch, kStringTag | kOneByteStringTag | kSeqStringTag);
__ j(not_equal, &bailout);
__ add(string_length,
- FieldOperand(string, SeqAsciiString::kLengthOffset));
+ FieldOperand(string, SeqOneByteString::kLengthOffset));
__ j(overflow, &bailout);
__ add(index, Immediate(1));
__ cmp(index, array_length);
@@ -3623,7 +3695,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// Add (separator length times array_length) - separator length
// to string_length.
__ mov(scratch, separator_operand);
- __ mov(scratch, FieldOperand(scratch, SeqAsciiString::kLengthOffset));
+ __ mov(scratch, FieldOperand(scratch, SeqOneByteString::kLengthOffset));
__ sub(string_length, scratch); // May be negative, temporarily.
__ imul(scratch, array_length_operand);
__ j(overflow, &bailout);
@@ -3637,11 +3709,11 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ AllocateAsciiString(result_pos, string_length, scratch,
index, string, &bailout);
__ mov(result_operand, result_pos);
- __ lea(result_pos, FieldOperand(result_pos, SeqAsciiString::kHeaderSize));
+ __ lea(result_pos, FieldOperand(result_pos, SeqOneByteString::kHeaderSize));
__ mov(string, separator_operand);
- __ cmp(FieldOperand(string, SeqAsciiString::kLengthOffset),
+ __ cmp(FieldOperand(string, SeqOneByteString::kLengthOffset),
Immediate(Smi::FromInt(1)));
__ j(equal, &one_char_separator);
__ j(greater, &long_separator);
@@ -3666,7 +3738,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
FieldOperand(string, String::kLengthOffset));
__ shr(string_length, 1);
__ lea(string,
- FieldOperand(string, SeqAsciiString::kHeaderSize));
+ FieldOperand(string, SeqOneByteString::kHeaderSize));
__ CopyBytes(string, result_pos, string_length, scratch);
__ add(index, Immediate(1));
__ bind(&loop_1_condition);
@@ -3679,7 +3751,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// One-character separator case
__ bind(&one_char_separator);
// Replace separator with its ASCII character value.
- __ mov_b(scratch, FieldOperand(string, SeqAsciiString::kHeaderSize));
+ __ mov_b(scratch, FieldOperand(string, SeqOneByteString::kHeaderSize));
__ mov_b(separator_operand, scratch);
__ Set(index, Immediate(0));
@@ -3707,7 +3779,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
FieldOperand(string, String::kLengthOffset));
__ shr(string_length, 1);
__ lea(string,
- FieldOperand(string, SeqAsciiString::kHeaderSize));
+ FieldOperand(string, SeqOneByteString::kHeaderSize));
__ CopyBytes(string, result_pos, string_length, scratch);
__ add(index, Immediate(1));
@@ -3736,7 +3808,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
FieldOperand(string, String::kLengthOffset));
__ shr(string_length, 1);
__ lea(string,
- FieldOperand(string, SeqAsciiString::kHeaderSize));
+ FieldOperand(string, SeqOneByteString::kHeaderSize));
__ CopyBytes(string, result_pos, string_length, scratch);
__ bind(&loop_3_entry);
@@ -3748,7 +3820,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
FieldOperand(string, String::kLengthOffset));
__ shr(string_length, 1);
__ lea(string,
- FieldOperand(string, SeqAsciiString::kHeaderSize));
+ FieldOperand(string, SeqOneByteString::kHeaderSize));
__ CopyBytes(string, result_pos, string_length, scratch);
__ add(index, Immediate(1));
@@ -3830,7 +3902,9 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
// but "delete this" is allowed.
ASSERT(language_mode() == CLASSIC_MODE || var->is_this());
if (var->IsUnallocated()) {
- __ push(var->is_qml_global() ? QmlGlobalObjectOperand() : GlobalObjectOperand());
+ __ push(var->is_qml_global()
+ ? QmlGlobalObjectOperand()
+ : GlobalObjectOperand());
__ push(Immediate(var->name()));
__ push(Immediate(Smi::FromInt(kNonStrictMode)));
__ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
@@ -3956,7 +4030,7 @@ void FullCodeGenerator::EmitUnaryOperation(UnaryOperation* expr,
// accumulator register eax.
VisitForAccumulatorValue(expr->expression());
SetSourcePosition(expr->position());
- CallIC(stub.GetCode(), RelocInfo::CODE_TARGET,
+ CallIC(stub.GetCode(isolate()), RelocInfo::CODE_TARGET,
expr->UnaryOperationFeedbackId());
context()->Plug(eax);
}
@@ -4078,7 +4152,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ mov(edx, eax);
__ mov(eax, Immediate(Smi::FromInt(1)));
BinaryOpStub stub(expr->binary_op(), NO_OVERWRITE);
- CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->CountBinOpFeedbackId());
+ CallIC(stub.GetCode(isolate()),
+ RelocInfo::CODE_TARGET,
+ expr->CountBinOpFeedbackId());
patch_site.EmitPatchInfo();
__ bind(&done);
@@ -4152,7 +4228,9 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
if (proxy != NULL && proxy->var()->IsUnallocated()) {
Comment cmnt(masm_, "Global variable");
- __ mov(edx, proxy->var()->is_qml_global() ? QmlGlobalObjectOperand() : GlobalObjectOperand());
+ __ mov(edx, proxy->var()->is_qml_global()
+ ? QmlGlobalObjectOperand()
+ : GlobalObjectOperand());
__ mov(ecx, Immediate(proxy->name()));
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
// Use a regular load, not a contextual load, to avoid a reference
@@ -4197,12 +4275,12 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
}
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- if (check->Equals(isolate()->heap()->number_symbol())) {
+ if (check->Equals(isolate()->heap()->number_string())) {
__ JumpIfSmi(eax, if_true);
__ cmp(FieldOperand(eax, HeapObject::kMapOffset),
isolate()->factory()->heap_number_map());
Split(equal, if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->string_symbol())) {
+ } else if (check->Equals(isolate()->heap()->string_string())) {
__ JumpIfSmi(eax, if_false);
__ CmpObjectType(eax, FIRST_NONSTRING_TYPE, edx);
__ j(above_equal, if_false);
@@ -4210,16 +4288,16 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
__ test_b(FieldOperand(edx, Map::kBitFieldOffset),
1 << Map::kIsUndetectable);
Split(zero, if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->boolean_symbol())) {
+ } else if (check->Equals(isolate()->heap()->boolean_string())) {
__ cmp(eax, isolate()->factory()->true_value());
__ j(equal, if_true);
__ cmp(eax, isolate()->factory()->false_value());
Split(equal, if_true, if_false, fall_through);
} else if (FLAG_harmony_typeof &&
- check->Equals(isolate()->heap()->null_symbol())) {
+ check->Equals(isolate()->heap()->null_string())) {
__ cmp(eax, isolate()->factory()->null_value());
Split(equal, if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->undefined_symbol())) {
+ } else if (check->Equals(isolate()->heap()->undefined_string())) {
__ cmp(eax, isolate()->factory()->undefined_value());
__ j(equal, if_true);
__ JumpIfSmi(eax, if_false);
@@ -4228,19 +4306,23 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
__ movzx_b(ecx, FieldOperand(edx, Map::kBitFieldOffset));
__ test(ecx, Immediate(1 << Map::kIsUndetectable));
Split(not_zero, if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->function_symbol())) {
+ } else if (check->Equals(isolate()->heap()->function_string())) {
__ JumpIfSmi(eax, if_false);
STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
__ CmpObjectType(eax, JS_FUNCTION_TYPE, edx);
__ j(equal, if_true);
__ CmpInstanceType(edx, JS_FUNCTION_PROXY_TYPE);
Split(equal, if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->object_symbol())) {
+ } else if (check->Equals(isolate()->heap()->object_string())) {
__ JumpIfSmi(eax, if_false);
if (!FLAG_harmony_typeof) {
__ cmp(eax, isolate()->factory()->null_value());
__ j(equal, if_true);
}
+ if (FLAG_harmony_symbols) {
+ __ CmpObjectType(eax, SYMBOL_TYPE, edx);
+ __ j(equal, if_true);
+ }
__ CmpObjectType(eax, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, edx);
__ j(below, if_false);
__ CmpInstanceType(edx, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
@@ -4297,29 +4379,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
default: {
VisitForAccumulatorValue(expr->right());
- Condition cc = no_condition;
- switch (op) {
- case Token::EQ_STRICT:
- case Token::EQ:
- cc = equal;
- break;
- case Token::LT:
- cc = less;
- break;
- case Token::GT:
- cc = greater;
- break;
- case Token::LTE:
- cc = less_equal;
- break;
- case Token::GTE:
- cc = greater_equal;
- break;
- case Token::IN:
- case Token::INSTANCEOF:
- default:
- UNREACHABLE();
- }
+ Condition cc = CompareIC::ComputeCondition(op);
__ pop(edx);
bool inline_smi_code = ShouldInlineSmiCase(op);
@@ -4336,7 +4396,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
// Record position and call the compare IC.
SetSourcePosition(expr->position());
- Handle<Code> ic = CompareIC::GetUninitialized(op);
+ Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
CallIC(ic, RelocInfo::CODE_TARGET, expr->CompareOperationFeedbackId());
patch_site.EmitPatchInfo();
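
The open-coded token-to-condition switch is replaced by the shared CompareIC::ComputeCondition helper; the deleted switch documents exactly the mapping the helper must reproduce. That mapping restated (this mirrors the removed code, not the helper's actual body):

    enum Condition { equal, less, greater, less_equal, greater_equal };
    enum Token { EQ, EQ_STRICT, LT, GT, LTE, GTE };

    Condition ComputeCondition(Token op) {
      switch (op) {
        case EQ:
        case EQ_STRICT: return equal;
        case LT:        return less;
        case GT:        return greater;
        case LTE:       return less_equal;
        case GTE:       return greater_equal;
      }
      return equal;  // unreachable for the ops handled here
    }

    int main() { return ComputeCondition(LT) == less ? 0 : 1; }
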
diff --git a/src/3rdparty/v8/src/ia32/ic-ia32.cc b/src/3rdparty/v8/src/ia32/ic-ia32.cc
index dae3bbd..428d830 100644
--- a/src/3rdparty/v8/src/ia32/ic-ia32.cc
+++ b/src/3rdparty/v8/src/ia32/ic-ia32.cc
@@ -100,7 +100,7 @@ static void GenerateStringDictionaryReceiverCheck(MacroAssembler* masm,
// storage. This function may fail to load a property even though it is
// in the dictionary, so code at miss_label must always call a backup
// property load that is complete. This function is safe to call if
-// name is not a symbol, and will jump to the miss_label in that
+// name is not internalized, and will jump to the miss_label in that
// case. The generated code assumes that the receiver has slow
// properties, is not a global object and does not have interceptors.
static void GenerateDictionaryLoad(MacroAssembler* masm,
@@ -157,7 +157,7 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
// storage. This function may fail to store a property even though it
// is in the dictionary, so code at miss_label must always call a
// backup property store that is complete. This function is safe to
-// call if name is not a symbol, and will jump to the miss_label in
+// call if name is not internalized, and will jump to the miss_label in
// that case. The generated code assumes that the receiver has slow
// properties, is not a global object and does not have interceptors.
static void GenerateDictionaryStore(MacroAssembler* masm,
@@ -216,50 +216,6 @@ static void GenerateDictionaryStore(MacroAssembler* masm,
}
-void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label miss;
-
- StubCompiler::GenerateLoadArrayLength(masm, edx, eax, &miss);
- __ bind(&miss);
- StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
-}
-
-
-void LoadIC::GenerateStringLength(MacroAssembler* masm,
- bool support_wrappers) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label miss;
-
- StubCompiler::GenerateLoadStringLength(masm, edx, eax, ebx, &miss,
- support_wrappers);
- __ bind(&miss);
- StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
-}
-
-
-void LoadIC::GenerateFunctionPrototype(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label miss;
-
- StubCompiler::GenerateLoadFunctionPrototype(masm, edx, eax, ebx, &miss);
- __ bind(&miss);
- StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
-}
-
-
// Checks the receiver for special cases (value type, slow case bits).
// Falls through for regular JS object.
static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
@@ -336,31 +292,31 @@ static void GenerateFastArrayLoad(MacroAssembler* masm,
}
-// Checks whether a key is an array index string or a symbol string.
-// Falls through if the key is a symbol.
+// Checks whether a key is an array index string or an internalized string.
+// Falls through if the key is an internalized string.
static void GenerateKeyStringCheck(MacroAssembler* masm,
Register key,
Register map,
Register hash,
Label* index_string,
- Label* not_symbol) {
+ Label* not_internalized) {
// Register use:
// key - holds the key and is unchanged. Assumed to be non-smi.
// Scratch registers:
// map - used to hold the map of the key.
// hash - used to hold the hash of the key.
__ CmpObjectType(key, FIRST_NONSTRING_TYPE, map);
- __ j(above_equal, not_symbol);
+ __ j(above_equal, not_internalized);
// Is the string an array index, with cached numeric value?
__ mov(hash, FieldOperand(key, String::kHashFieldOffset));
__ test(hash, Immediate(String::kContainsCachedArrayIndexMask));
__ j(zero, index_string);
- // Is the string a symbol?
- STATIC_ASSERT(kSymbolTag != 0);
- __ test_b(FieldOperand(map, Map::kInstanceTypeOffset), kIsSymbolMask);
- __ j(zero, not_symbol);
+ // Is the string internalized?
+ STATIC_ASSERT(kInternalizedTag != 0);
+ __ test_b(FieldOperand(map, Map::kInstanceTypeOffset), kIsInternalizedMask);
+ __ j(zero, not_internalized);
}
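
GenerateKeyStringCheck's contract after the rename: fall through only for internalized-string keys, branch to index_string when the string caches an array index, and branch to not_internalized for everything else. The same three-way classification over a hypothetical descriptor (field names are illustrative, not V8 types):

    struct KeyDesc {
      bool is_string;
      bool has_cached_array_index;  // kContainsCachedArrayIndexMask bits clear
      bool is_internalized;         // kIsInternalizedMask set in the map
    };

    enum KeyClass { kIndexString, kInternalized, kNotInternalized };

    KeyClass ClassifyKey(const KeyDesc& key) {
      if (!key.is_string) return kNotInternalized;          // j(above_equal, ...)
      if (key.has_cached_array_index) return kIndexString;  // j(zero, index_string)
      if (!key.is_internalized) return kNotInternalized;    // j(zero, ...)
      return kInternalized;                                 // fall through
    }

    int main() { return ClassifyKey({true, false, true}) == kInternalized ? 0 : 1; }
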
@@ -528,7 +484,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ xor_(eax, edi);
__ and_(eax, KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask);
- // Load the key (consisting of map and symbol) from the cache and
+ // Load the key (consisting of map and internalized string) from the cache and
// check for match.
Label load_in_object_property;
static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
@@ -647,7 +603,7 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
char_at_generator.GenerateSlow(masm, call_helper);
__ bind(&miss);
- GenerateMiss(masm, false);
+ GenerateMiss(masm, MISS);
}
@@ -689,7 +645,7 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
__ TailCallExternalReference(ref, 2, 1);
__ bind(&slow);
- GenerateMiss(masm, false);
+ GenerateMiss(masm, MISS);
}
@@ -714,7 +670,7 @@ void KeyedLoadIC::GenerateNonStrictArguments(MacroAssembler* masm) {
__ mov(eax, unmapped_location);
__ Ret();
__ bind(&slow);
- GenerateMiss(masm, false);
+ GenerateMiss(masm, MISS);
}
@@ -743,7 +699,7 @@ void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
__ RecordWrite(ebx, edi, edx, kDontSaveFPRegs);
__ Ret();
__ bind(&slow);
- GenerateMiss(masm, false);
+ GenerateMiss(masm, MISS);
}
@@ -835,7 +791,9 @@ static void KeyedStoreGenerateGenericHelper(
ebx,
edi,
slow);
- ElementsTransitionGenerator::GenerateSmiToDouble(masm, slow);
+ AllocationSiteMode mode = AllocationSiteInfo::GetMode(FAST_SMI_ELEMENTS,
+ FAST_DOUBLE_ELEMENTS);
+ ElementsTransitionGenerator::GenerateSmiToDouble(masm, mode, slow);
__ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
__ jmp(&fast_double_without_map_check);
@@ -846,7 +804,9 @@ static void KeyedStoreGenerateGenericHelper(
ebx,
edi,
slow);
- ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm);
+ mode = AllocationSiteInfo::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
+ ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm, mode,
+ slow);
__ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
__ jmp(&finish_object_store);
@@ -860,7 +820,8 @@ static void KeyedStoreGenerateGenericHelper(
ebx,
edi,
slow);
- ElementsTransitionGenerator::GenerateDoubleToObject(masm, slow);
+ mode = AllocationSiteInfo::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
+ ElementsTransitionGenerator::GenerateDoubleToObject(masm, mode, slow);
__ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
__ jmp(&finish_object_store);
}
@@ -1237,7 +1198,7 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
__ bind(&check_string);
GenerateKeyStringCheck(masm, ecx, eax, ebx, &index_string, &slow_call);
- // The key is known to be a symbol.
+ // The key is known to be an internalized string.
// If the receiver is a regular JS object with slow properties then do
// a quick inline probe of the receiver's dictionary.
// Otherwise do the monomorphic cache probe.
@@ -1263,7 +1224,7 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
__ bind(&slow_call);
// This branch is taken if:
// - the receiver requires boxing or access check,
- // - the key is neither smi nor symbol,
+ // - the key is neither smi nor an internalized string,
// - the value loaded is not a function,
// - there is hope that the runtime will create a monomorphic call stub
// that will get fetched next time.
@@ -1334,9 +1295,10 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// -----------------------------------
// Probe the stub cache.
- Code::Flags flags = Code::ComputeFlags(Code::LOAD_IC, MONOMORPHIC);
- Isolate::Current()->stub_cache()->GenerateProbe(masm, flags, edx, ecx, ebx,
- eax);
+ Code::Flags flags = Code::ComputeFlags(
+ Code::LOAD_IC, MONOMORPHIC, Code::HANDLER_FRAGMENT);
+ Isolate::Current()->stub_cache()->GenerateProbe(
+ masm, flags, edx, ecx, ebx, eax);
// Cache miss: Jump to runtime.
GenerateMiss(masm);
@@ -1385,7 +1347,7 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
}
-void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, bool force_generic) {
+void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, ICMissMode miss_mode) {
// ----------- S t a t e -------------
// -- ecx : key
// -- edx : receiver
@@ -1400,7 +1362,7 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, bool force_generic) {
__ push(ebx); // return address
// Perform tail call to the entry.
- ExternalReference ref = force_generic
+ ExternalReference ref = miss_mode == MISS_FORCE_GENERIC
? ExternalReference(IC_Utility(kKeyedLoadIC_MissForceGeneric),
masm->isolate())
: ExternalReference(IC_Utility(kKeyedLoadIC_Miss), masm->isolate());
@@ -1465,65 +1427,6 @@ void StoreIC::GenerateMiss(MacroAssembler* masm) {
}
-void StoreIC::GenerateArrayLength(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- eax : value
- // -- ecx : name
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- //
- // This accepts as a receiver anything JSArray::SetElementsLength accepts
- // (currently anything except for external arrays which means anything with
- // elements of FixedArray type). Value must be a number, but only smis are
- // accepted as the most common case.
-
- Label miss;
-
- Register receiver = edx;
- Register value = eax;
- Register scratch = ebx;
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, &miss);
-
- // Check that the object is a JS array.
- __ CmpObjectType(receiver, JS_ARRAY_TYPE, scratch);
- __ j(not_equal, &miss);
-
- // Check that elements are FixedArray.
- // We rely on StoreIC_ArrayLength below to deal with all types of
- // fast elements (including COW).
- __ mov(scratch, FieldOperand(receiver, JSArray::kElementsOffset));
- __ CmpObjectType(scratch, FIXED_ARRAY_TYPE, scratch);
- __ j(not_equal, &miss);
-
- // Check that the array has fast properties, otherwise the length
- // property might have been redefined.
- __ mov(scratch, FieldOperand(receiver, JSArray::kPropertiesOffset));
- __ CompareRoot(FieldOperand(scratch, FixedArray::kMapOffset),
- Heap::kHashTableMapRootIndex);
- __ j(equal, &miss);
-
- // Check that value is a smi.
- __ JumpIfNotSmi(value, &miss);
-
- // Prepare tail call to StoreIC_ArrayLength.
- __ pop(scratch);
- __ push(receiver);
- __ push(value);
- __ push(scratch); // return address
-
- ExternalReference ref =
- ExternalReference(IC_Utility(kStoreIC_ArrayLength), masm->isolate());
- __ TailCallExternalReference(ref, 2, 1);
-
- __ bind(&miss);
-
- GenerateMiss(masm);
-}
-
-
void StoreIC::GenerateNormal(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : value
@@ -1598,7 +1501,7 @@ void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
}
-void KeyedStoreIC::GenerateMiss(MacroAssembler* masm, bool force_generic) {
+void KeyedStoreIC::GenerateMiss(MacroAssembler* masm, ICMissMode miss_mode) {
// ----------- S t a t e -------------
// -- eax : value
// -- ecx : key
@@ -1613,7 +1516,7 @@ void KeyedStoreIC::GenerateMiss(MacroAssembler* masm, bool force_generic) {
__ push(ebx);
// Do tail-call to runtime routine.
- ExternalReference ref = force_generic
+ ExternalReference ref = miss_mode == MISS_FORCE_GENERIC
? ExternalReference(IC_Utility(kKeyedStoreIC_MissForceGeneric),
masm->isolate())
: ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
@@ -1650,7 +1553,9 @@ void KeyedStoreIC::GenerateTransitionElementsSmiToDouble(MacroAssembler* masm) {
// Must return the modified receiver in eax.
if (!FLAG_trace_elements_transitions) {
Label fail;
- ElementsTransitionGenerator::GenerateSmiToDouble(masm, &fail);
+ AllocationSiteMode mode = AllocationSiteInfo::GetMode(FAST_SMI_ELEMENTS,
+ FAST_DOUBLE_ELEMENTS);
+ ElementsTransitionGenerator::GenerateSmiToDouble(masm, mode, &fail);
__ mov(eax, edx);
__ Ret();
__ bind(&fail);
@@ -1676,7 +1581,9 @@ void KeyedStoreIC::GenerateTransitionElementsDoubleToObject(
// Must return the modified receiver in eax.
if (!FLAG_trace_elements_transitions) {
Label fail;
- ElementsTransitionGenerator::GenerateDoubleToObject(masm, &fail);
+ AllocationSiteMode mode = AllocationSiteInfo::GetMode(FAST_DOUBLE_ELEMENTS,
+ FAST_ELEMENTS);
+ ElementsTransitionGenerator::GenerateDoubleToObject(masm, mode, &fail);
__ mov(eax, edx);
__ Ret();
__ bind(&fail);
@@ -1715,7 +1622,7 @@ Condition CompareIC::ComputeCondition(Token::Value op) {
}
-static bool HasInlinedSmiCode(Address address) {
+bool CompareIC::HasInlinedSmiCode(Address address) {
// The address of the instruction following the call.
Address test_instruction_address =
address + Assembler::kCallTargetAddressOffset;
@@ -1726,40 +1633,6 @@ static bool HasInlinedSmiCode(Address address) {
}
-void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
- HandleScope scope;
- Handle<Code> rewritten;
- State previous_state = GetState();
-
- State state = TargetState(previous_state, HasInlinedSmiCode(address()), x, y);
- if (state == GENERIC) {
- CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS);
- rewritten = stub.GetCode();
- } else {
- ICCompareStub stub(op_, state);
- if (state == KNOWN_OBJECTS) {
- stub.set_known_map(Handle<Map>(Handle<JSObject>::cast(x)->map()));
- }
- rewritten = stub.GetCode();
- }
- set_target(*rewritten);
-
-#ifdef DEBUG
- if (FLAG_trace_ic) {
- PrintF("[CompareIC (%s->%s)#%s]\n",
- GetStateName(previous_state),
- GetStateName(state),
- Token::Name(op_));
- }
-#endif
-
- // Activate inlined smi code.
- if (previous_state == UNINITIALIZED) {
- PatchInlinedSmiCode(address(), ENABLE_INLINED_SMI_CHECK);
- }
-}
-
-
void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
// The address of the instruction following the call.
Address test_instruction_address =
diff --git a/src/3rdparty/v8/src/ia32/lithium-codegen-ia32.cc b/src/3rdparty/v8/src/ia32/lithium-codegen-ia32.cc
index 34ce1cd..8ef3bdf 100644
--- a/src/3rdparty/v8/src/ia32/lithium-codegen-ia32.cc
+++ b/src/3rdparty/v8/src/ia32/lithium-codegen-ia32.cc
@@ -30,6 +30,7 @@
#if defined(V8_TARGET_ARCH_IA32)
#include "ia32/lithium-codegen-ia32.h"
+#include "ic.h"
#include "code-stubs.h"
#include "deoptimizer.h"
#include "stub-cache.h"
@@ -70,22 +71,23 @@ bool LCodeGen::GenerateCode() {
HPhase phase("Z_Code generation", chunk());
ASSERT(is_unused());
status_ = GENERATING;
- CpuFeatures::Scope scope(SSE2);
-
- CodeStub::GenerateFPStubs();
// Open a frame scope to indicate that there is a frame on the stack. The
// MANUAL indicates that the scope shouldn't actually generate code to set up
// the frame (that is done in GeneratePrologue).
FrameScope frame_scope(masm_, StackFrame::MANUAL);
- dynamic_frame_alignment_ = (chunk()->num_double_slots() > 2 &&
- !chunk()->graph()->is_recursive()) ||
- !info()->osr_ast_id().IsNone();
+ support_aligned_spilled_doubles_ = info()->IsOptimizing();
+
+ dynamic_frame_alignment_ = info()->IsOptimizing() &&
+ ((chunk()->num_double_slots() > 2 &&
+ !chunk()->graph()->is_recursive()) ||
+ !info()->osr_ast_id().IsNone());
return GeneratePrologue() &&
GenerateBody() &&
GenerateDeferredCode() &&
+ GenerateJumpTable() &&
GenerateSafepointTable();
}
@@ -94,8 +96,17 @@ void LCodeGen::FinishCode(Handle<Code> code) {
ASSERT(is_done());
code->set_stack_slots(GetStackSlotCount());
code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
+ if (FLAG_weak_embedded_maps_in_optimized_code) {
+ RegisterDependentCodeForEmbeddedMaps(code);
+ }
PopulateDeoptimizationData(code);
- Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code);
+ if (!info()->IsStub()) {
+ Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code);
+ }
+ for (int i = 0; i < prototype_maps_.length(); i++) {
+ prototype_maps_.at(i)->AddDependentCode(
+ DependentCode::kPrototypeCheckGroup, code);
+ }
}
@@ -126,123 +137,146 @@ void LCodeGen::Comment(const char* format, ...) {
bool LCodeGen::GeneratePrologue() {
ASSERT(is_generating());
- ProfileEntryHookStub::MaybeCallEntryHook(masm_);
+ if (info()->IsOptimizing()) {
+ ProfileEntryHookStub::MaybeCallEntryHook(masm_);
#ifdef DEBUG
- if (strlen(FLAG_stop_at) > 0 &&
- info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
- __ int3();
- }
+ if (strlen(FLAG_stop_at) > 0 &&
+ info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
+ __ int3();
+ }
#endif
- // Strict mode functions and builtins need to replace the receiver
- // with undefined when called as functions (without an explicit
- // receiver object). ecx is zero for method calls and non-zero for
- // function calls.
- if (!info_->is_classic_mode() || info_->is_native()) {
- Label begin;
- __ bind(&begin);
- Label ok;
- __ test(ecx, Operand(ecx));
- __ j(zero, &ok, Label::kNear);
- // +1 for return address.
- int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize;
- __ mov(Operand(esp, receiver_offset),
- Immediate(isolate()->factory()->undefined_value()));
- __ bind(&ok);
- ASSERT(!FLAG_age_code ||
- (kSizeOfOptimizedStrictModePrologue == ok.pos() - begin.pos()));
+ // Strict mode functions and builtins need to replace the receiver
+ // with undefined when called as functions (without an explicit
+ // receiver object). ecx is zero for method calls and non-zero for
+ // function calls.
+ if (!info_->is_classic_mode() || info_->is_native()) {
+ Label ok;
+ __ test(ecx, Operand(ecx));
+ __ j(zero, &ok, Label::kNear);
+ // +1 for return address.
+ int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize;
+ __ mov(Operand(esp, receiver_offset),
+ Immediate(isolate()->factory()->undefined_value()));
+ __ bind(&ok);
+ }
+
+ if (support_aligned_spilled_doubles_ && dynamic_frame_alignment_) {
+ // Move state of dynamic frame alignment into edx.
+ __ mov(edx, Immediate(kNoAlignmentPadding));
+
+ Label do_not_pad, align_loop;
+ STATIC_ASSERT(kDoubleSize == 2 * kPointerSize);
+ // Align esp + 4 to a multiple of 2 * kPointerSize.
+ __ test(esp, Immediate(kPointerSize));
+ __ j(not_zero, &do_not_pad, Label::kNear);
+ __ push(Immediate(0));
+ __ mov(ebx, esp);
+ __ mov(edx, Immediate(kAlignmentPaddingPushed));
+ // Copy arguments, receiver, and return address.
+ __ mov(ecx, Immediate(scope()->num_parameters() + 2));
+
+ __ bind(&align_loop);
+ __ mov(eax, Operand(ebx, 1 * kPointerSize));
+ __ mov(Operand(ebx, 0), eax);
+ __ add(Operand(ebx), Immediate(kPointerSize));
+ __ dec(ecx);
+ __ j(not_zero, &align_loop, Label::kNear);
+ __ mov(Operand(ebx, 0), Immediate(kAlignmentZapValue));
+ __ bind(&do_not_pad);
+ }
}
+ info()->set_prologue_offset(masm_->pc_offset());
+ if (NeedsEagerFrame()) {
+ ASSERT(!frame_is_built_);
+ frame_is_built_ = true;
+ __ push(ebp); // Caller's frame pointer.
+ __ mov(ebp, esp);
+ __ push(esi); // Callee's context.
+ if (info()->IsStub()) {
+ __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
+ } else {
+ __ push(edi); // Callee's JS function.
+ }
+ }
- if (dynamic_frame_alignment_) {
- Label begin;
- __ bind(&begin);
- // Move state of dynamic frame alignment into edx.
- __ mov(edx, Immediate(kNoAlignmentPadding));
-
- Label do_not_pad, align_loop;
- STATIC_ASSERT(kDoubleSize == 2 * kPointerSize);
- // Align esp + 4 to a multiple of 2 * kPointerSize.
- __ test(esp, Immediate(kPointerSize));
- __ j(not_zero, &do_not_pad, Label::kNear);
- __ push(Immediate(0));
- __ mov(ebx, esp);
- __ mov(edx, Immediate(kAlignmentPaddingPushed));
- // Copy arguments, receiver, and return address.
- __ mov(ecx, Immediate(scope()->num_parameters() + 2));
-
- __ bind(&align_loop);
- __ mov(eax, Operand(ebx, 1 * kPointerSize));
- __ mov(Operand(ebx, 0), eax);
- __ add(Operand(ebx), Immediate(kPointerSize));
- __ dec(ecx);
- __ j(not_zero, &align_loop, Label::kNear);
- __ mov(Operand(ebx, 0), Immediate(kAlignmentZapValue));
- __ bind(&do_not_pad);
- ASSERT(!FLAG_age_code ||
- (kSizeOfOptimizedAlignStackPrologue ==
- do_not_pad.pos() - begin.pos()));
- }
-
- __ push(ebp); // Caller's frame pointer.
- __ mov(ebp, esp);
- __ push(esi); // Callee's context.
- __ push(edi); // Callee's JS function.
-
- if (dynamic_frame_alignment_ && FLAG_debug_code) {
+ if (info()->IsOptimizing() &&
+ dynamic_frame_alignment_ &&
+ FLAG_debug_code) {
__ test(esp, Immediate(kPointerSize));
__ Assert(zero, "frame is expected to be aligned");
}
// Reserve space for the stack slots needed by the code.
int slots = GetStackSlotCount();
- ASSERT_GE(slots, 1);
- if (slots == 1) {
- if (dynamic_frame_alignment_) {
- __ push(edx);
- } else {
- __ push(Immediate(kNoAlignmentPadding));
- }
- } else {
- if (FLAG_debug_code) {
- __ mov(Operand(eax), Immediate(slots));
- Label loop;
- __ bind(&loop);
- __ push(Immediate(kSlotsZapValue));
- __ dec(eax);
- __ j(not_zero, &loop);
+ ASSERT(slots != 0 || !info()->IsOptimizing());
+ if (slots > 0) {
+ if (slots == 1) {
+ if (dynamic_frame_alignment_) {
+ __ push(edx);
+ } else {
+ __ push(Immediate(kNoAlignmentPadding));
+ }
} else {
- __ sub(Operand(esp), Immediate(slots * kPointerSize));
- #ifdef _MSC_VER
- // On windows, you may not access the stack more than one page below
- // the most recently mapped page. To make the allocated area randomly
- // accessible, we write to each page in turn (the value is irrelevant).
- const int kPageSize = 4 * KB;
- for (int offset = slots * kPointerSize - kPageSize;
- offset > 0;
- offset -= kPageSize) {
- __ mov(Operand(esp, offset), eax);
+ if (FLAG_debug_code) {
+ __ sub(Operand(esp), Immediate(slots * kPointerSize));
+ __ push(eax);
+ __ mov(Operand(eax), Immediate(slots));
+ Label loop;
+ __ bind(&loop);
+ __ mov(MemOperand(esp, eax, times_4, 0),
+ Immediate(kSlotsZapValue));
+ __ dec(eax);
+ __ j(not_zero, &loop);
+ __ pop(eax);
+ } else {
+ __ sub(Operand(esp), Immediate(slots * kPointerSize));
+#ifdef _MSC_VER
+ // On windows, you may not access the stack more than one page below
+ // the most recently mapped page. To make the allocated area randomly
+ // accessible, we write to each page in turn (the value is irrelevant).
+ const int kPageSize = 4 * KB;
+ for (int offset = slots * kPointerSize - kPageSize;
+ offset > 0;
+ offset -= kPageSize) {
+ __ mov(Operand(esp, offset), eax);
+ }
+#endif
+ }
+
+ if (support_aligned_spilled_doubles_) {
+ Comment(";;; Store dynamic frame alignment tag for spilled doubles");
+ // Store dynamic frame alignment state in the first local.
+ int offset = JavaScriptFrameConstants::kDynamicAlignmentStateOffset;
+ if (dynamic_frame_alignment_) {
+ __ mov(Operand(ebp, offset), edx);
+ } else {
+ __ mov(Operand(ebp, offset), Immediate(kNoAlignmentPadding));
+ }
}
- #endif
}
- // Store dynamic frame alignment state in the first local.
- if (dynamic_frame_alignment_) {
- __ mov(Operand(ebp,
- JavaScriptFrameConstants::kDynamicAlignmentStateOffset),
- edx);
- } else {
- __ mov(Operand(ebp,
- JavaScriptFrameConstants::kDynamicAlignmentStateOffset),
- Immediate(kNoAlignmentPadding));
+ if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(SSE2)) {
+ Comment(";;; Save clobbered callee double registers");
+ CpuFeatures::Scope scope(SSE2);
+ int count = 0;
+ BitVector* doubles = chunk()->allocated_double_registers();
+ BitVector::Iterator save_iterator(doubles);
+ while (!save_iterator.Done()) {
+ __ movdbl(MemOperand(esp, count * kDoubleSize),
+ XMMRegister::FromAllocationIndex(save_iterator.Current()));
+ save_iterator.Advance();
+ count++;
+ }
}
}
// Possibly allocate a local context.
- int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+ int heap_slots = info_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
if (heap_slots > 0 ||
- (scope()->is_qml_mode() && scope()->is_global_scope())) {
+ (scope() != NULL && scope()->is_qml_mode() && scope()->is_global_scope())) {
Comment(";;; Allocate local context");
// Argument to NewContext is the function, which is still in edi.
__ push(edi);
@@ -281,7 +315,7 @@ bool LCodeGen::GeneratePrologue() {
}
// Trace the call.
- if (FLAG_trace) {
+ if (FLAG_trace && info()->IsOptimizing()) {
// We have not executed any compiled code yet, so esi still holds the
// incoming context.
__ CallRuntime(Runtime::kTraceEnter, 0);
@@ -303,7 +337,30 @@ bool LCodeGen::GenerateBody() {
}
if (emit_instructions) {
- Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic());
+ if (FLAG_code_comments) {
+ HValue* hydrogen = instr->hydrogen_value();
+ if (hydrogen != NULL) {
+ if (hydrogen->IsChange()) {
+ HValue* changed_value = HChange::cast(hydrogen)->value();
+ int use_id = 0;
+ const char* use_mnemo = "dead";
+ if (hydrogen->UseCount() >= 1) {
+ HValue* use_value = hydrogen->uses().value();
+ use_id = use_value->id();
+ use_mnemo = use_value->Mnemonic();
+ }
+ Comment(";;; @%d: %s. <of #%d %s for #%d %s>",
+ current_instruction_, instr->Mnemonic(),
+ changed_value->id(), changed_value->Mnemonic(),
+ use_id, use_mnemo);
+ } else {
+ Comment(";;; @%d: %s. <#%d>", current_instruction_,
+ instr->Mnemonic(), hydrogen->id());
+ }
+ } else {
+ Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic());
+ }
+ }
instr->CompileToNative(this);
}
}
@@ -312,16 +369,111 @@ bool LCodeGen::GenerateBody() {
}
+bool LCodeGen::GenerateJumpTable() {
+ Label needs_frame_not_call;
+ Label needs_frame_is_call;
+ for (int i = 0; i < jump_table_.length(); i++) {
+ __ bind(&jump_table_[i].label);
+ Address entry = jump_table_[i].address;
+ bool is_lazy_deopt = jump_table_[i].is_lazy_deopt;
+ Deoptimizer::BailoutType type =
+ is_lazy_deopt ? Deoptimizer::LAZY : Deoptimizer::EAGER;
+ int id = Deoptimizer::GetDeoptimizationId(entry, type);
+ if (id == Deoptimizer::kNotDeoptimizationEntry) {
+ Comment(";;; jump table entry %d.", i);
+ } else {
+ Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
+ }
+ if (jump_table_[i].needs_frame) {
+ __ push(Immediate(ExternalReference::ForDeoptEntry(entry)));
+ if (is_lazy_deopt) {
+ if (needs_frame_is_call.is_bound()) {
+ __ jmp(&needs_frame_is_call);
+ } else {
+ __ bind(&needs_frame_is_call);
+ __ push(MemOperand(ebp, StandardFrameConstants::kContextOffset));
+ // This variant of deopt can only be used with stubs. Since we don't
+ // have a function pointer to install in the stack frame that we're
+ // building, install a special marker there instead.
+ ASSERT(info()->IsStub());
+ __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
+ // Push a PC inside the function so that the deopt code can find where
+ // the deopt comes from. It doesn't have to be the precise return
+ // address of a "calling" LAZY deopt, it only has to be somewhere
+ // inside the code body.
+ Label push_approx_pc;
+ __ call(&push_approx_pc);
+ __ bind(&push_approx_pc);
+ // Push the continuation which was stashed where the ebp should
+ // be. Replace it with the saved ebp.
+ __ push(MemOperand(esp, 3 * kPointerSize));
+ __ mov(MemOperand(esp, 4 * kPointerSize), ebp);
+ __ lea(ebp, MemOperand(esp, 4 * kPointerSize));
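+ // From esp the stack now holds: the continuation (the deopt entry),
+ // the approximate pc, the STUB marker, the context and the saved ebp
+ // (where ebp points), so the ret below jumps to the deopt entry with
+ // a stub frame already in place.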
+ __ ret(0); // Call the continuation without clobbering registers.
+ }
+ } else {
+ if (needs_frame_not_call.is_bound()) {
+ __ jmp(&needs_frame_not_call);
+ } else {
+ __ bind(&needs_frame_not_call);
+ __ push(MemOperand(ebp, StandardFrameConstants::kContextOffset));
+ // This variant of deopt can only be used with stubs. Since we don't
+ // have a function pointer to install in the stack frame that we're
+ // building, install a special marker there instead.
+ ASSERT(info()->IsStub());
+ __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
+ // Push the continuation which was stashed where the ebp should
+ // be. Replace it with the saved ebp.
+ __ push(MemOperand(esp, 2 * kPointerSize));
+ __ mov(MemOperand(esp, 3 * kPointerSize), ebp);
+ __ lea(ebp, MemOperand(esp, 3 * kPointerSize));
+ __ ret(0); // Call the continuation without clobbering registers.
+ }
+ }
+ } else {
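+ // No frame needs to be built here: lazy bailouts are CALLed so that a
+ // return address is on the stack for the deoptimizer to find, while
+ // eager bailouts simply jump to the entry.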
+ if (is_lazy_deopt) {
+ __ call(entry, RelocInfo::RUNTIME_ENTRY);
+ } else {
+ __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
+ }
+ }
+ }
+ return !is_aborted();
+}
+
+
bool LCodeGen::GenerateDeferredCode() {
ASSERT(is_generating());
if (deferred_.length() > 0) {
for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
LDeferredCode* code = deferred_[i];
__ bind(code->entry());
+ if (NeedsDeferredFrame()) {
+ Comment(";;; Deferred build frame",
+ code->instruction_index(),
+ code->instr()->Mnemonic());
+ ASSERT(!frame_is_built_);
+ ASSERT(info()->IsStub());
+ frame_is_built_ = true;
+ // Build the frame in such a way that esi isn't trashed.
+ __ push(ebp); // Caller's frame pointer.
+ __ push(Operand(ebp, StandardFrameConstants::kContextOffset));
+ __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
+ __ lea(ebp, Operand(esp, 2 * kPointerSize));
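+ // ebp now points at the pushed caller ebp, as in a regular frame.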
+ }
Comment(";;; Deferred code @%d: %s.",
code->instruction_index(),
code->instr()->Mnemonic());
code->Generate();
+ if (NeedsDeferredFrame()) {
+ Comment(";;; Deferred destroy frame",
+ code->instruction_index(),
+ code->instr()->Mnemonic());
+ ASSERT(frame_is_built_);
+ frame_is_built_ = false;
+ __ mov(esp, ebp);
+ __ pop(ebp);
+ }
__ jmp(code->exit());
}
}
@@ -335,6 +487,15 @@ bool LCodeGen::GenerateDeferredCode() {
bool LCodeGen::GenerateSafepointTable() {
ASSERT(is_done());
+ if (!info()->IsStub()) {
+ // For lazy deoptimization we need space to patch a call after every call.
+ // Ensure there is always space for such patching, even if the code ends
+ // in a call.
+ int target_offset = masm()->pc_offset() + Deoptimizer::patch_size();
+ while (masm()->pc_offset() < target_offset) {
+ masm()->nop();
+ }
+ }
safepoints_.Emit(masm(), GetStackSlotCount());
return !is_aborted();
}
@@ -350,6 +511,11 @@ XMMRegister LCodeGen::ToDoubleRegister(int index) const {
}
+bool LCodeGen::IsX87TopOfStack(LOperand* op) const {
+ return op->IsDoubleRegister();
+}
+
+
Register LCodeGen::ToRegister(LOperand* op) const {
ASSERT(op->IsRegister());
return ToRegister(op->index());
@@ -364,8 +530,6 @@ XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
int LCodeGen::ToInteger32(LConstantOperand* op) const {
HConstant* constant = chunk_->LookupConstant(op);
- ASSERT(chunk_->LookupLiteralRepresentation(op).IsInteger32());
- ASSERT(constant->HasInteger32Value());
return constant->Integer32Value();
}
@@ -393,30 +557,20 @@ Operand LCodeGen::ToOperand(LOperand* op) const {
if (op->IsRegister()) return Operand(ToRegister(op));
if (op->IsDoubleRegister()) return Operand(ToDoubleRegister(op));
ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
- int index = op->index();
- if (index >= 0) {
- // Local or spill slot. Skip the frame pointer, function, and
- // context in the fixed part of the frame.
- return Operand(ebp, -(index + 3) * kPointerSize);
- } else {
- // Incoming parameter. Skip the return address.
- return Operand(ebp, -(index - 1) * kPointerSize);
- }
+ return Operand(ebp, StackSlotOffset(op->index()));
}
Operand LCodeGen::HighOperand(LOperand* op) {
ASSERT(op->IsDoubleStackSlot());
- int index = op->index();
- int offset = (index >= 0) ? index + 3 : index - 1;
- return Operand(ebp, -offset * kPointerSize);
+ return Operand(ebp, StackSlotOffset(op->index()) + kPointerSize);
}
void LCodeGen::WriteTranslation(LEnvironment* environment,
Translation* translation,
- int* arguments_index,
- int* arguments_count) {
+ int* pushed_arguments_index,
+ int* pushed_arguments_count) {
if (environment == NULL) return;
// The translation includes one command per value in the environment.
@@ -428,14 +582,16 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
// arguments index points to the first element of a sequence of tagged
// values on the stack that represent the arguments. This needs to be
// kept in sync with the LArgumentsElements implementation.
- *arguments_index = -environment->parameter_count();
- *arguments_count = environment->parameter_count();
+ *pushed_arguments_index = -environment->parameter_count();
+ *pushed_arguments_count = environment->parameter_count();
WriteTranslation(environment->outer(),
translation,
- arguments_index,
- arguments_count);
- int closure_id = *info()->closure() != *environment->closure()
+ pushed_arguments_index,
+ pushed_arguments_count);
+ bool has_closure_id = !info()->closure().is_null() &&
+ *info()->closure() != *environment->closure();
+ int closure_id = has_closure_id
? DefineDeoptimizationLiteral(environment->closure())
: Translation::kSelfLiteralId;
switch (environment->frame_type()) {
@@ -458,16 +614,28 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
case ARGUMENTS_ADAPTOR:
translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
break;
+ case STUB:
+ translation->BeginCompiledStubFrame();
+ break;
+ default:
+ UNREACHABLE();
}
// Inlined frames which push their arguments cause the index to be
- // bumped and another stack area to be used for materialization.
- if (environment->entry() != NULL &&
- environment->entry()->arguments_pushed()) {
- *arguments_index = *arguments_index < 0
- ? GetStackSlotCount()
- : *arguments_index + *arguments_count;
- *arguments_count = environment->entry()->arguments_count() + 1;
+ // bumped and another stack area to be used for materialization,
+ // otherwise actual argument values are unknown for inlined frames.
+ bool arguments_known = true;
+ int arguments_index = *pushed_arguments_index;
+ int arguments_count = *pushed_arguments_count;
+ if (environment->entry() != NULL) {
+ arguments_known = environment->entry()->arguments_pushed();
+ arguments_index = arguments_index < 0
+ ? GetStackSlotCount() : arguments_index + arguments_count;
+ arguments_count = environment->entry()->arguments_count() + 1;
+ if (environment->entry()->arguments_pushed()) {
+ *pushed_arguments_index = arguments_index;
+ *pushed_arguments_count = arguments_count;
+ }
}
for (int i = 0; i < translation_size; ++i) {
@@ -482,8 +650,9 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
environment->spilled_registers()[value->index()],
environment->HasTaggedValueAt(i),
environment->HasUint32ValueAt(i),
- *arguments_index,
- *arguments_count);
+ arguments_known,
+ arguments_index,
+ arguments_count);
} else if (
value->IsDoubleRegister() &&
environment->spilled_double_registers()[value->index()] != NULL) {
@@ -493,8 +662,9 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
environment->spilled_double_registers()[value->index()],
false,
false,
- *arguments_index,
- *arguments_count);
+ arguments_known,
+ arguments_index,
+ arguments_count);
}
}
@@ -502,8 +672,9 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
value,
environment->HasTaggedValueAt(i),
environment->HasUint32ValueAt(i),
- *arguments_index,
- *arguments_count);
+ arguments_known,
+ arguments_index,
+ arguments_count);
}
}
@@ -512,13 +683,15 @@ void LCodeGen::AddToTranslation(Translation* translation,
LOperand* op,
bool is_tagged,
bool is_uint32,
+ bool arguments_known,
int arguments_index,
int arguments_count) {
if (op == NULL) {
// TODO(twuerthinger): Introduce marker operands to indicate that this value
// is not present and must be reconstructed from the deoptimizer. Currently
// this is only used for the arguments object.
- translation->StoreArgumentsObject(arguments_index, arguments_count);
+ translation->StoreArgumentsObject(
+ arguments_known, arguments_index, arguments_count);
} else if (op->IsStackSlot()) {
if (is_tagged) {
translation->StoreStackSlot(op->index());
@@ -592,13 +765,12 @@ void LCodeGen::CallRuntime(const Runtime::Function* fun,
__ CallRuntime(fun, argc);
RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+
+ ASSERT(info()->is_calling());
}
-void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
- int argc,
- LInstruction* instr,
- LOperand* context) {
+void LCodeGen::LoadContextFromDeferred(LOperand* context) {
if (context->IsRegister()) {
if (!ToRegister(context).is(esi)) {
__ mov(esi, ToRegister(context));
@@ -612,10 +784,19 @@ void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
} else {
UNREACHABLE();
}
+}
+
+void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
+ int argc,
+ LInstruction* instr,
+ LOperand* context) {
+ LoadContextFromDeferred(context);
__ CallRuntimeSaveDoubles(id);
RecordSafepointWithRegisters(
instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
+
+ ASSERT(info()->is_calling());
}
@@ -661,7 +842,12 @@ void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
ASSERT(environment->HasBeenRegistered());
int id = environment->deoptimization_index();
- Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER);
+ ASSERT(info()->IsOptimizing() || info()->IsStub());
+ Deoptimizer::BailoutType bailout_type = info()->IsStub()
+ ? Deoptimizer::LAZY
+ : Deoptimizer::EAGER;
+ Address entry =
+ Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
if (entry == NULL) {
Abort("bailout was not prepared");
return;
@@ -695,20 +881,64 @@ void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
__ popfd();
}
- if (cc == no_condition) {
- if (FLAG_trap_on_deopt) __ int3();
- __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
- } else {
- if (FLAG_trap_on_deopt) {
- Label done;
+ if (FLAG_trap_on_deopt) {
+ Label done;
+ if (cc != no_condition) {
__ j(NegateCondition(cc), &done, Label::kNear);
- __ int3();
+ }
+ __ int3();
+ __ bind(&done);
+ }
+
+ ASSERT(info()->IsStub() || frame_is_built_);
+ bool needs_lazy_deopt = info()->IsStub();
+ if (cc == no_condition && frame_is_built_) {
+ if (needs_lazy_deopt) {
+ __ call(entry, RelocInfo::RUNTIME_ENTRY);
+ } else {
__ jmp(entry, RelocInfo::RUNTIME_ENTRY);
- __ bind(&done);
+ }
+ } else {
+ // We often have several deopts to the same entry, reuse the last
+ // jump entry if this is the case.
+ if (jump_table_.is_empty() ||
+ jump_table_.last().address != entry ||
+ jump_table_.last().needs_frame != !frame_is_built_ ||
+ jump_table_.last().is_lazy_deopt != needs_lazy_deopt) {
+ JumpTableEntry table_entry(entry, !frame_is_built_, needs_lazy_deopt);
+ jump_table_.Add(table_entry, zone());
+ }
+ if (cc == no_condition) {
+ __ jmp(&jump_table_.last().label);
} else {
- __ j(cc, entry, RelocInfo::RUNTIME_ENTRY);
+ __ j(cc, &jump_table_.last().label);
+ }
+ }
+}
+
+
+void LCodeGen::RegisterDependentCodeForEmbeddedMaps(Handle<Code> code) {
+ ZoneList<Handle<Map> > maps(1, zone());
+ int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
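+ // Walk the relocation info for embedded map objects; only maps that can
+ // still transition are registered as dependencies below.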
+ for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
+ RelocInfo::Mode mode = it.rinfo()->rmode();
+ if (mode == RelocInfo::EMBEDDED_OBJECT &&
+ it.rinfo()->target_object()->IsMap()) {
+ Handle<Map> map(Map::cast(it.rinfo()->target_object()));
+ if (map->CanTransition()) {
+ maps.Add(map, zone());
+ }
}
}
+#ifdef VERIFY_HEAP
+ // This disables verification of weak embedded maps after full GC.
+ // AddDependentCode can cause a GC, which would observe the state where
+ // this code is not yet in the depended code lists of the embedded maps.
+ NoWeakEmbeddedMapsVerificationScope disable_verification_of_embedded_maps;
+#endif
+ for (int i = 0; i < maps.length(); i++) {
+ maps.at(i)->AddDependentCode(DependentCode::kWeaklyEmbeddedGroup, code);
+ }
}
@@ -872,38 +1102,38 @@ void LCodeGen::DoCallStub(LCallStub* instr) {
switch (instr->hydrogen()->major_key()) {
case CodeStub::RegExpConstructResult: {
RegExpConstructResultStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
break;
}
case CodeStub::RegExpExec: {
RegExpExecStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
break;
}
case CodeStub::SubString: {
SubStringStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
break;
}
case CodeStub::NumberToString: {
NumberToStringStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
break;
}
case CodeStub::StringAdd: {
StringAddStub stub(NO_STRING_ADD_FLAGS);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
break;
}
case CodeStub::StringCompare: {
StringCompareStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
break;
}
case CodeStub::TranscendentalCache: {
TranscendentalCacheStub stub(instr->transcendental_type(),
TranscendentalCacheStub::TAGGED);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
break;
}
default:
@@ -997,6 +1227,17 @@ void LCodeGen::DoModI(LModI* instr) {
// Slow case, using idiv instruction.
__ bind(&slow);
+
+ // Check for (kMinInt % -1).
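+ // idiv faults on kMinInt / -1 because the quotient overflows, so this
+ // input pair must deoptimize instead of reaching the division below.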
+ if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+ Label left_not_min_int;
+ __ cmp(left_reg, kMinInt);
+ __ j(not_zero, &left_not_min_int, Label::kNear);
+ __ cmp(right_reg, -1);
+ DeoptimizeIf(zero, instr->environment());
+ __ bind(&left_not_min_int);
+ }
+
// Sign extend to edx.
__ cdq();
@@ -1030,6 +1271,43 @@ void LCodeGen::DoModI(LModI* instr) {
void LCodeGen::DoDivI(LDivI* instr) {
+ if (!instr->is_flooring() && instr->hydrogen()->HasPowerOf2Divisor()) {
+ Register dividend = ToRegister(instr->left());
+ int32_t divisor =
+ HConstant::cast(instr->hydrogen()->right())->Integer32Value();
+ int32_t test_value = 0;
+ int32_t power = 0;
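+ // The divisor is a compile-time power of two, so the division becomes an
+ // arithmetic shift (plus a negation for negative divisors). Since this
+ // non-flooring division must be exact, a dividend with any of the low
+ // bits set deoptimizes: e.g. for x / 8, deopt unless (x & 7) == 0, then
+ // x >>= 3.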
+
+ if (divisor > 0) {
+ test_value = divisor - 1;
+ power = WhichPowerOf2(divisor);
+ } else {
+ // Check for (0 / -x) that will produce negative zero.
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ __ test(dividend, Operand(dividend));
+ DeoptimizeIf(zero, instr->environment());
+ }
+ // Check for (kMinInt / -1).
+ if (divisor == -1 && instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+ __ cmp(dividend, kMinInt);
+ DeoptimizeIf(zero, instr->environment());
+ }
+ test_value = -divisor - 1;
+ power = WhichPowerOf2(-divisor);
+ }
+
+ if (test_value != 0) {
+ // Deoptimize if remainder is not 0.
+ __ test(dividend, Immediate(test_value));
+ DeoptimizeIf(not_zero, instr->environment());
+ __ sar(dividend, power);
+ }
+
+ if (divisor < 0) __ neg(dividend);
+
+ return;
+ }
+
LOperand* right = instr->right();
ASSERT(ToRegister(instr->result()).is(eax));
ASSERT(ToRegister(instr->left()).is(eax));
@@ -1040,13 +1318,13 @@ void LCodeGen::DoDivI(LDivI* instr) {
// Check for x / 0.
Register right_reg = ToRegister(right);
- if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
+ if (instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) {
__ test(right_reg, ToOperand(right));
DeoptimizeIf(zero, instr->environment());
}
// Check for (0 / -x) that will produce negative zero.
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ if (instr->hydrogen_value()->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label left_not_zero;
__ test(left_reg, Operand(left_reg));
__ j(not_zero, &left_not_zero, Label::kNear);
@@ -1055,8 +1333,8 @@ void LCodeGen::DoDivI(LDivI* instr) {
__ bind(&left_not_zero);
}
- // Check for (-kMinInt / -1).
- if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+ // Check for (kMinInt / -1).
+ if (instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow)) {
Label left_not_min_int;
__ cmp(left_reg, kMinInt);
__ j(not_zero, &left_not_min_int, Label::kNear);
@@ -1069,9 +1347,19 @@ void LCodeGen::DoDivI(LDivI* instr) {
__ cdq();
__ idiv(right_reg);
- // Deoptimize if remainder is not 0.
- __ test(edx, Operand(edx));
- DeoptimizeIf(not_zero, instr->environment());
+ if (!instr->is_flooring()) {
+ // Deoptimize if remainder is not 0.
+ __ test(edx, Operand(edx));
+ DeoptimizeIf(not_zero, instr->environment());
+ } else {
+ Label done;
+ __ test(edx, edx);
+ __ j(zero, &done, Label::kNear);
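+ // idiv truncates the quotient towards zero; for flooring semantics,
+ // subtract one exactly when the non-zero remainder and the divisor have
+ // opposite signs: sign(edx ^ right) >> 31 yields -1 precisely then.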
+ __ xor_(edx, right_reg);
+ __ sar(edx, 31);
+ __ add(eax, edx);
+ __ bind(&done);
+ }
}
@@ -1408,7 +1696,8 @@ void LCodeGen::DoConstantD(LConstantD* instr) {
int32_t lower = static_cast<int32_t>(int_val);
int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
if (CpuFeatures::IsSupported(SSE4_1)) {
- CpuFeatures::Scope scope(SSE4_1);
+ CpuFeatures::Scope scope1(SSE2);
+ CpuFeatures::Scope scope2(SSE4_1);
if (lower != 0) {
__ Set(temp, Immediate(lower));
__ movd(res, Operand(temp));
@@ -1420,6 +1709,7 @@ void LCodeGen::DoConstantD(LConstantD* instr) {
__ pinsrd(res, Operand(temp), 1);
}
} else {
+ CpuFeatures::Scope scope(SSE2);
__ Set(temp, Immediate(upper));
__ movd(res, Operand(temp));
__ psllq(res, 32);
@@ -1536,6 +1826,15 @@ void LCodeGen::DoDateField(LDateField* instr) {
}
+void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
+ SeqStringSetCharGenerator::Generate(masm(),
+ instr->encoding(),
+ ToRegister(instr->string()),
+ ToRegister(instr->index()),
+ ToRegister(instr->value()));
+}
+
+
void LCodeGen::DoBitNotI(LBitNotI* instr) {
LOperand* input = instr->value();
ASSERT(input->Equals(instr->result()));
@@ -1573,6 +1872,7 @@ void LCodeGen::DoAddI(LAddI* instr) {
void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
+ CpuFeatures::Scope scope(SSE2);
LOperand* left = instr->left();
LOperand* right = instr->right();
ASSERT(left->Equals(instr->result()));
@@ -1634,6 +1934,7 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
+ CpuFeatures::Scope scope(SSE2);
XMMRegister left = ToDoubleRegister(instr->left());
XMMRegister right = ToDoubleRegister(instr->right());
XMMRegister result = ToDoubleRegister(instr->result());
@@ -1644,8 +1945,8 @@ void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
__ addsd(left, right);
break;
case Token::SUB:
- __ subsd(left, right);
- break;
+ __ subsd(left, right);
+ break;
case Token::MUL:
__ mulsd(left, right);
break;
@@ -1683,7 +1984,7 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
ASSERT(ToRegister(instr->result()).is(eax));
BinaryOpStub stub(instr->op(), NO_OVERWRITE);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
__ nop(); // Signals no inlined code.
}
@@ -1718,6 +2019,7 @@ void LCodeGen::EmitBranch(int left_block, int right_block, Condition cc) {
void LCodeGen::DoBranch(LBranch* instr) {
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
+ CpuFeatures::Scope scope(SSE2);
Representation r = instr->hydrogen()->value()->representation();
if (r.IsInteger32()) {
@@ -1814,9 +2116,8 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ cmp(FieldOperand(reg, HeapObject::kMapOffset),
factory()->heap_number_map());
__ j(not_equal, &not_heap_number, Label::kNear);
- __ fldz();
- __ fld_d(FieldOperand(reg, HeapNumber::kValueOffset));
- __ FCmp();
+ __ xorps(xmm0, xmm0);
+ __ ucomisd(xmm0, FieldOperand(reg, HeapNumber::kValueOffset));
__ j(zero, false_label);
__ jmp(true_label);
__ bind(&not_heap_number);
@@ -1888,6 +2189,7 @@ void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
EmitGoto(next_block);
} else {
if (instr->is_double()) {
+ CpuFeatures::Scope scope(SSE2);
// Don't base result on EFLAGS when a NaN is involved. Instead
// jump to the false block.
__ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
@@ -2084,7 +2386,7 @@ void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
- Handle<Code> ic = CompareIC::GetUninitialized(op);
+ Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
CallCode(ic, RelocInfo::CODE_TARGET, instr);
Condition condition = ComputeCompareCondition(op);
@@ -2167,7 +2469,7 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
ASSERT(!temp.is(temp2));
__ JumpIfSmi(input, is_false);
- if (class_name->IsEqualTo(CStrVector("Function"))) {
+ if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Function"))) {
// Assuming the following assertions, we can use the same compares to test
// for both being a function type and being in the object type range.
STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
@@ -2197,7 +2499,7 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
__ mov(temp, FieldOperand(temp, Map::kConstructorOffset));
// Objects with a non-function constructor have class 'Object'.
__ CmpObjectType(temp, JS_FUNCTION_TYPE, temp2);
- if (class_name->IsEqualTo(CStrVector("Object"))) {
+ if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Object"))) {
__ j(not_equal, is_true);
} else {
__ j(not_equal, is_false);
@@ -2208,12 +2510,12 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
__ mov(temp, FieldOperand(temp, JSFunction::kSharedFunctionInfoOffset));
__ mov(temp, FieldOperand(temp,
SharedFunctionInfo::kInstanceClassNameOffset));
- // The class name we are testing against is a symbol because it's a literal.
- // The name in the constructor is a symbol because of the way the context is
- // booted. This routine isn't expected to work for random API-created
+ // The class name we are testing against is internalized since it's a literal.
+ // The name in the constructor is internalized because of the way the context
+ // is booted. This routine isn't expected to work for random API-created
// classes and it doesn't have to because you can't access it with natives
- // syntax. Since both sides are symbols it is sufficient to use an identity
- // comparison.
+ // syntax. Since both sides are internalized it is sufficient to use an
+ // identity comparison.
__ cmp(temp, class_name);
// End with the answer in the z flag.
}
@@ -2252,7 +2554,7 @@ void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
// Object and function are in fixed registers defined by the stub.
ASSERT(ToRegister(instr->context()).is(esi));
InstanceofStub stub(InstanceofStub::kArgsInRegisters);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
Label true_value, done;
__ test(eax, Operand(eax));
@@ -2353,7 +2655,7 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
int delta = masm_->SizeOfCodeGeneratedSince(map_check) + kAdditionalDelta;
__ mov(temp, Immediate(delta));
__ StoreToSafepointRegisterSlot(temp, temp);
- CallCodeGeneric(stub.GetCode(),
+ CallCodeGeneric(stub.GetCode(isolate()),
RelocInfo::CODE_TARGET,
instr,
RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
@@ -2367,10 +2669,18 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
}
+void LCodeGen::DoInstanceSize(LInstanceSize* instr) {
+ Register object = ToRegister(instr->object());
+ Register result = ToRegister(instr->result());
+ __ mov(result, FieldOperand(object, HeapObject::kMapOffset));
+ __ movzx_b(result, FieldOperand(result, Map::kInstanceSizeOffset));
+}
+
+
void LCodeGen::DoCmpT(LCmpT* instr) {
Token::Value op = instr->op();
- Handle<Code> ic = CompareIC::GetUninitialized(op);
+ Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
CallCode(ic, RelocInfo::CODE_TARGET, instr);
Condition condition = ComputeCompareCondition(op);
@@ -2386,7 +2696,7 @@ void LCodeGen::DoCmpT(LCmpT* instr) {
void LCodeGen::DoReturn(LReturn* instr) {
- if (FLAG_trace) {
+ if (FLAG_trace && info()->IsOptimizing()) {
// Preserve the return value on the stack and rely on the runtime call
// to return the value in the same register. We're leaving the code
// managed by the register allocator and tearing down the frame, it's
@@ -2395,13 +2705,28 @@ void LCodeGen::DoReturn(LReturn* instr) {
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
__ CallRuntime(Runtime::kTraceExit, 1);
}
+ if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(SSE2)) {
+ ASSERT(NeedsEagerFrame());
+ CpuFeatures::Scope scope(SSE2);
+ BitVector* doubles = chunk()->allocated_double_registers();
+ BitVector::Iterator save_iterator(doubles);
+ int count = 0;
+ while (!save_iterator.Done()) {
+ __ movdbl(XMMRegister::FromAllocationIndex(save_iterator.Current()),
+ MemOperand(esp, count * kDoubleSize));
+ save_iterator.Advance();
+ count++;
+ }
+ }
if (dynamic_frame_alignment_) {
// Fetch the state of the dynamic frame alignment.
__ mov(edx, Operand(ebp,
JavaScriptFrameConstants::kDynamicAlignmentStateOffset));
}
- __ mov(esp, ebp);
- __ pop(ebp);
+ if (NeedsEagerFrame()) {
+ __ mov(esp, ebp);
+ __ pop(ebp);
+ }
if (dynamic_frame_alignment_) {
Label no_padding;
__ cmp(edx, Immediate(kNoAlignmentPadding));
@@ -2414,7 +2739,11 @@ void LCodeGen::DoReturn(LReturn* instr) {
__ Ret((GetParameterCount() + 2) * kPointerSize, ecx);
__ bind(&no_padding);
}
- __ Ret((GetParameterCount() + 1) * kPointerSize, ecx);
+ if (info()->IsStub()) {
+ __ Ret();
+ } else {
+ __ Ret((GetParameterCount() + 1) * kPointerSize, ecx);
+ }
}
@@ -2776,22 +3105,37 @@ void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
ElementsKind elements_kind = instr->elements_kind();
- if (ExternalArrayOpRequiresTemp<HLoadKeyed>(instr->hydrogen())) {
- __ SmiUntag(ToRegister(instr->key()));
+ LOperand* key = instr->key();
+ if (!key->IsConstantOperand() &&
+ ExternalArrayOpRequiresTemp(instr->hydrogen()->key()->representation(),
+ elements_kind)) {
+ __ SmiUntag(ToRegister(key));
}
Operand operand(BuildFastArrayOperand(
instr->elements(),
- instr->key(),
+ key,
instr->hydrogen()->key()->representation(),
elements_kind,
0,
instr->additional_index()));
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
- XMMRegister result(ToDoubleRegister(instr->result()));
- __ movss(result, operand);
- __ cvtss2sd(result, result);
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope scope(SSE2);
+ XMMRegister result(ToDoubleRegister(instr->result()));
+ __ movss(result, operand);
+ __ cvtss2sd(result, result);
+ } else {
+ __ fld_s(operand);
+ HandleX87FPReturnValue(instr);
+ }
} else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
- __ movdbl(ToDoubleRegister(instr->result()), operand);
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope scope(SSE2);
+ __ movdbl(ToDoubleRegister(instr->result()), operand);
+ } else {
+ __ fld_d(operand);
+ HandleX87FPReturnValue(instr);
+ }
} else {
Register result(ToRegister(instr->result()));
switch (elements_kind) {
@@ -2835,9 +3179,30 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
}
-void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
- XMMRegister result = ToDoubleRegister(instr->result());
+void LCodeGen::HandleX87FPReturnValue(LInstruction* instr) {
+ if (IsX87TopOfStack(instr->result())) {
+ // Return value is already on stack. If the value has no uses, then
+ // pop it off the FP stack. Otherwise, make sure that there are enough
+ // copies of the value on the stack to feed all of the usages, e.g.
+ // when the following instruction uses the return value in multiple
+ // inputs.
+ int count = instr->hydrogen_value()->UseCount();
+ if (count == 0) {
+ __ fstp(0);
+ } else {
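+ // One copy is already on the x87 register stack; with eight registers
+ // in total there is room for at most seven more copies.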
+ count--;
+ ASSERT(count <= 7);
+ while (count-- > 0) {
+ __ fld(0);
+ }
+ }
+ } else {
+ __ fstp_d(ToOperand(instr->result()));
+ }
+}
+
+void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
if (instr->hydrogen()->RequiresHoleCheck()) {
int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag +
sizeof(kHoleNanLower32);
@@ -2858,7 +3223,14 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
FAST_DOUBLE_ELEMENTS,
FixedDoubleArray::kHeaderSize - kHeapObjectTag,
instr->additional_index());
- __ movdbl(result, double_load_operand);
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope scope(SSE2);
+ XMMRegister result = ToDoubleRegister(instr->result());
+ __ movdbl(result, double_load_operand);
+ } else {
+ __ fld_d(double_load_operand);
+ HandleX87FPReturnValue(instr);
+ }
}
@@ -2907,13 +3279,6 @@ Operand LCodeGen::BuildFastArrayOperand(
uint32_t additional_index) {
Register elements_pointer_reg = ToRegister(elements_pointer);
int shift_size = ElementsKindToShiftSize(elements_kind);
- // Even though the HLoad/StoreKeyed instructions force the input
- // representation for the key to be an integer, the input gets replaced during
- // bound check elimination with the index argument to the bounds check, which
- // can be tagged, so that case must be handled here, too.
- if (key_representation.IsTagged() && (shift_size >= 1)) {
- shift_size -= kSmiTagSize;
- }
if (key->IsConstantOperand()) {
int constant_value = ToInteger32(LConstantOperand::cast(key));
if (constant_value & 0xF0000000) {
@@ -2923,6 +3288,10 @@ Operand LCodeGen::BuildFastArrayOperand(
((constant_value + additional_index) << shift_size)
+ offset);
} else {
+ // Take the tag bit into account while computing the shift size.
+ if (key_representation.IsTagged() && (shift_size >= 1)) {
+ shift_size -= kSmiTagSize;
+ }
ScaleFactor scale_factor = static_cast<ScaleFactor>(shift_size);
return Operand(elements_pointer_reg,
ToRegister(key),
@@ -3102,7 +3471,12 @@ void LCodeGen::DoThisFunction(LThisFunction* instr) {
void LCodeGen::DoContext(LContext* instr) {
Register result = ToRegister(instr->result());
- __ mov(result, Operand(ebp, StandardFrameConstants::kContextOffset));
+ if (info()->IsOptimizing()) {
+ __ mov(result, Operand(ebp, StandardFrameConstants::kContextOffset));
+ } else {
+ // If there is no frame, the context must be in esi.
+ ASSERT(result.is(esi));
+ }
}
@@ -3127,9 +3501,10 @@ void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
__ mov(result,
- Operand(context, Context::SlotOffset(instr->qml_global()
- ? Context::QML_GLOBAL_OBJECT_INDEX
- : Context::GLOBAL_OBJECT_INDEX)));
+ Operand(context,
+ Context::SlotOffset(instr->qml_global()
+ ? Context::QML_GLOBAL_OBJECT_INDEX
+ : Context::GLOBAL_OBJECT_INDEX)));
}
@@ -3276,6 +3651,7 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
ASSERT(instr->value()->Equals(instr->result()));
Representation r = instr->hydrogen()->value()->representation();
+ CpuFeatures::Scope scope(SSE2);
if (r.IsDouble()) {
XMMRegister scratch = xmm0;
XMMRegister input_reg = ToDoubleRegister(instr->value());
@@ -3297,6 +3673,7 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
+ CpuFeatures::Scope scope(SSE2);
XMMRegister xmm_scratch = xmm0;
Register output_reg = ToRegister(instr->result());
XMMRegister input_reg = ToDoubleRegister(instr->value());
@@ -3360,45 +3737,61 @@ void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
}
}
-void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
- XMMRegister xmm_scratch = xmm0;
+void LCodeGen::DoMathRound(LMathRound* instr) {
+ CpuFeatures::Scope scope(SSE2);
Register output_reg = ToRegister(instr->result());
XMMRegister input_reg = ToDoubleRegister(instr->value());
-
- Label below_half, done;
- // xmm_scratch = 0.5
+ XMMRegister xmm_scratch = xmm0;
+ XMMRegister input_temp = ToDoubleRegister(instr->temp());
ExternalReference one_half = ExternalReference::address_of_one_half();
+ ExternalReference minus_one_half =
+ ExternalReference::address_of_minus_one_half();
+
+ Label done, round_to_zero, below_one_half, do_not_compensate;
__ movdbl(xmm_scratch, Operand::StaticVariable(one_half));
__ ucomisd(xmm_scratch, input_reg);
- __ j(above, &below_half);
- // xmm_scratch = input + 0.5
- __ addsd(xmm_scratch, input_reg);
+ __ j(above, &below_one_half);
- // Compute Math.floor(value + 0.5).
- // Use truncating instruction (OK because input is positive).
+ // CVTTSD2SI rounds towards zero, since 0.5 <= x, we use floor(0.5 + x).
+ __ addsd(xmm_scratch, input_reg);
__ cvttsd2si(output_reg, Operand(xmm_scratch));
-
// Overflow is signalled with minint.
__ cmp(output_reg, 0x80000000u);
+ __ RecordComment("D2I conversion overflow");
DeoptimizeIf(equal, instr->environment());
__ jmp(&done);
- __ bind(&below_half);
+ __ bind(&below_one_half);
+ __ movdbl(xmm_scratch, Operand::StaticVariable(minus_one_half));
+ __ ucomisd(xmm_scratch, input_reg);
+ __ j(below_equal, &round_to_zero);
+
+ // CVTTSD2SI rounds towards zero, we use ceil(x - (-0.5)) and then
+ // compare and compensate.
+ __ movsd(input_temp, input_reg); // Do not alter input_reg.
+ __ subsd(input_temp, xmm_scratch);
+ __ cvttsd2si(output_reg, Operand(input_temp));
+ // Catch minint due to overflow, and to prevent overflow when compensating.
+ __ cmp(output_reg, 0x80000000u);
+ __ RecordComment("D2I conversion overflow");
+ DeoptimizeIf(equal, instr->environment());
+ __ cvtsi2sd(xmm_scratch, output_reg);
+ __ ucomisd(xmm_scratch, input_temp);
+ __ j(equal, &done);
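+ // The truncation rounded the negative value towards zero (i.e. up);
+ // subtract one to get floor(x + 0.5).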
+ __ sub(output_reg, Immediate(1));
+ // No overflow because we already ruled out minint.
+ __ jmp(&done);
+
+ __ bind(&round_to_zero);
// We return 0 for the input range [+0, 0.5[, or [-0.5, 0.5[ if
// we can ignore the difference between a result of -0 and +0.
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
// If the sign is positive, we return +0.
__ movmskpd(output_reg, input_reg);
__ test(output_reg, Immediate(1));
+ __ RecordComment("Minus zero");
DeoptimizeIf(not_zero, instr->environment());
- } else {
- // If the input is >= -0.5, we return +0.
- __ mov(output_reg, Immediate(0xBF000000));
- __ movd(xmm_scratch, Operand(output_reg));
- __ cvtss2sd(xmm_scratch, xmm_scratch);
- __ ucomisd(input_reg, xmm_scratch);
- DeoptimizeIf(below, instr->environment());
}
__ Set(output_reg, Immediate(0));
__ bind(&done);
@@ -3406,6 +3799,7 @@ void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
+ CpuFeatures::Scope scope(SSE2);
XMMRegister input_reg = ToDoubleRegister(instr->value());
ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
__ sqrtsd(input_reg, input_reg);
@@ -3413,6 +3807,7 @@ void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
+ CpuFeatures::Scope scope(SSE2);
XMMRegister xmm_scratch = xmm0;
XMMRegister input_reg = ToDoubleRegister(instr->value());
Register scratch = ToRegister(instr->temp());
@@ -3489,6 +3884,7 @@ void LCodeGen::DoRandom(LRandom* instr) {
DeferredDoRandom* deferred = new(zone()) DeferredDoRandom(this, instr);
+ CpuFeatures::Scope scope(SSE2);
// Having marked this instruction as a call we can use any
// registers.
ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
@@ -3556,6 +3952,7 @@ void LCodeGen::DoDeferredRandom(LRandom* instr) {
void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
+ CpuFeatures::Scope scope(SSE2);
ASSERT(instr->value()->Equals(instr->result()));
XMMRegister input_reg = ToDoubleRegister(instr->value());
Label positive, done, zero;
@@ -3586,11 +3983,22 @@ void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
}
+void LCodeGen::DoMathExp(LMathExp* instr) {
+ CpuFeatures::Scope scope(SSE2);
+ XMMRegister input = ToDoubleRegister(instr->value());
+ XMMRegister result = ToDoubleRegister(instr->result());
+ Register temp1 = ToRegister(instr->temp1());
+ Register temp2 = ToRegister(instr->temp2());
+
+ MathExpGenerator::EmitMathExp(masm(), input, result, xmm0, temp1, temp2);
+}
+
+
void LCodeGen::DoMathTan(LUnaryMathOperation* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
TranscendentalCacheStub stub(TranscendentalCache::TAN,
TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}
@@ -3598,7 +4006,7 @@ void LCodeGen::DoMathCos(LUnaryMathOperation* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
TranscendentalCacheStub stub(TranscendentalCache::COS,
TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}
@@ -3606,7 +4014,7 @@ void LCodeGen::DoMathSin(LUnaryMathOperation* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
TranscendentalCacheStub stub(TranscendentalCache::SIN,
TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}
@@ -3618,9 +4026,6 @@ void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) {
case kMathFloor:
DoMathFloor(instr);
break;
- case kMathRound:
- DoMathRound(instr);
- break;
case kMathSqrt:
DoMathSqrt(instr);
break;
@@ -3697,7 +4102,7 @@ void LCodeGen::DoCallFunction(LCallFunction* instr) {
int arity = instr->arity();
CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}
@@ -3729,9 +4134,29 @@ void LCodeGen::DoCallNew(LCallNew* instr) {
ASSERT(ToRegister(instr->constructor()).is(edi));
ASSERT(ToRegister(instr->result()).is(eax));
+ if (FLAG_optimize_constructed_arrays) {
+ // No cell in ebx for construct type feedback in optimized code
+ Handle<Object> undefined_value(isolate()->heap()->undefined_value(),
+ isolate());
+ __ mov(ebx, Immediate(undefined_value));
+ }
CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
__ Set(eax, Immediate(instr->arity()));
- CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+}
+
+
+void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
+ ASSERT(ToRegister(instr->context()).is(esi));
+ ASSERT(ToRegister(instr->constructor()).is(edi));
+ ASSERT(ToRegister(instr->result()).is(eax));
+ ASSERT(FLAG_optimize_constructed_arrays);
+
+ __ mov(ebx, instr->hydrogen()->property_cell());
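+ // The property cell in ebx carries the elements-kind feedback that
+ // ArrayConstructCode consults when allocating the array.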
+ Handle<Code> array_construct_code =
+ isolate()->builtins()->ArrayConstructCode();
+ __ Set(eax, Immediate(instr->arity()));
+ CallCode(array_construct_code, RelocInfo::CONSTRUCT_CALL, instr);
}
@@ -3813,27 +4238,9 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
}
-void LCodeGen::DeoptIfTaggedButNotSmi(LEnvironment* environment,
- HValue* value,
- LOperand* operand) {
- if (value->representation().IsTagged() && !value->type().IsSmi()) {
- if (operand->IsRegister()) {
- __ test(ToRegister(operand), Immediate(kSmiTagMask));
- } else {
- __ test(ToOperand(operand), Immediate(kSmiTagMask));
- }
- DeoptimizeIf(not_zero, environment);
- }
-}
-
-
void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
- DeoptIfTaggedButNotSmi(instr->environment(),
- instr->hydrogen()->length(),
- instr->length());
- DeoptIfTaggedButNotSmi(instr->environment(),
- instr->hydrogen()->index(),
- instr->index());
+ if (instr->hydrogen()->skip_check()) return;
+
if (instr->index()->IsConstantOperand()) {
int constant_index =
ToInteger32(LConstantOperand::cast(instr->index()));
@@ -3853,20 +4260,25 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
ElementsKind elements_kind = instr->elements_kind();
- if (ExternalArrayOpRequiresTemp<HStoreKeyed>(instr->hydrogen())) {
- __ SmiUntag(ToRegister(instr->key()));
+ LOperand* key = instr->key();
+ if (!key->IsConstantOperand() &&
+ ExternalArrayOpRequiresTemp(instr->hydrogen()->key()->representation(),
+ elements_kind)) {
+ __ SmiUntag(ToRegister(key));
}
Operand operand(BuildFastArrayOperand(
instr->elements(),
- instr->key(),
+ key,
instr->hydrogen()->key()->representation(),
elements_kind,
0,
instr->additional_index()));
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
+ CpuFeatures::Scope scope(SSE2);
__ cvtsd2ss(xmm0, ToDoubleRegister(instr->value()));
__ movss(operand, xmm0);
} else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
+ CpuFeatures::Scope scope(SSE2);
__ movdbl(operand, ToDoubleRegister(instr->value()));
} else {
Register value = ToRegister(instr->value());
@@ -3902,6 +4314,7 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
+ CpuFeatures::Scope scope(SSE2);
XMMRegister value = ToDoubleRegister(instr->value());
if (instr->NeedsCanonicalization()) {
@@ -3983,14 +4396,21 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
}
+void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
+ Register object = ToRegister(instr->object());
+ Register temp = ToRegister(instr->temp());
+ __ TestJSArrayForAllocationSiteInfo(object, temp);
+ DeoptimizeIf(equal, instr->environment());
+}
+
+
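Note: TestJSArrayForAllocationSiteInfo checks whether the object placed directly after the array is an AllocationSiteInfo and, if so, the instruction deoptimizes. A rough sketch of the check, assuming the memento is identified by its map word (layout and helper are illustrative, not the real macro-assembler sequence):

    #include <cstdint>

    // Sketch: an allocation memento, if present, sits immediately after the
    // array in new space and is identified by its map word.
    bool LooksLikeAllocationMemento(uintptr_t array_end, uintptr_t new_space_top,
                                    uintptr_t site_info_map) {
      if (array_end + 2 * sizeof(uintptr_t) > new_space_top) return false;
      uintptr_t candidate_map = *reinterpret_cast<uintptr_t*>(array_end);
      return candidate_map == site_info_map;
    }
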
void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
Register object_reg = ToRegister(instr->object());
- Register new_map_reg = ToRegister(instr->new_map_temp());
Handle<Map> from_map = instr->original_map();
Handle<Map> to_map = instr->transitioned_map();
- ElementsKind from_kind = from_map->elements_kind();
- ElementsKind to_kind = to_map->elements_kind();
+ ElementsKind from_kind = instr->from_kind();
+ ElementsKind to_kind = instr->to_kind();
Label not_applicable;
bool is_simple_map_transition =
@@ -4000,7 +4420,7 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
__ cmp(FieldOperand(object_reg, HeapObject::kMapOffset), from_map);
__ j(not_equal, &not_applicable, branch_distance);
if (is_simple_map_transition) {
- Register object_reg = ToRegister(instr->object());
+ Register new_map_reg = ToRegister(instr->new_map_temp());
Handle<Map> map = instr->hydrogen()->transitioned_map();
__ mov(FieldOperand(object_reg, HeapObject::kMapOffset),
Immediate(map));
@@ -4009,8 +4429,23 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
__ RecordWriteForMap(object_reg, to_map, new_map_reg,
ToRegister(instr->temp()),
kDontSaveFPRegs);
+ } else if (FLAG_compiled_transitions) {
+ PushSafepointRegistersScope scope(this);
+ if (!object_reg.is(eax)) {
+ __ push(object_reg);
+ }
+ LoadContextFromDeferred(instr->context());
+ if (!object_reg.is(eax)) {
+ __ pop(eax);
+ }
+ __ mov(ebx, to_map);
+ TransitionElementsKindStub stub(from_kind, to_kind);
+ __ CallStub(&stub);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
} else if (IsFastSmiElementsKind(from_kind) &&
IsFastDoubleElementsKind(to_kind)) {
+ Register new_map_reg = ToRegister(instr->new_map_temp());
__ mov(new_map_reg, to_map);
Register fixed_object_reg = ToRegister(instr->temp());
ASSERT(fixed_object_reg.is(edx));
@@ -4020,6 +4455,7 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
RelocInfo::CODE_TARGET, instr);
} else if (IsFastDoubleElementsKind(from_kind) &&
IsFastObjectElementsKind(to_kind)) {
+ Register new_map_reg = ToRegister(instr->new_map_temp());
__ mov(new_map_reg, to_map);
Register fixed_object_reg = ToRegister(instr->temp());
ASSERT(fixed_object_reg.is(edx));
@@ -4107,7 +4543,7 @@ void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
Register result = ToRegister(instr->result());
ASSERT(!char_code.is(result));
- __ cmp(char_code, String::kMaxAsciiCharCode);
+ __ cmp(char_code, String::kMaxOneByteCharCode);
__ j(above, deferred->entry());
__ Set(result, Immediate(factory()->single_character_string_cache()));
__ mov(result, FieldOperand(result,
@@ -4147,20 +4583,26 @@ void LCodeGen::DoStringAdd(LStringAdd* instr) {
EmitPushTaggedOperand(instr->left());
EmitPushTaggedOperand(instr->right());
StringAddStub stub(NO_STRING_CHECK_IN_STUB);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}
void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
- LOperand* input = instr->value();
- ASSERT(input->IsRegister() || input->IsStackSlot());
- LOperand* output = instr->result();
- ASSERT(output->IsDoubleRegister());
- __ cvtsi2sd(ToDoubleRegister(output), ToOperand(input));
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope scope(SSE2);
+ LOperand* input = instr->value();
+ ASSERT(input->IsRegister() || input->IsStackSlot());
+ LOperand* output = instr->result();
+ ASSERT(output->IsDoubleRegister());
+ __ cvtsi2sd(ToDoubleRegister(output), ToOperand(input));
+ } else {
+ UNREACHABLE();
+ }
}
void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
+ CpuFeatures::Scope scope(SSE2);
LOperand* input = instr->value();
LOperand* output = instr->result();
LOperand* temp = instr->temp();
@@ -4238,9 +4680,27 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
// the value in there. If that fails, call the runtime system.
__ SmiUntag(reg);
__ xor_(reg, 0x80000000);
- __ cvtsi2sd(xmm0, Operand(reg));
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope feature_scope(SSE2);
+ __ cvtsi2sd(xmm0, Operand(reg));
+ } else {
+ __ push(reg);
+ __ fild_s(Operand(esp, 0));
+ __ pop(reg);
+ }
} else {
- __ LoadUint32(xmm0, reg, xmm1);
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope feature_scope(SSE2);
+ __ LoadUint32(xmm0, reg, xmm1);
+ } else {
+ // There's no fild variant for unsigned values, so zero-extend to a 64-bit
+ // int manually.
+ __ push(Immediate(0));
+ __ push(reg);
+ __ fild_d(Operand(esp, 0));
+ __ pop(reg);
+ __ pop(reg);
+ }
}
if (FLAG_inline_new) {
@@ -4269,7 +4729,12 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
// Done. Put the value in xmm0 into the value of the allocated heap
// number.
__ bind(&done);
- __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), xmm0);
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope feature_scope(SSE2);
+ __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), xmm0);
+ } else {
+ __ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset));
+ }
__ StoreToSafepointRegisterSlot(reg, reg);
}
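
Note: the non-SSE2 path above leans on the ia32 stack layout. Pushing 0 and then the 32-bit value leaves a zero-extended little-endian 64-bit integer at esp, which fild_d converts exactly because the high word keeps the sign bit clear. The same idea in portable form:

    #include <cstdint>

    // Sketch of the zero-extension trick behind the x87 path: widening to a
    // 64-bit integer makes the unsigned 32-bit value representable for a
    // signed 64-bit integer load (fild_d).
    double Uint32ToDouble(uint32_t value) {
      uint64_t widened = static_cast<uint64_t>(value);  // push 0 / push reg
      return static_cast<double>(widened);              // exact: high word is zero
    }
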
@@ -4285,18 +4750,83 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
LNumberTagD* instr_;
};
- XMMRegister input_reg = ToDoubleRegister(instr->value());
Register reg = ToRegister(instr->result());
- Register tmp = ToRegister(instr->temp());
+ bool convert_hole = false;
+ HValue* change_input = instr->hydrogen()->value();
+ if (change_input->IsLoadKeyed()) {
+ HLoadKeyed* load = HLoadKeyed::cast(change_input);
+ convert_hole = load->UsesMustHandleHole();
+ }
+
+ Label no_special_nan_handling;
+ Label done;
+ if (convert_hole) {
+ bool use_sse2 = CpuFeatures::IsSupported(SSE2);
+ if (use_sse2) {
+ CpuFeatures::Scope scope(SSE2);
+ XMMRegister input_reg = ToDoubleRegister(instr->value());
+ __ ucomisd(input_reg, input_reg);
+ } else {
+ if (!IsX87TopOfStack(instr->value())) {
+ __ fld_d(ToOperand(instr->value()));
+ }
+ __ fld(0);
+ __ fld(0);
+ __ FCmp();
+ }
+
+ __ j(parity_odd, &no_special_nan_handling);
+ __ sub(esp, Immediate(kDoubleSize));
+ if (use_sse2) {
+ CpuFeatures::Scope scope(SSE2);
+ XMMRegister input_reg = ToDoubleRegister(instr->value());
+ __ movdbl(MemOperand(esp, 0), input_reg);
+ } else {
+ __ fld(0);
+ __ fstp_d(MemOperand(esp, 0));
+ }
+ __ cmp(MemOperand(esp, sizeof(kHoleNanLower32)),
+ Immediate(kHoleNanUpper32));
+ Label canonicalize;
+ __ j(not_equal, &canonicalize);
+ __ add(esp, Immediate(kDoubleSize));
+ __ mov(reg, factory()->the_hole_value());
+ __ jmp(&done);
+ __ bind(&canonicalize);
+ __ add(esp, Immediate(kDoubleSize));
+ ExternalReference nan =
+ ExternalReference::address_of_canonical_non_hole_nan();
+ if (use_sse2) {
+ CpuFeatures::Scope scope(SSE2);
+ XMMRegister input_reg = ToDoubleRegister(instr->value());
+ __ movdbl(input_reg, Operand::StaticVariable(nan));
+ } else {
+ __ fstp(0);
+ __ fld_d(Operand::StaticVariable(nan));
+ }
+ }
+
+ __ bind(&no_special_nan_handling);
DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
if (FLAG_inline_new) {
+ Register tmp = ToRegister(instr->temp());
__ AllocateHeapNumber(reg, tmp, no_reg, deferred->entry());
} else {
__ jmp(deferred->entry());
}
__ bind(deferred->exit());
- __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope scope(SSE2);
+ XMMRegister input_reg = ToDoubleRegister(instr->value());
+ __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
+ } else {
+ if (!IsX87TopOfStack(instr->value())) {
+ __ fld_d(ToOperand(instr->value()));
+ }
+ __ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset));
+ }
+ __ bind(&done);
}
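
Note: the hole check above spills the double and compares only its upper 32 bits against kHoleNanUpper32, since V8 encodes "the hole" as a NaN with a fixed high word. Equivalent logic in plain C++ (the constant's value is defined elsewhere in V8 and assumed here):

    #include <cstdint>
    #include <cstring>

    // Sketch: a hole NaN is recognized purely by its upper 32 bits.
    bool IsHoleNaN(double value, uint32_t hole_nan_upper32) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof(bits));  // bit-cast without aliasing UB
      return static_cast<uint32_t>(bits >> 32) == hole_nan_upper32;
    }
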
@@ -4347,44 +4877,59 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
XMMRegister result_reg,
bool deoptimize_on_undefined,
bool deoptimize_on_minus_zero,
- LEnvironment* env) {
+ LEnvironment* env,
+ NumberUntagDMode mode) {
Label load_smi, done;
- // Smi check.
- __ JumpIfSmi(input_reg, &load_smi, Label::kNear);
+ if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
+ // Smi check.
+ __ JumpIfSmi(input_reg, &load_smi, Label::kNear);
- // Heap number map check.
- __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
- factory()->heap_number_map());
- if (deoptimize_on_undefined) {
- DeoptimizeIf(not_equal, env);
- } else {
- Label heap_number;
- __ j(equal, &heap_number, Label::kNear);
+ // Heap number map check.
+ __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
+ factory()->heap_number_map());
+ if (deoptimize_on_undefined) {
+ DeoptimizeIf(not_equal, env);
+ } else {
+ Label heap_number;
+ __ j(equal, &heap_number, Label::kNear);
- __ cmp(input_reg, factory()->undefined_value());
- DeoptimizeIf(not_equal, env);
+ __ cmp(input_reg, factory()->undefined_value());
+ DeoptimizeIf(not_equal, env);
- // Convert undefined to NaN.
- ExternalReference nan =
- ExternalReference::address_of_canonical_non_hole_nan();
- __ movdbl(result_reg, Operand::StaticVariable(nan));
- __ jmp(&done, Label::kNear);
+ // Convert undefined to NaN.
+ ExternalReference nan =
+ ExternalReference::address_of_canonical_non_hole_nan();
+ __ movdbl(result_reg, Operand::StaticVariable(nan));
+ __ jmp(&done, Label::kNear);
- __ bind(&heap_number);
- }
- // Heap number to XMM conversion.
- __ movdbl(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
- if (deoptimize_on_minus_zero) {
- XMMRegister xmm_scratch = xmm0;
- __ xorps(xmm_scratch, xmm_scratch);
- __ ucomisd(result_reg, xmm_scratch);
- __ j(not_zero, &done, Label::kNear);
- __ movmskpd(temp_reg, result_reg);
- __ test_b(temp_reg, 1);
- DeoptimizeIf(not_zero, env);
+ __ bind(&heap_number);
+ }
+ // Heap number to XMM conversion.
+ __ movdbl(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
+ if (deoptimize_on_minus_zero) {
+ XMMRegister xmm_scratch = xmm0;
+ __ xorps(xmm_scratch, xmm_scratch);
+ __ ucomisd(result_reg, xmm_scratch);
+ __ j(not_zero, &done, Label::kNear);
+ __ movmskpd(temp_reg, result_reg);
+ __ test_b(temp_reg, 1);
+ DeoptimizeIf(not_zero, env);
+ }
+ __ jmp(&done, Label::kNear);
+ } else if (mode == NUMBER_CANDIDATE_IS_SMI_OR_HOLE) {
+ __ test(input_reg, Immediate(kSmiTagMask));
+ DeoptimizeIf(not_equal, env);
+ } else if (mode == NUMBER_CANDIDATE_IS_SMI_CONVERT_HOLE) {
+ __ test(input_reg, Immediate(kSmiTagMask));
+ __ j(zero, &load_smi);
+ ExternalReference hole_nan_reference =
+ ExternalReference::address_of_the_hole_nan();
+ __ movdbl(result_reg, Operand::StaticVariable(hole_nan_reference));
+ __ jmp(&done, Label::kNear);
+ } else {
+ ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
}
- __ jmp(&done, Label::kNear);
// Smi to XMM conversion
__ bind(&load_smi);
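
Note: the new mode parameter tells EmitNumberUntagD how much checking the incoming value still needs. The enum is introduced elsewhere in this patch; its assumed shape, matching the cases handled above:

    // Assumed declaration (defined in shared lithium code in this patch):
    enum NumberUntagDMode {
      NUMBER_CANDIDATE_IS_SMI,               // known smi: convert directly
      NUMBER_CANDIDATE_IS_SMI_OR_HOLE,       // smi expected; anything else deopts
      NUMBER_CANDIDATE_IS_SMI_CONVERT_HOLE,  // smi expected; the hole becomes hole NaN
      NUMBER_CANDIDATE_IS_ANY_TAGGED         // full smi and heap-number checks
    };
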
@@ -4408,6 +4953,7 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
// Check for undefined. Undefined is converted to zero for truncating
// conversions.
__ cmp(input_reg, factory()->undefined_value());
+ __ RecordComment("Deferred TaggedToI: cannot truncate");
DeoptimizeIf(not_equal, instr->environment());
__ mov(input_reg, 0);
__ jmp(&done, Label::kNear);
@@ -4428,6 +4974,7 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
__ j(less, &convert, Label::kNear);
// Pop FPU stack before deoptimizing.
__ fstp(0);
+ __ RecordComment("Deferred TaggedToI: exponent too big");
DeoptimizeIf(no_condition, instr->environment());
// Reserve space for 64 bit answer.
@@ -4438,6 +4985,7 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
__ mov(input_reg, Operand(esp, 0)); // Low word of answer is the result.
__ add(Operand(esp), Immediate(kDoubleSize));
} else {
+ CpuFeatures::Scope scope(SSE2);
XMMRegister xmm_temp = ToDoubleRegister(instr->temp());
__ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
__ cvttsd2si(input_reg, Operand(xmm0));
@@ -4451,8 +4999,10 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
DeoptimizeIf(not_equal, instr->environment());
DeoptimizeIf(parity_even, instr->environment()); // NaN.
}
- } else {
+ } else if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope scope(SSE2);
// Deoptimize if we don't have a heap number.
+ __ RecordComment("Deferred TaggedToI: not a heap number");
DeoptimizeIf(not_equal, instr->environment());
XMMRegister xmm_temp = ToDoubleRegister(instr->temp());
@@ -4460,15 +5010,20 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
__ cvttsd2si(input_reg, Operand(xmm0));
__ cvtsi2sd(xmm_temp, Operand(input_reg));
__ ucomisd(xmm0, xmm_temp);
+ __ RecordComment("Deferred TaggedToI: lost precision");
DeoptimizeIf(not_equal, instr->environment());
+ __ RecordComment("Deferred TaggedToI: NaN");
DeoptimizeIf(parity_even, instr->environment()); // NaN.
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ test(input_reg, Operand(input_reg));
__ j(not_zero, &done);
__ movmskpd(input_reg, xmm0);
__ and_(input_reg, 1);
+ __ RecordComment("Deferred TaggedToI: minus zero");
DeoptimizeIf(not_zero, instr->environment());
}
+ } else {
+ UNREACHABLE();
}
__ bind(&done);
}
@@ -4511,19 +5066,42 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
LOperand* result = instr->result();
ASSERT(result->IsDoubleRegister());
- Register input_reg = ToRegister(input);
- XMMRegister result_reg = ToDoubleRegister(result);
-
- bool deoptimize_on_minus_zero =
- instr->hydrogen()->deoptimize_on_minus_zero();
- Register temp_reg = deoptimize_on_minus_zero ? ToRegister(temp) : no_reg;
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope scope(SSE2);
+ Register input_reg = ToRegister(input);
+ XMMRegister result_reg = ToDoubleRegister(result);
+
+ bool deoptimize_on_minus_zero =
+ instr->hydrogen()->deoptimize_on_minus_zero();
+ Register temp_reg = deoptimize_on_minus_zero ? ToRegister(temp) : no_reg;
+
+ NumberUntagDMode mode = NUMBER_CANDIDATE_IS_ANY_TAGGED;
+ HValue* value = instr->hydrogen()->value();
+ if (value->type().IsSmi()) {
+ if (value->IsLoadKeyed()) {
+ HLoadKeyed* load = HLoadKeyed::cast(value);
+ if (load->UsesMustHandleHole()) {
+ if (load->hole_mode() == ALLOW_RETURN_HOLE) {
+ mode = NUMBER_CANDIDATE_IS_SMI_CONVERT_HOLE;
+ } else {
+ mode = NUMBER_CANDIDATE_IS_SMI_OR_HOLE;
+ }
+ } else {
+ mode = NUMBER_CANDIDATE_IS_SMI;
+ }
+ }
+ }
- EmitNumberUntagD(input_reg,
- temp_reg,
- result_reg,
- instr->hydrogen()->deoptimize_on_undefined(),
- deoptimize_on_minus_zero,
- instr->environment());
+ EmitNumberUntagD(input_reg,
+ temp_reg,
+ result_reg,
+ instr->hydrogen()->deoptimize_on_undefined(),
+ deoptimize_on_minus_zero,
+ instr->environment(),
+ mode);
+ } else {
+ UNIMPLEMENTED();
+ }
}
@@ -4532,6 +5110,7 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
ASSERT(input->IsDoubleRegister());
LOperand* result = instr->result();
ASSERT(result->IsRegister());
+ CpuFeatures::Scope scope(SSE2);
XMMRegister input_reg = ToDoubleRegister(input);
Register result_reg = ToRegister(result);
@@ -4705,7 +5284,7 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
Handle<JSFunction> target = instr->hydrogen()->target();
- if (isolate()->heap()->InNewSpace(*target)) {
+ if (instr->hydrogen()->target_in_new_space()) {
Register reg = ToRegister(instr->value());
Handle<JSGlobalPropertyCell> cell =
isolate()->factory()->NewJSGlobalPropertyCell(target);
@@ -4721,10 +5300,10 @@ void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
void LCodeGen::DoCheckMapCommon(Register reg,
Handle<Map> map,
CompareMapMode mode,
- LEnvironment* env) {
+ LInstruction* instr) {
Label success;
__ CompareMap(reg, map, &success, mode);
- DeoptimizeIf(not_equal, env);
+ DeoptimizeIf(not_equal, instr->environment());
__ bind(&success);
}
@@ -4742,12 +5321,13 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
__ j(equal, &success);
}
Handle<Map> map = map_set->last();
- DoCheckMapCommon(reg, map, REQUIRE_EXACT_MAP, instr->environment());
+ DoCheckMapCommon(reg, map, REQUIRE_EXACT_MAP, instr);
__ bind(&success);
}
void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
+ CpuFeatures::Scope scope(SSE2);
XMMRegister value_reg = ToDoubleRegister(instr->unclamped());
Register result_reg = ToRegister(instr->result());
__ ClampDoubleToUint8(value_reg, xmm0, result_reg);
@@ -4762,6 +5342,8 @@ void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
+ CpuFeatures::Scope scope(SSE2);
+
ASSERT(instr->unclamped()->Equals(instr->result()));
Register input_reg = ToRegister(instr->unclamped());
Label is_smi, done, heap_number;
@@ -4799,26 +5381,26 @@ void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
ASSERT(instr->temp()->Equals(instr->result()));
Register reg = ToRegister(instr->temp());
- Handle<JSObject> holder = instr->holder();
- Handle<JSObject> current_prototype = instr->prototype();
+ ZoneList<Handle<JSObject> >* prototypes = instr->prototypes();
+ ZoneList<Handle<Map> >* maps = instr->maps();
- // Load prototype object.
- __ LoadHeapObject(reg, current_prototype);
+ ASSERT(prototypes->length() == maps->length());
- // Check prototype maps up to the holder.
- while (!current_prototype.is_identical_to(holder)) {
- DoCheckMapCommon(reg, Handle<Map>(current_prototype->map()),
- ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
-
- current_prototype =
- Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
- // Load next prototype object.
- __ LoadHeapObject(reg, current_prototype);
+ // TODO(ulan): Move this check to hydrogen and split HCheckPrototypeMaps
+ // into two instructions: one that checks the prototypes and another that
+ // loads the holder (HConstant). Find a way to do it without breaking
+ // parallel recompilation.
+ if (instr->hydrogen()->CanOmitPrototypeChecks()) {
+ for (int i = 0; i < maps->length(); i++) {
+ prototype_maps_.Add(maps->at(i), info()->zone());
+ }
+ __ LoadHeapObject(reg, prototypes->at(prototypes->length() - 1));
+ } else {
+ for (int i = 0; i < prototypes->length(); i++) {
+ __ LoadHeapObject(reg, prototypes->at(i));
+ DoCheckMapCommon(reg, maps->at(i), ALLOW_ELEMENT_TRANSITION_MAPS, instr);
+ }
}
-
- // Check the holder map.
- DoCheckMapCommon(reg, Handle<Map>(current_prototype->map()),
- ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
}
@@ -4920,11 +5502,66 @@ void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) {
}
+void LCodeGen::DoAllocate(LAllocate* instr) {
+ class DeferredAllocate: public LDeferredCode {
+ public:
+ DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() { codegen()->DoDeferredAllocate(instr_); }
+ virtual LInstruction* instr() { return instr_; }
+ private:
+ LAllocate* instr_;
+ };
+
+ DeferredAllocate* deferred =
+ new(zone()) DeferredAllocate(this, instr);
+
+ Register result = ToRegister(instr->result());
+ Register temp = ToRegister(instr->temp());
+
+ // Allocate memory for the object.
+ AllocationFlags flags = TAG_OBJECT;
+ if (instr->hydrogen()->MustAllocateDoubleAligned()) {
+ flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
+ }
+ if (instr->size()->IsConstantOperand()) {
+ int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+ __ AllocateInNewSpace(size, result, temp, no_reg, deferred->entry(), flags);
+ } else {
+ Register size = ToRegister(instr->size());
+ __ AllocateInNewSpace(size, result, temp, no_reg, deferred->entry(), flags);
+ }
+
+ __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
+ Register size = ToRegister(instr->size());
+ Register result = ToRegister(instr->result());
+
+ __ SmiTag(size);
+ PushSafepointRegistersScope scope(this);
+ // TODO(3095996): Get rid of this. For now, we need to make the
+ // result register contain a valid pointer because it is already
+ // contained in the register pointer map.
+ if (!size.is(result)) {
+ __ StoreToSafepointRegisterSlot(result, size);
+ }
+ __ push(size);
+ CallRuntimeFromDeferred(
+ Runtime::kAllocateInNewSpace, 1, instr, instr->context());
+ __ StoreToSafepointRegisterSlot(result, eax);
+}
+
+
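Note: SmiTag(size) above turns the raw byte count into a smi so it can be passed as a tagged runtime-call argument. On ia32 smis use a one-bit tag, so tagging is a single shift; a sketch of the encoding (assuming kSmiTagSize == 1 and kSmiTag == 0, V8's 32-bit defaults, and a value that fits in 31 bits):

    #include <cstdint>

    // Sketch of 32-bit smi encoding: a cleared low bit marks a smi.
    int32_t SmiTag(int32_t value) { return value << 1; }
    int32_t SmiUntag(int32_t smi) { return smi >> 1; }  // arithmetic shift restores it
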
void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
ASSERT(ToRegister(instr->context()).is(esi));
Handle<FixedArray> literals(instr->environment()->closure()->literals());
ElementsKind boilerplate_elements_kind =
instr->hydrogen()->boilerplate_elements_kind();
+ AllocationSiteMode allocation_site_mode =
+ instr->hydrogen()->allocation_site_mode();
// Deopt if the array literal boilerplate ElementsKind is of a type different
// than the expected one. The check isn't necessary if the boilerplate has
@@ -4955,8 +5592,8 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
ASSERT(instr->hydrogen()->depth() == 1);
FastCloneShallowArrayStub::Mode mode =
FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS;
- FastCloneShallowArrayStub stub(mode, length);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ FastCloneShallowArrayStub stub(mode, DONT_TRACK_ALLOCATION_SITE, length);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
} else if (instr->hydrogen()->depth() > 1) {
CallRuntime(Runtime::kCreateArrayLiteral, 3, instr);
} else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
@@ -4964,10 +5601,10 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
} else {
FastCloneShallowArrayStub::Mode mode =
boilerplate_elements_kind == FAST_DOUBLE_ELEMENTS
- ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
- : FastCloneShallowArrayStub::CLONE_ELEMENTS;
- FastCloneShallowArrayStub stub(mode, length);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
+ : FastCloneShallowArrayStub::CLONE_ELEMENTS;
+ FastCloneShallowArrayStub stub(mode, allocation_site_mode, length);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}
}
@@ -4975,10 +5612,14 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
Register result,
Register source,
- int* offset) {
+ int* offset,
+ AllocationSiteMode mode) {
ASSERT(!source.is(ecx));
ASSERT(!result.is(ecx));
+ bool create_allocation_site_info = mode == TRACK_ALLOCATION_SITE &&
+ object->map()->CanTrackAllocationSite();
+
if (FLAG_debug_code) {
__ LoadHeapObject(ecx, object);
__ cmp(source, ecx);
@@ -5001,8 +5642,13 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
// this object and its backing store.
int object_offset = *offset;
int object_size = object->map()->instance_size();
- int elements_offset = *offset + object_size;
int elements_size = has_elements ? elements->Size() : 0;
+ int elements_offset = *offset + object_size;
+ if (create_allocation_site_info) {
+ elements_offset += AllocationSiteInfo::kSize;
+ *offset += AllocationSiteInfo::kSize;
+ }
+
*offset += object_size + elements_size;
// Copy object header.
@@ -5021,13 +5667,15 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
// Copy in-object properties.
for (int i = 0; i < inobject_properties; i++) {
int total_offset = object_offset + object->GetInObjectPropertyOffset(i);
- Handle<Object> value = Handle<Object>(object->InObjectPropertyAt(i));
+ Handle<Object> value = Handle<Object>(object->InObjectPropertyAt(i),
+ isolate());
if (value->IsJSObject()) {
Handle<JSObject> value_object = Handle<JSObject>::cast(value);
__ lea(ecx, Operand(result, *offset));
__ mov(FieldOperand(result, total_offset), ecx);
__ LoadHeapObject(source, value_object);
- EmitDeepCopy(value_object, result, source, offset);
+ EmitDeepCopy(value_object, result, source, offset,
+ DONT_TRACK_ALLOCATION_SITE);
} else if (value->IsHeapObject()) {
__ LoadHeapObject(ecx, Handle<HeapObject>::cast(value));
__ mov(FieldOperand(result, total_offset), ecx);
@@ -5036,6 +5684,14 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
}
}
+ // Build Allocation Site Info if desired
+ if (create_allocation_site_info) {
+ __ mov(FieldOperand(result, object_size),
+ Immediate(Handle<Map>(isolate()->heap()->
+ allocation_site_info_map())));
+ __ mov(FieldOperand(result, object_size + kPointerSize), source);
+ }
+
if (has_elements) {
// Copy elements backing store header.
__ LoadHeapObject(source, elements);
@@ -5062,13 +5718,14 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
Handle<FixedArray> fast_elements = Handle<FixedArray>::cast(elements);
for (int i = 0; i < elements_length; i++) {
int total_offset = elements_offset + FixedArray::OffsetOfElementAt(i);
- Handle<Object> value(fast_elements->get(i));
+ Handle<Object> value(fast_elements->get(i), isolate());
if (value->IsJSObject()) {
Handle<JSObject> value_object = Handle<JSObject>::cast(value);
__ lea(ecx, Operand(result, *offset));
__ mov(FieldOperand(result, total_offset), ecx);
__ LoadHeapObject(source, value_object);
- EmitDeepCopy(value_object, result, source, offset);
+ EmitDeepCopy(value_object, result, source, offset,
+ DONT_TRACK_ALLOCATION_SITE);
} else if (value->IsHeapObject()) {
__ LoadHeapObject(ecx, Handle<HeapObject>::cast(value));
__ mov(FieldOperand(result, total_offset), ecx);
@@ -5118,7 +5775,8 @@ void LCodeGen::DoFastLiteral(LFastLiteral* instr) {
__ bind(&allocated);
int offset = 0;
__ LoadHeapObject(ebx, instr->hydrogen()->boilerplate());
- EmitDeepCopy(instr->hydrogen()->boilerplate(), eax, ebx, &offset);
+ EmitDeepCopy(instr->hydrogen()->boilerplate(), eax, ebx, &offset,
+ instr->hydrogen()->allocation_site_mode());
ASSERT_EQ(size, offset);
}
@@ -5129,28 +5787,36 @@ void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
Handle<FixedArray> constant_properties =
instr->hydrogen()->constant_properties();
- // Set up the parameters to the stub/runtime call.
- __ PushHeapObject(literals);
- __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
- __ push(Immediate(constant_properties));
int flags = instr->hydrogen()->fast_elements()
? ObjectLiteral::kFastElements
: ObjectLiteral::kNoFlags;
flags |= instr->hydrogen()->has_function()
? ObjectLiteral::kHasFunction
: ObjectLiteral::kNoFlags;
- __ push(Immediate(Smi::FromInt(flags)));
- // Pick the right runtime function or stub to call.
+ // Set up the parameters to the stub/runtime call and pick the right
+ // runtime function or stub to call.
int properties_count = constant_properties->length() / 2;
if (instr->hydrogen()->depth() > 1) {
+ __ PushHeapObject(literals);
+ __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
+ __ push(Immediate(constant_properties));
+ __ push(Immediate(Smi::FromInt(flags)));
CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
} else if (flags != ObjectLiteral::kFastElements ||
properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
+ __ PushHeapObject(literals);
+ __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
+ __ push(Immediate(constant_properties));
+ __ push(Immediate(Smi::FromInt(flags)));
CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr);
} else {
+ __ LoadHeapObject(eax, literals);
+ __ mov(ebx, Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
+ __ mov(ecx, Immediate(constant_properties));
+ __ mov(edx, Immediate(Smi::FromInt(flags)));
FastCloneShallowObjectStub stub(properties_count);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}
}
@@ -5223,7 +5889,7 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
if (!pretenure && shared_info->num_literals() == 0) {
FastNewClosureStub stub(shared_info->language_mode());
__ push(Immediate(shared_info));
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
} else {
__ push(esi);
__ push(Immediate(shared_info));
@@ -5262,13 +5928,13 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
Register input,
Handle<String> type_name) {
Condition final_branch_condition = no_condition;
- if (type_name->Equals(heap()->number_symbol())) {
+ if (type_name->Equals(heap()->number_string())) {
__ JumpIfSmi(input, true_label);
__ cmp(FieldOperand(input, HeapObject::kMapOffset),
factory()->heap_number_map());
final_branch_condition = equal;
- } else if (type_name->Equals(heap()->string_symbol())) {
+ } else if (type_name->Equals(heap()->string_string())) {
__ JumpIfSmi(input, false_label);
__ CmpObjectType(input, FIRST_NONSTRING_TYPE, input);
__ j(above_equal, false_label);
@@ -5276,17 +5942,17 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
1 << Map::kIsUndetectable);
final_branch_condition = zero;
- } else if (type_name->Equals(heap()->boolean_symbol())) {
+ } else if (type_name->Equals(heap()->boolean_string())) {
__ cmp(input, factory()->true_value());
__ j(equal, true_label);
__ cmp(input, factory()->false_value());
final_branch_condition = equal;
- } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_symbol())) {
+ } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_string())) {
__ cmp(input, factory()->null_value());
final_branch_condition = equal;
- } else if (type_name->Equals(heap()->undefined_symbol())) {
+ } else if (type_name->Equals(heap()->undefined_string())) {
__ cmp(input, factory()->undefined_value());
__ j(equal, true_label);
__ JumpIfSmi(input, false_label);
@@ -5296,7 +5962,7 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
1 << Map::kIsUndetectable);
final_branch_condition = not_zero;
- } else if (type_name->Equals(heap()->function_symbol())) {
+ } else if (type_name->Equals(heap()->function_string())) {
STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
__ JumpIfSmi(input, false_label);
__ CmpObjectType(input, JS_FUNCTION_TYPE, input);
@@ -5304,13 +5970,19 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
__ CmpInstanceType(input, JS_FUNCTION_PROXY_TYPE);
final_branch_condition = equal;
- } else if (type_name->Equals(heap()->object_symbol())) {
+ } else if (type_name->Equals(heap()->object_string())) {
__ JumpIfSmi(input, false_label);
if (!FLAG_harmony_typeof) {
__ cmp(input, factory()->null_value());
__ j(equal, true_label);
}
- __ CmpObjectType(input, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, input);
+ if (FLAG_harmony_symbols) {
+ __ CmpObjectType(input, SYMBOL_TYPE, input);
+ __ j(equal, true_label);
+ __ CmpInstanceType(input, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
+ } else {
+ __ CmpObjectType(input, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, input);
+ }
__ j(below, false_label);
__ CmpInstanceType(input, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
__ j(above, false_label);
@@ -5355,13 +6027,15 @@ void LCodeGen::EmitIsConstructCall(Register temp) {
void LCodeGen::EnsureSpaceForLazyDeopt() {
- // Ensure that we have enough space after the previous lazy-bailout
- // instruction for patching the code here.
- int current_pc = masm()->pc_offset();
- int patch_size = Deoptimizer::patch_size();
- if (current_pc < last_lazy_deopt_pc_ + patch_size) {
- int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc;
- __ Nop(padding_size);
+ if (!info()->IsStub()) {
+ // Ensure that we have enough space after the previous lazy-bailout
+ // instruction for patching the code here.
+ int current_pc = masm()->pc_offset();
+ int patch_size = Deoptimizer::patch_size();
+ if (current_pc < last_lazy_deopt_pc_ + patch_size) {
+ int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc;
+ __ Nop(padding_size);
+ }
}
last_lazy_deopt_pc_ = masm()->pc_offset();
}
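
Note: the padding rule is unchanged for optimized code — keep at least Deoptimizer::patch_size() bytes between consecutive lazy-deopt points so the deoptimizer can overwrite a call at the previous one; stubs skip it because they are never lazily deoptimized. The arithmetic, isolated:

    // Sketch: number of nop bytes needed so the previous lazy-deopt site can
    // be patched without clobbering the current instruction.
    int PaddingSize(int current_pc, int last_lazy_deopt_pc, int patch_size) {
      int gap = current_pc - last_lazy_deopt_pc;
      return gap < patch_size ? patch_size - gap : 0;
    }
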
@@ -5381,6 +6055,11 @@ void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
}
+void LCodeGen::DoDummyUse(LDummyUse* instr) {
+ // Nothing to see here, move on!
+}
+
+
void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
LOperand* obj = instr->object();
LOperand* key = instr->key();
@@ -5437,7 +6116,7 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
ASSERT(instr->context()->IsRegister());
ASSERT(ToRegister(instr->context()).is(esi));
StackCheckStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
EnsureSpaceForLazyDeopt();
__ bind(&done);
RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
diff --git a/src/3rdparty/v8/src/ia32/lithium-codegen-ia32.h b/src/3rdparty/v8/src/ia32/lithium-codegen-ia32.h
index 44ddaff..ab6779a 100644
--- a/src/3rdparty/v8/src/ia32/lithium-codegen-ia32.h
+++ b/src/3rdparty/v8/src/ia32/lithium-codegen-ia32.h
@@ -55,15 +55,19 @@ class LCodeGen BASE_EMBEDDED {
current_instruction_(-1),
instructions_(chunk->instructions()),
deoptimizations_(4, info->zone()),
+ jump_table_(4, info->zone()),
deoptimization_literals_(8, info->zone()),
+ prototype_maps_(0, info->zone()),
inlined_function_count_(0),
scope_(info->scope()),
status_(UNUSED),
translations_(info->zone()),
deferred_(8, info->zone()),
dynamic_frame_alignment_(false),
+ support_aligned_spilled_doubles_(false),
osr_pc_offset_(-1),
last_lazy_deopt_pc_(0),
+ frame_is_built_(false),
safepoints_(info->zone()),
resolver_(this),
expected_safepoint_kind_(Safepoint::kSimple) {
@@ -78,10 +82,20 @@ class LCodeGen BASE_EMBEDDED {
Heap* heap() const { return isolate()->heap(); }
Zone* zone() const { return zone_; }
+ bool NeedsEagerFrame() const {
+ return GetStackSlotCount() > 0 ||
+ info()->is_non_deferred_calling() ||
+ !info()->IsStub();
+ }
+ bool NeedsDeferredFrame() const {
+ return !NeedsEagerFrame() && info()->is_deferred_calling();
+ }
+
// Support for converting LOperands to assembler types.
Operand ToOperand(LOperand* op) const;
Register ToRegister(LOperand* op) const;
XMMRegister ToDoubleRegister(LOperand* op) const;
+ bool IsX87TopOfStack(LOperand* op) const;
bool IsInteger32(LConstantOperand* op) const;
Immediate ToInteger32Immediate(LOperand* op) const {
@@ -90,6 +104,9 @@ class LCodeGen BASE_EMBEDDED {
Handle<Object> ToHandle(LConstantOperand* op) const;
+ // A utility for instructions that return floating point values on X87.
+ void HandleX87FPReturnValue(LInstruction* instr);
+
// The operand denoting the second word (the one with a higher address) of
// a double stack slot.
Operand HighOperand(LOperand* op);
@@ -118,11 +135,12 @@ class LCodeGen BASE_EMBEDDED {
void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
void DoDeferredAllocateObject(LAllocateObject* instr);
+ void DoDeferredAllocate(LAllocate* instr);
void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
Label* map_check);
void DoCheckMapCommon(Register reg, Handle<Map> map,
- CompareMapMode mode, LEnvironment* env);
+ CompareMapMode mode, LInstruction* instr);
// Parallel move support.
void DoParallelMove(LParallelMove* move);
@@ -172,7 +190,7 @@ class LCodeGen BASE_EMBEDDED {
Register temporary2);
int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
- int GetParameterCount() const { return scope()->num_parameters(); }
+ int GetParameterCount() const { return info()->num_parameters(); }
void Abort(const char* reason);
void Comment(const char* format, ...);
@@ -184,9 +202,7 @@ class LCodeGen BASE_EMBEDDED {
bool GeneratePrologue();
bool GenerateBody();
bool GenerateDeferredCode();
- // Pad the reloc info to ensure that we have enough space to patch during
- // deoptimization.
- bool GenerateRelocPadding();
+ bool GenerateJumpTable();
bool GenerateSafepointTable();
enum SafepointMode {
@@ -219,6 +235,8 @@ class LCodeGen BASE_EMBEDDED {
LInstruction* instr,
LOperand* context);
+ void LoadContextFromDeferred(LOperand* context);
+
enum EDIState {
EDI_UNINITIALIZED,
EDI_CONTAINS_TARGET
@@ -243,8 +261,10 @@ class LCodeGen BASE_EMBEDDED {
LOperand* op,
bool is_tagged,
bool is_uint32,
+ bool arguments_known,
int arguments_index,
int arguments_count);
+ void RegisterDependentCodeForEmbeddedMaps(Handle<Code> code);
void PopulateDeoptimizationData(Handle<Code> code);
int DefineDeoptimizationLiteral(Handle<Object> literal);
@@ -266,7 +286,6 @@ class LCodeGen BASE_EMBEDDED {
void EmitIntegerMathAbs(LUnaryMathOperation* instr);
void DoMathAbs(LUnaryMathOperation* instr);
void DoMathFloor(LUnaryMathOperation* instr);
- void DoMathRound(LUnaryMathOperation* instr);
void DoMathSqrt(LUnaryMathOperation* instr);
void DoMathLog(LUnaryMathOperation* instr);
void DoMathTan(LUnaryMathOperation* instr);
@@ -288,16 +307,14 @@ class LCodeGen BASE_EMBEDDED {
static Condition TokenToCondition(Token::Value op, bool is_unsigned);
void EmitGoto(int block);
void EmitBranch(int left_block, int right_block, Condition cc);
- void EmitNumberUntagD(Register input,
- Register temp,
- XMMRegister result,
- bool deoptimize_on_undefined,
- bool deoptimize_on_minus_zero,
- LEnvironment* env);
-
- void DeoptIfTaggedButNotSmi(LEnvironment* environment,
- HValue* value,
- LOperand* operand);
+ void EmitNumberUntagD(
+ Register input,
+ Register temp,
+ XMMRegister result,
+ bool deoptimize_on_undefined,
+ bool deoptimize_on_minus_zero,
+ LEnvironment* env,
+ NumberUntagDMode mode = NUMBER_CANDIDATE_IS_ANY_TAGGED);
// Emits optimized code for typeof x == "y". Modifies input register.
// Returns the condition on which a final split to
@@ -337,7 +354,8 @@ class LCodeGen BASE_EMBEDDED {
void EmitDeepCopy(Handle<JSObject> object,
Register result,
Register source,
- int* offset);
+ int* offset,
+ AllocationSiteMode mode);
void EnsureSpaceForLazyDeopt();
void DoLoadKeyedExternalArray(LLoadKeyed* instr);
@@ -356,19 +374,35 @@ class LCodeGen BASE_EMBEDDED {
MacroAssembler* const masm_;
CompilationInfo* const info_;
+ struct JumpTableEntry {
+ inline JumpTableEntry(Address entry, bool frame, bool is_lazy)
+ : label(),
+ address(entry),
+ needs_frame(frame),
+ is_lazy_deopt(is_lazy) { }
+ Label label;
+ Address address;
+ bool needs_frame;
+ bool is_lazy_deopt;
+ };
+
int current_block_;
int current_instruction_;
const ZoneList<LInstruction*>* instructions_;
ZoneList<LEnvironment*> deoptimizations_;
+ ZoneList<JumpTableEntry> jump_table_;
ZoneList<Handle<Object> > deoptimization_literals_;
+ ZoneList<Handle<Map> > prototype_maps_;
int inlined_function_count_;
Scope* const scope_;
Status status_;
TranslationBuffer translations_;
ZoneList<LDeferredCode*> deferred_;
bool dynamic_frame_alignment_;
+ bool support_aligned_spilled_doubles_;
int osr_pc_offset_;
int last_lazy_deopt_pc_;
+ bool frame_is_built_;
// Builder that keeps track of safepoints in the code. The table
// itself is emitted at the end of the generated code.
@@ -386,6 +420,7 @@ class LCodeGen BASE_EMBEDDED {
ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
codegen_->masm_->PushSafepointRegisters();
codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;
+ ASSERT(codegen_->info()->is_calling());
}
~PushSafepointRegistersScope() {
diff --git a/src/3rdparty/v8/src/ia32/lithium-gap-resolver-ia32.cc b/src/3rdparty/v8/src/ia32/lithium-gap-resolver-ia32.cc
index 6428916..6fee7fe 100644
--- a/src/3rdparty/v8/src/ia32/lithium-gap-resolver-ia32.cc
+++ b/src/3rdparty/v8/src/ia32/lithium-gap-resolver-ia32.cc
@@ -191,7 +191,7 @@ int LGapResolver::CountSourceUses(LOperand* operand) {
Register LGapResolver::GetFreeRegisterNot(Register reg) {
int skip_index = reg.is(no_reg) ? -1 : Register::ToAllocationIndex(reg);
- for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
+ for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
if (source_uses_[i] == 0 && destination_uses_[i] > 0 && i != skip_index) {
return Register::FromAllocationIndex(i);
}
@@ -204,7 +204,7 @@ bool LGapResolver::HasBeenReset() {
if (!moves_.is_empty()) return false;
if (spilled_register_ >= 0) return false;
- for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
+ for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
if (source_uses_[i] != 0) return false;
if (destination_uses_[i] != 0) return false;
}
@@ -256,7 +256,7 @@ Register LGapResolver::EnsureTempRegister() {
// 3. Prefer to spill a register that is not used in any remaining move
// because it will not need to be restored until the end.
- for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
+ for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
if (source_uses_[i] == 0 && destination_uses_[i] == 0) {
Register scratch = Register::FromAllocationIndex(i);
__ push(scratch);
@@ -324,6 +324,7 @@ void LGapResolver::EmitMove(int index) {
}
} else if (source->IsDoubleRegister()) {
+ CpuFeatures::Scope scope(SSE2);
XMMRegister src = cgen_->ToDoubleRegister(source);
if (destination->IsDoubleRegister()) {
XMMRegister dst = cgen_->ToDoubleRegister(destination);
@@ -334,6 +335,7 @@ void LGapResolver::EmitMove(int index) {
__ movdbl(dst, src);
}
} else if (source->IsDoubleStackSlot()) {
+ CpuFeatures::Scope scope(SSE2);
ASSERT(destination->IsDoubleRegister() ||
destination->IsDoubleStackSlot());
Operand src = cgen_->ToOperand(source);
@@ -346,7 +348,6 @@ void LGapResolver::EmitMove(int index) {
__ movdbl(xmm0, src);
__ movdbl(dst, xmm0);
}
-
} else {
UNREACHABLE();
}
@@ -410,6 +411,7 @@ void LGapResolver::EmitSwap(int index) {
__ mov(src, tmp0);
}
} else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
+ CpuFeatures::Scope scope(SSE2);
// XMM register-register swap. We rely on having xmm0
// available as a fixed scratch register.
XMMRegister src = cgen_->ToDoubleRegister(source);
@@ -419,6 +421,7 @@ void LGapResolver::EmitSwap(int index) {
__ movaps(dst, xmm0);
} else if (source->IsDoubleRegister() || destination->IsDoubleRegister()) {
+ CpuFeatures::Scope scope(SSE2);
// XMM register-memory swap. We rely on having xmm0
// available as a fixed scratch register.
ASSERT(source->IsDoubleStackSlot() || destination->IsDoubleStackSlot());
@@ -432,6 +435,7 @@ void LGapResolver::EmitSwap(int index) {
__ movdbl(reg, Operand(xmm0));
} else if (source->IsDoubleStackSlot() && destination->IsDoubleStackSlot()) {
+ CpuFeatures::Scope scope(SSE2);
// Double-width memory-to-memory. Spill on demand to use a general
// purpose temporary register and also rely on having xmm0 available as
// a fixed scratch register.
diff --git a/src/3rdparty/v8/src/ia32/lithium-gap-resolver-ia32.h b/src/3rdparty/v8/src/ia32/lithium-gap-resolver-ia32.h
index 0c81d72..3a58f58 100644
--- a/src/3rdparty/v8/src/ia32/lithium-gap-resolver-ia32.h
+++ b/src/3rdparty/v8/src/ia32/lithium-gap-resolver-ia32.h
@@ -97,8 +97,8 @@ class LGapResolver BASE_EMBEDDED {
ZoneList<LMoveOperands> moves_;
// Source and destination use counts for the general purpose registers.
- int source_uses_[Register::kNumAllocatableRegisters];
- int destination_uses_[Register::kNumAllocatableRegisters];
+ int source_uses_[Register::kMaxNumAllocatableRegisters];
+ int destination_uses_[Register::kMaxNumAllocatableRegisters];
// If we had to spill on demand, the currently spilled register's
// allocation index.
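
Note: the sizing change in this header hunk matches the loop changes above — arrays are dimensioned for the compile-time maximum while iteration uses the dynamic count, presumably because the allocatable set can now depend on detected CPU features. The assumed pattern (values are illustrative only):

    // Sketch: compile-time maximum sizes the arrays; the runtime count drives
    // iteration because the usable set can depend on CPU features such as SSE2.
    static const int kMaxNumAllocatableRegisters = 8;  // illustrative value

    int NumAllocatableRegisters(bool cpu_has_sse2 /* assumed feature probe */) {
      return cpu_has_sse2 ? kMaxNumAllocatableRegisters : 5;  // assumed rule
    }
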
diff --git a/src/3rdparty/v8/src/ia32/lithium-ia32.cc b/src/3rdparty/v8/src/ia32/lithium-ia32.cc
index dcc5b77..910219d 100644
--- a/src/3rdparty/v8/src/ia32/lithium-ia32.cc
+++ b/src/3rdparty/v8/src/ia32/lithium-ia32.cc
@@ -44,10 +44,10 @@ LITHIUM_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE)
#undef DEFINE_COMPILE
LOsrEntry::LOsrEntry() {
- for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
+ for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
register_spills_[i] = NULL;
}
- for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; ++i) {
+ for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); ++i) {
double_register_spills_[i] = NULL;
}
}
@@ -114,7 +114,11 @@ void LInstruction::PrintDataTo(StringStream* stream) {
stream->Add("= ");
for (int i = 0; i < InputCount(); i++) {
if (i > 0) stream->Add(" ");
- InputAt(i)->PrintTo(stream);
+ if (InputAt(i) == NULL) {
+ stream->Add("NULL");
+ } else {
+ InputAt(i)->PrintTo(stream);
+ }
}
}
@@ -299,12 +303,23 @@ void LUnaryMathOperation::PrintDataTo(StringStream* stream) {
}
+void LMathExp::PrintDataTo(StringStream* stream) {
+ value()->PrintTo(stream);
+}
+
+
void LMathPowHalf::PrintDataTo(StringStream* stream) {
stream->Add("/pow_half ");
value()->PrintTo(stream);
}
+void LMathRound::PrintDataTo(StringStream* stream) {
+ stream->Add("/round ");
+ value()->PrintTo(stream);
+}
+
+
void LLoadContextSlot::PrintDataTo(StringStream* stream) {
context()->PrintTo(stream);
stream->Add("[%d]", slot_index());
@@ -358,6 +373,19 @@ void LCallNew::PrintDataTo(StringStream* stream) {
}
+void LCallNewArray::PrintDataTo(StringStream* stream) {
+ stream->Add("= ");
+ context()->PrintTo(stream);
+ stream->Add(" ");
+ constructor()->PrintTo(stream);
+ stream->Add(" #%d / ", arity());
+ ASSERT(hydrogen()->property_cell()->value()->IsSmi());
+ ElementsKind kind = static_cast<ElementsKind>(
+ Smi::cast(hydrogen()->property_cell()->value())->value());
+ stream->Add(" (%s) ", ElementsKindToString(kind));
+}
+
+
void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
arguments()->PrintTo(stream);
@@ -408,11 +436,27 @@ void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
}
+void LLoadKeyed::PrintDataTo(StringStream* stream) {
+ elements()->PrintTo(stream);
+ stream->Add("[");
+ key()->PrintTo(stream);
+ if (hydrogen()->IsDehoisted()) {
+ stream->Add(" + %d]", additional_index());
+ } else {
+ stream->Add("]");
+ }
+}
+
+
void LStoreKeyed::PrintDataTo(StringStream* stream) {
elements()->PrintTo(stream);
stream->Add("[");
key()->PrintTo(stream);
- stream->Add("] <- ");
+ if (hydrogen()->IsDehoisted()) {
+ stream->Add(" + %d] <-", additional_index());
+ } else {
+ stream->Add("] <- ");
+ }
value()->PrintTo(stream);
}
@@ -439,9 +483,11 @@ LPlatformChunk* LChunkBuilder::Build() {
status_ = BUILDING;
// Reserve the first spill slot for the state of dynamic alignment.
- int alignment_state_index = chunk_->GetNextSpillIndex(false);
- ASSERT_EQ(alignment_state_index, 0);
- USE(alignment_state_index);
+ if (info()->IsOptimizing()) {
+ int alignment_state_index = chunk_->GetNextSpillIndex(false);
+ ASSERT_EQ(alignment_state_index, 0);
+ USE(alignment_state_index);
+ }
const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
for (int i = 0; i < blocks->length(); i++) {
@@ -473,6 +519,12 @@ LUnallocated* LChunkBuilder::ToUnallocated(XMMRegister reg) {
}
+LUnallocated* LChunkBuilder::ToUnallocated(X87TopOfStackRegister reg) {
+ return new(zone()) LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER,
+ X87TopOfStackRegister::ToAllocationIndex(reg));
+}
+
+
LOperand* LChunkBuilder::UseFixed(HValue* value, Register fixed_register) {
return Use(value, ToUnallocated(fixed_register));
}
@@ -605,6 +657,13 @@ LInstruction* LChunkBuilder::DefineFixedDouble(
}
+template<int I, int T>
+LInstruction* LChunkBuilder::DefineX87TOS(
+ LTemplateInstruction<1, I, T>* instr) {
+ return Define(instr, ToUnallocated(x87tos));
+}
+
+
LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
HEnvironment* hydrogen_env = current_block_->last_environment();
int argument_index_accumulator = 0;
@@ -617,6 +676,8 @@ LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
HInstruction* hinstr,
CanDeoptimize can_deoptimize) {
+ info()->MarkAsNonDeferredCalling();
+
#ifdef DEBUG
instr->VerifyCall();
#endif
@@ -684,6 +745,11 @@ LInstruction* LChunkBuilder::DoBlockEntry(HBlockEntry* instr) {
}
+LInstruction* LChunkBuilder::DoDummyUse(HDummyUse* instr) {
+ return DefineAsRegister(new(zone()) LDummyUse(UseAny(instr->value())));
+}
+
+
LInstruction* LChunkBuilder::DoSoftDeoptimize(HSoftDeoptimize* instr) {
return AssignEnvironment(new(zone()) LDeoptimize);
}
@@ -979,6 +1045,12 @@ LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
}
+LInstruction* LChunkBuilder::DoInstanceSize(HInstanceSize* instr) {
+ LOperand* object = UseRegisterAtStart(instr->object());
+ return DefineAsRegister(new(zone()) LInstanceSize(object));
+}
+
+
LInstruction* LChunkBuilder::DoWrapReceiver(HWrapReceiver* instr) {
LOperand* receiver = UseRegister(instr->receiver());
LOperand* function = UseRegisterAtStart(instr->function());
@@ -1017,7 +1089,13 @@ LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) {
LInstruction* LChunkBuilder::DoContext(HContext* instr) {
- return instr->HasNoUses() ? NULL : DefineAsRegister(new(zone()) LContext);
+ if (instr->HasNoUses()) return NULL;
+
+ if (info()->IsStub()) {
+ return DefineFixed(new(zone()) LContext, esi);
+ }
+
+ return DefineAsRegister(new(zone()) LContext);
}
@@ -1035,7 +1113,8 @@ LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) {
LInstruction* LChunkBuilder::DoGlobalObject(HGlobalObject* instr) {
LOperand* context = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LGlobalObject(context, instr->qml_global()));
+ return DefineAsRegister(new(zone()) LGlobalObject(context,
+ instr->qml_global()));
}
@@ -1071,6 +1150,14 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(context,
input);
return DefineSameAsFirst(result);
+ } else if (op == kMathExp) {
+ ASSERT(instr->representation().IsDouble());
+ ASSERT(instr->value()->representation().IsDouble());
+ LOperand* value = UseTempRegister(instr->value());
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempRegister();
+ LMathExp* result = new(zone()) LMathExp(value, temp1, temp2);
+ return DefineAsRegister(result);
} else if (op == kMathSin || op == kMathCos || op == kMathTan) {
LOperand* context = UseFixed(instr->context(), esi);
LOperand* input = UseFixedDouble(instr->value(), xmm1);
@@ -1084,6 +1171,10 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
LOperand* temp = TempRegister();
LMathPowHalf* result = new(zone()) LMathPowHalf(context, input, temp);
return DefineSameAsFirst(result);
+ } else if (op == kMathRound) {
+ LOperand* temp = FixedTemp(xmm4);
+ LMathRound* result = new(zone()) LMathRound(context, input, temp);
+ return AssignEnvironment(DefineAsRegister(result));
}
LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(context,
input);
@@ -1092,8 +1183,6 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
case kMathFloor:
return AssignEnvironment(DefineAsRegister(result));
- case kMathRound:
- return AssignEnvironment(DefineAsRegister(result));
case kMathSqrt:
return DefineSameAsFirst(result);
default:
@@ -1145,6 +1234,16 @@ LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
}
+LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
+ ASSERT(FLAG_optimize_constructed_arrays);
+ LOperand* context = UseFixed(instr->context(), esi);
+ LOperand* constructor = UseFixed(instr->constructor(), edi);
+ argument_count_ -= instr->argument_count();
+ LCallNewArray* result = new(zone()) LCallNewArray(context, constructor);
+ return MarkAsCall(DefineFixed(result, eax), instr);
+}
+
+
LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
LOperand* context = UseFixed(instr->context(), esi);
LOperand* function = UseFixed(instr->function(), edi);
@@ -1218,6 +1317,13 @@ LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::DIV, instr);
} else if (instr->representation().IsInteger32()) {
+ if (instr->HasPowerOf2Divisor()) {
+ ASSERT(!instr->CheckFlag(HValue::kCanBeDivByZero));
+ LOperand* value = UseRegisterAtStart(instr->left());
+ LDivI* div =
+ new(zone()) LDivI(value, UseOrConstant(instr->right()), NULL);
+ return AssignEnvironment(DefineSameAsFirst(div));
+ }
// The temporary operand is necessary to ensure that right is not allocated
// into edx.
LOperand* temp = FixedTemp(edx);
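
Note: the new HasPowerOf2Divisor fast path lets LDivI avoid idiv entirely. A sketch of the standard strength reduction it relies on (truncating signed division, with 0 < shift < 31; not necessarily the exact emitted sequence):

    #include <cstdint>

    // Sketch: truncating signed division by 2^shift. Negative dividends are
    // biased by (2^shift - 1) so the arithmetic shift rounds toward zero
    // instead of toward negative infinity.
    int32_t DivByPowerOf2(int32_t dividend, int shift) {
      int32_t bias = (dividend >> 31) & ((1 << shift) - 1);
      return (dividend + bias) >> shift;
    }
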
@@ -1252,12 +1358,31 @@ HValue* LChunkBuilder::SimplifiedDivisorForMathFloorOfDiv(HValue* divisor) {
return constant_val->CopyToRepresentation(Representation::Integer32(),
divisor->block()->zone());
}
+ // A value with an integer representation does not need to be transformed.
+ if (divisor->representation().IsInteger32()) {
+ return divisor;
+ // A change from an integer32 can be replaced by the integer32 value.
+ } else if (divisor->IsChange() &&
+ HChange::cast(divisor)->from().IsInteger32()) {
+ return HChange::cast(divisor)->value();
+ }
return NULL;
}
LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
HValue* right = instr->right();
+ if (!right->IsConstant()) {
+ ASSERT(right->representation().IsInteger32());
+ // The temporary operand is necessary to ensure that right is not allocated
+ // into edx.
+ LOperand* temp = FixedTemp(edx);
+ LOperand* dividend = UseFixed(instr->left(), eax);
+ LOperand* divisor = UseRegister(instr->right());
+ LDivI* flooring_div = new(zone()) LDivI(dividend, divisor, temp);
+ return AssignEnvironment(DefineFixed(flooring_div, eax));
+ }
+
ASSERT(right->IsConstant() && HConstant::cast(right)->HasInteger32Value());
LOperand* divisor = chunk_->DefineConstantOperand(HConstant::cast(right));
int32_t divisor_si = HConstant::cast(right)->Integer32Value();
@@ -1449,7 +1574,7 @@ LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
LInstruction* LChunkBuilder::DoCompareIDAndBranch(
HCompareIDAndBranch* instr) {
- Representation r = instr->GetInputRepresentation();
+ Representation r = instr->representation();
if (r.IsInteger32()) {
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
@@ -1614,6 +1739,28 @@ LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
}
+LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) {
+ LOperand* string = UseRegister(instr->string());
+ LOperand* index = UseRegister(instr->index());
+ ASSERT(ecx.is_byte_register());
+ LOperand* value = UseFixed(instr->value(), ecx);
+ LSeqStringSetChar* result =
+ new(zone()) LSeqStringSetChar(instr->encoding(), string, index, value);
+ return DefineSameAsFirst(result);
+}
+
+
+LInstruction* LChunkBuilder::DoNumericConstraint(HNumericConstraint* instr) {
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoInductionVariableAnnotation(
+ HInductionVariableAnnotation* instr) {
+ return NULL;
+}
+
+
LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
return AssignEnvironment(new(zone()) LBoundsCheck(
UseRegisterOrConstantAtStart(instr->index()),
@@ -1651,8 +1798,12 @@ LInstruction* LChunkBuilder::DoForceRepresentation(HForceRepresentation* bad) {
LInstruction* LChunkBuilder::DoChange(HChange* instr) {
Representation from = instr->from();
Representation to = instr->to();
+ // Only mark conversions that might need to allocate as calling rather than
+ // all changes. This keeps simple, non-allocating conversions from having to
+ // build a stack frame.
if (from.IsTagged()) {
if (to.IsDouble()) {
+ info()->MarkAsDeferredCalling();
LOperand* value = UseRegister(instr->value());
// Temp register only necessary for minus zero check.
LOperand* temp = instr->deoptimize_on_minus_zero()
@@ -1677,8 +1828,11 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
}
} else if (from.IsDouble()) {
if (to.IsTagged()) {
- LOperand* value = UseRegister(instr->value());
- LOperand* temp = TempRegister();
+ info()->MarkAsDeferredCalling();
+ LOperand* value = CpuFeatures::IsSupported(SSE2)
+ ? UseRegisterAtStart(instr->value())
+ : UseAtStart(instr->value());
+ LOperand* temp = FLAG_inline_new ? TempRegister() : NULL;
// Make sure that temp and result_temp are different registers.
LUnallocated* result_temp = TempRegister();
@@ -1695,14 +1849,14 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
DefineAsRegister(new(zone()) LDoubleToI(value, temp)));
}
} else if (from.IsInteger32()) {
+ info()->MarkAsDeferredCalling();
if (to.IsTagged()) {
HValue* val = instr->value();
LOperand* value = UseRegister(val);
if (val->HasRange() && val->range()->IsInSmiRange()) {
return DefineSameAsFirst(new(zone()) LSmiTag(value));
} else if (val->CheckFlag(HInstruction::kUint32)) {
- LOperand* temp = FixedTemp(xmm1);
- LNumberTagU* result = new(zone()) LNumberTagU(value, temp);
+ LNumberTagU* result = new(zone()) LNumberTagU(value);
return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
} else {
LNumberTagI* result = new(zone()) LNumberTagI(value);
@@ -1752,14 +1906,19 @@ LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
}
+LInstruction* LChunkBuilder::DoCheckSmiOrInt32(HCheckSmiOrInt32* instr) {
+ LOperand* value = UseAtStart(instr->value());
+ return AssignEnvironment(new(zone()) LCheckSmi(value));
+}
+
+
LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) {
// If the target is in new space, we'll emit a global cell compare and so
// want the value in a register. If the target gets promoted before we
// emit code, we will still get the register but will do an immediate
// compare instead of the cell compare. This is safe.
- LOperand* value = Isolate::Current()->heap()->InNewSpace(*instr->target())
- ? UseRegisterAtStart(instr->value())
- : UseAtStart(instr->value());
+ LOperand* value = instr->target_in_new_space()
+ ? UseRegisterAtStart(instr->value()) : UseAtStart(instr->value());
return AssignEnvironment(new(zone()) LCheckFunction(value));
}
@@ -1793,7 +1952,10 @@ LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
- return new(zone()) LReturn(UseFixed(instr->value(), eax));
+ LOperand* context = info()->IsStub()
+ ? UseFixed(instr->context(), esi)
+ : NULL;
+ return new(zone()) LReturn(UseFixed(instr->value(), eax), context);
}
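For stubs, the return sequence has to restore the caller's context itself, which is why LReturn now carries an esi-fixed context input. Roughly the epilogue shape this enables (assumed, not part of this hunk):

    if (info()->IsStub()) {
      // Stub frames do not maintain a context slot the usual way;
      // reload esi from the frame before returning to the caller.
      __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
    }
    __ Ret();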
@@ -1933,14 +2095,17 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
ASSERT(instr->key()->representation().IsInteger32() ||
instr->key()->representation().IsTagged());
ElementsKind elements_kind = instr->elements_kind();
- LOperand* elements = UseRegisterAtStart(instr->elements());
- LOperand* key = instr->is_external() &&
- ExternalArrayOpRequiresTemp<HLoadKeyed>(instr)
+ bool clobbers_key = ExternalArrayOpRequiresTemp(
+ instr->key()->representation(), elements_kind);
+ LOperand* key = clobbers_key
? UseTempRegister(instr->key())
: UseRegisterOrConstantAtStart(instr->key());
+ LLoadKeyed* result = NULL;
-#ifdef DEBUG
- if (instr->is_external()) {
+ if (!instr->is_external()) {
+ LOperand* obj = UseRegisterAtStart(instr->elements());
+ result = new(zone()) LLoadKeyed(obj, key);
+ } else {
ASSERT(
(instr->representation().IsInteger32() &&
(elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
@@ -1948,10 +2113,10 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
(instr->representation().IsDouble() &&
((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
(elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
+ LOperand* external_pointer = UseRegister(instr->elements());
+ result = new(zone()) LLoadKeyed(external_pointer, key);
}
-#endif
- LLoadKeyed* result = new(zone()) LLoadKeyed(elements, key);
DefineAsRegister(result);
bool can_deoptimize = instr->RequiresHoleCheck() ||
(elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS);
@@ -1973,57 +2138,60 @@ LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
- ElementsKind elements_kind = instr->elements_kind();
- LOperand* elements;
- LOperand* val;
- LOperand* key;
-
if (!instr->is_external()) {
ASSERT(instr->elements()->representation().IsTagged());
ASSERT(instr->key()->representation().IsInteger32() ||
instr->key()->representation().IsTagged());
- if (instr->NeedsWriteBarrier() &&
- !IsFastDoubleElementsKind(elements_kind)) {
- val = UseTempRegister(instr->value());
- key = UseTempRegister(instr->key());
- elements = UseRegister(instr->elements());
- } else {
- val = UseRegisterAtStart(instr->value());
- key = UseRegisterOrConstantAtStart(instr->key());
- elements = UseRegisterAtStart(instr->elements());
- }
- } else {
- ASSERT(
- (instr->value()->representation().IsInteger32() &&
- (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
- (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
- (instr->value()->representation().IsDouble() &&
- ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
- (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
- ASSERT(instr->elements()->representation().IsExternal());
+ if (instr->value()->representation().IsDouble()) {
+ LOperand* object = UseRegisterAtStart(instr->elements());
+ LOperand* val = UseTempRegister(instr->value());
+ LOperand* key = UseRegisterOrConstantAtStart(instr->key());
- if (ExternalArrayOpRequiresTemp<HStoreKeyed>(instr)) {
- key = UseTempRegister(instr->key());
- elements = UseRegister(instr->elements());
+ return new(zone()) LStoreKeyed(object, key, val);
} else {
- key = UseRegisterOrConstantAtStart(instr->key());
- elements = UseRegisterAtStart(instr->elements());
+ ASSERT(instr->value()->representation().IsTagged());
+ bool needs_write_barrier = instr->NeedsWriteBarrier();
+
+ LOperand* obj = UseRegister(instr->elements());
+ LOperand* val = needs_write_barrier
+ ? UseTempRegister(instr->value())
+ : UseRegisterAtStart(instr->value());
+ LOperand* key = needs_write_barrier
+ ? UseTempRegister(instr->key())
+ : UseRegisterOrConstantAtStart(instr->key());
+ return new(zone()) LStoreKeyed(obj, key, val);
}
-
- // Determine if we need a byte register in this case for the value.
- bool val_is_fixed_register =
- elements_kind == EXTERNAL_BYTE_ELEMENTS ||
- elements_kind == EXTERNAL_UNSIGNED_BYTE_ELEMENTS ||
- elements_kind == EXTERNAL_PIXEL_ELEMENTS;
- val = val_is_fixed_register
- ? UseFixed(instr->value(), eax)
- : UseRegister(instr->value());
}
- LStoreKeyed* result = new(zone()) LStoreKeyed(elements, key, val);
- ASSERT(result != NULL);
- return result;
+ ElementsKind elements_kind = instr->elements_kind();
+ ASSERT(
+ (instr->value()->representation().IsInteger32() &&
+ (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
+ (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
+ (instr->value()->representation().IsDouble() &&
+ ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
+ (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
+ ASSERT(instr->elements()->representation().IsExternal());
+
+ LOperand* external_pointer = UseRegister(instr->elements());
+ // Determine if we need a byte register in this case for the value.
+ bool val_is_fixed_register =
+ elements_kind == EXTERNAL_BYTE_ELEMENTS ||
+ elements_kind == EXTERNAL_UNSIGNED_BYTE_ELEMENTS ||
+ elements_kind == EXTERNAL_PIXEL_ELEMENTS;
+
+ LOperand* val = val_is_fixed_register
+ ? UseFixed(instr->value(), eax)
+ : UseRegister(instr->value());
+ bool clobbers_key = ExternalArrayOpRequiresTemp(
+ instr->key()->representation(), elements_kind);
+ LOperand* key = clobbers_key
+ ? UseTempRegister(instr->key())
+ : UseRegisterOrConstantAtStart(instr->key());
+ return new(zone()) LStoreKeyed(external_pointer, key, val);
}
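The operand policy in the tagged branch above exists because the write barrier clobbers its register arguments; value and key therefore get temp registers whenever a barrier may run. A sketch of the downstream codegen that motivates it (assumed outline):

    __ mov(FieldOperand(elements, key, times_pointer_size,
                        FixedArray::kHeaderSize), value);
    if (instr->hydrogen()->NeedsWriteBarrier()) {
      // RecordWrite trashes both 'key' and 'value', which is safe only
      // because they were allocated with UseTempRegister above.
      __ lea(key, FieldOperand(elements, key, times_pointer_size,
                               FixedArray::kHeaderSize));
      __ RecordWrite(elements, key, value, kDontSaveFPRegs);
    }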
@@ -2045,28 +2213,44 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
LInstruction* LChunkBuilder::DoTransitionElementsKind(
HTransitionElementsKind* instr) {
- ElementsKind from_kind = instr->original_map()->elements_kind();
- ElementsKind to_kind = instr->transitioned_map()->elements_kind();
- if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
+ LOperand* object = UseRegister(instr->object());
+ if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) {
LOperand* object = UseRegister(instr->object());
LOperand* new_map_reg = TempRegister();
LOperand* temp_reg = TempRegister();
LTransitionElementsKind* result =
- new(zone()) LTransitionElementsKind(object, new_map_reg, temp_reg);
- return DefineSameAsFirst(result);
+ new(zone()) LTransitionElementsKind(object, NULL,
+ new_map_reg, temp_reg);
+ return result;
+ } else if (FLAG_compiled_transitions) {
+ LOperand* context = UseRegister(instr->context());
+ LTransitionElementsKind* result =
+ new(zone()) LTransitionElementsKind(object, context, NULL, NULL);
+ return AssignPointerMap(result);
} else {
LOperand* object = UseFixed(instr->object(), eax);
LOperand* fixed_object_reg = FixedTemp(edx);
LOperand* new_map_reg = FixedTemp(ebx);
LTransitionElementsKind* result =
new(zone()) LTransitionElementsKind(object,
+ NULL,
new_map_reg,
fixed_object_reg);
- return MarkAsCall(DefineFixed(result, eax), instr);
+ return MarkAsCall(result, instr);
}
}
+LInstruction* LChunkBuilder::DoTrapAllocationMemento(
+ HTrapAllocationMemento* instr) {
+ LOperand* object = UseRegister(instr->object());
+ LOperand* temp = TempRegister();
+ LTrapAllocationMemento* result =
+ new(zone()) LTrapAllocationMemento(object, temp);
+ return AssignEnvironment(result);
+}
+
+
LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
bool needs_write_barrier = instr->NeedsWriteBarrier();
bool needs_write_barrier_for_map = !instr->transition().is_null() &&
@@ -2145,13 +2329,24 @@ LInstruction* LChunkBuilder::DoStringLength(HStringLength* instr) {
LInstruction* LChunkBuilder::DoAllocateObject(HAllocateObject* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
+ info()->MarkAsDeferredCalling();
+ LOperand* context = UseAny(instr->context());
LOperand* temp = TempRegister();
LAllocateObject* result = new(zone()) LAllocateObject(context, temp);
return AssignPointerMap(DefineAsRegister(result));
}
+LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
+ info()->MarkAsDeferredCalling();
+ LOperand* context = UseAny(instr->context());
+ LOperand* size = UseTempRegister(instr->size());
+ LOperand* temp = TempRegister();
+ LAllocate* result = new(zone()) LAllocate(context, size, temp);
+ return AssignPointerMap(DefineAsRegister(result));
+}
+
+
LInstruction* LChunkBuilder::DoFastLiteral(HFastLiteral* instr) {
LOperand* context = UseFixed(instr->context(), esi);
return MarkAsCall(
@@ -2205,8 +2400,17 @@ LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
- int spill_index = chunk()->GetParameterStackSlot(instr->index());
- return DefineAsSpilled(new(zone()) LParameter, spill_index);
+ LParameter* result = new(zone()) LParameter;
+ if (instr->kind() == HParameter::STACK_PARAMETER) {
+ int spill_index = chunk()->GetParameterStackSlot(instr->index());
+ return DefineAsSpilled(result, spill_index);
+ } else {
+ ASSERT(info()->IsStub());
+ CodeStubInterfaceDescriptor* descriptor =
+ info()->code_stub()->GetInterfaceDescriptor(info()->isolate());
+ Register reg = descriptor->register_params_[instr->index()];
+ return DefineFixed(result, reg);
+ }
}
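Register parameters only occur when compiling a stub; the interface descriptor says which register each parameter index maps to. A hypothetical descriptor initialization, just to show the data DoParameter consumes (register choice illustrative):

    // Hypothetical stub descriptor: parameter 0 in edx, parameter 1 in ecx.
    static Register registers[] = { edx, ecx };
    descriptor->register_param_count_ = 2;
    descriptor->register_params_ = registers;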
@@ -2278,7 +2482,7 @@ LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
env->set_ast_id(instr->ast_id());
env->Drop(instr->pop_count());
- for (int i = 0; i < instr->values()->length(); ++i) {
+ for (int i = instr->values()->length() - 1; i >= 0; --i) {
HValue* value = instr->values()->at(i);
if (instr->HasAssignedIndexAt(i)) {
env->Bind(instr->GetAssignedIndexAt(i), value);
@@ -2307,6 +2511,7 @@ LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
+ info()->MarkAsDeferredCalling();
if (instr->is_function_entry()) {
LOperand* context = UseFixed(instr->context(), esi);
return MarkAsCall(new(zone()) LStackCheck(context), instr);
@@ -2326,8 +2531,8 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
instr->arguments_count(),
instr->function(),
undefined,
- instr->call_kind(),
- instr->inlining_kind());
+ instr->inlining_kind(),
+ instr->undefined_receiver());
if (instr->arguments_var() != NULL) {
inner->Bind(instr->arguments_var(), graph()->GetArgumentsObject());
}
diff --git a/src/3rdparty/v8/src/ia32/lithium-ia32.h b/src/3rdparty/v8/src/ia32/lithium-ia32.h
index a1adb01..e6fd655 100644
--- a/src/3rdparty/v8/src/ia32/lithium-ia32.h
+++ b/src/3rdparty/v8/src/ia32/lithium-ia32.h
@@ -43,6 +43,7 @@ class LCodeGen;
#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \
V(AccessArgumentsAt) \
V(AddI) \
+ V(Allocate) \
V(AllocateObject) \
V(ApplyArguments) \
V(ArgumentsElements) \
@@ -61,6 +62,7 @@ class LCodeGen;
V(CallKnownGlobal) \
V(CallNamed) \
V(CallNew) \
+ V(CallNewArray) \
V(CallRuntime) \
V(CallStub) \
V(CheckFunction) \
@@ -87,6 +89,7 @@ class LCodeGen;
V(Deoptimize) \
V(DivI) \
V(DoubleToI) \
+ V(DummyUse) \
V(ElementsKind) \
V(FastLiteral) \
V(FixedArrayBaseLength) \
@@ -100,6 +103,7 @@ class LCodeGen;
V(In) \
V(InstanceOf) \
V(InstanceOfKnownGlobal) \
+ V(InstanceSize) \
V(InstructionGap) \
V(Integer32ToDouble) \
V(Uint32ToDouble) \
@@ -125,9 +129,11 @@ class LCodeGen;
V(LoadNamedFieldPolymorphic) \
V(LoadNamedGeneric) \
V(MapEnumLength) \
+ V(MathExp) \
V(MathFloorOfDiv) \
V(MathMinMax) \
V(MathPowHalf) \
+ V(MathRound) \
V(ModI) \
V(MulI) \
V(NumberTagD) \
@@ -143,6 +149,7 @@ class LCodeGen;
V(PushArgument) \
V(RegExpLiteral) \
V(Return) \
+ V(SeqStringSetChar) \
V(ShiftI) \
V(SmiTag) \
V(SmiUntag) \
@@ -165,6 +172,7 @@ class LCodeGen;
V(Throw) \
V(ToFastProperties) \
V(TransitionElementsKind) \
+ V(TrapAllocationMemento) \
V(Typeof) \
V(TypeofIsAndBranch) \
V(UnaryMathOperation) \
@@ -248,7 +256,11 @@ class LInstruction: public ZoneObject {
void MarkAsCall() { is_call_ = true; }
// Interface to the register allocator and iterators.
- bool IsMarkedAsCall() const { return is_call_; }
+ bool ClobbersTemps() const { return is_call_; }
+ bool ClobbersRegisters() const { return is_call_; }
+ virtual bool ClobbersDoubleRegisters() const {
+ return is_call_ || !CpuFeatures::IsSupported(SSE2);
+ }
virtual bool HasResult() const = 0;
virtual LOperand* result() = 0;
@@ -354,6 +366,7 @@ class LGap: public LTemplateInstruction<0, 0, 0> {
class LInstructionGap: public LGap {
public:
explicit LInstructionGap(HBasicBlock* block) : LGap(block) { }
+ virtual bool ClobbersDoubleRegisters() const { return false; }
DECLARE_CONCRETE_INSTRUCTION(InstructionGap, "gap")
};
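Splitting IsMarkedAsCall into Clobbers{Temps,Registers,DoubleRegisters} lets the allocator treat x87 targets specially: without SSE2, every non-gap instruction may clobber the x87 stack. A simplified sketch of the allocator-side check these hooks imply (hypothetical helper and container names):

    for (int i = 0; i < double_ranges.length(); ++i) {
      // Hypothetical: keep double values out of registers across any
      // instruction that reports ClobbersDoubleRegisters().
      if (instr->ClobbersDoubleRegisters()) {
        SpillLiveRangeAcross(double_ranges[i], instr);
      }
    }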
@@ -380,6 +393,15 @@ class LLazyBailout: public LTemplateInstruction<0, 0, 0> {
};
+class LDummyUse: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LDummyUse(LOperand* value) {
+ inputs_[0] = value;
+ }
+ DECLARE_CONCRETE_INSTRUCTION(DummyUse, "dummy-use")
+};
+
+
class LDeoptimize: public LTemplateInstruction<0, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
@@ -558,6 +580,8 @@ class LDivI: public LTemplateInstruction<1, 2, 1> {
LOperand* left() { return inputs_[0]; }
LOperand* right() { return inputs_[1]; }
+ bool is_flooring() { return hydrogen_value()->IsMathFloorOfDiv(); }
+
DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
DECLARE_HYDROGEN_ACCESSOR(Div)
};
@@ -614,7 +638,7 @@ class LCmpIDAndBranch: public LControlInstruction<2, 0> {
Token::Value op() const { return hydrogen()->token(); }
bool is_double() const {
- return hydrogen()->GetInputRepresentation().IsDouble();
+ return hydrogen()->representation().IsDouble();
}
virtual void PrintDataTo(StringStream* stream);
@@ -639,6 +663,27 @@ class LUnaryMathOperation: public LTemplateInstruction<1, 2, 0> {
};
+class LMathExp: public LTemplateInstruction<1, 1, 2> {
+ public:
+ LMathExp(LOperand* value,
+ LOperand* temp1,
+ LOperand* temp2) {
+ inputs_[0] = value;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ ExternalReference::InitializeMathExpData();
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathExp, "math-exp")
+
+ virtual void PrintDataTo(StringStream* stream);
+};
+
+
class LMathPowHalf: public LTemplateInstruction<1, 2, 1> {
public:
LMathPowHalf(LOperand* context, LOperand* value, LOperand* temp) {
@@ -657,6 +702,25 @@ class LMathPowHalf: public LTemplateInstruction<1, 2, 1> {
};
+class LMathRound: public LTemplateInstruction<1, 2, 1> {
+ public:
+ LMathRound(LOperand* context, LOperand* value, LOperand* temp) {
+ inputs_[1] = context;
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* context() { return inputs_[1]; }
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathRound, "math-round")
+ DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
+
+ virtual void PrintDataTo(StringStream* stream);
+};
+
+
class LCmpObjectEqAndBranch: public LControlInstruction<2, 0> {
public:
LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
@@ -927,6 +991,19 @@ class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 2, 1> {
};
+class LInstanceSize: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LInstanceSize(LOperand* object) {
+ inputs_[0] = object;
+ }
+
+ LOperand* object() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(InstanceSize, "instance-size")
+ DECLARE_HYDROGEN_ACCESSOR(InstanceSize)
+};
+
+
class LBoundsCheck: public LTemplateInstruction<0, 2, 0> {
public:
LBoundsCheck(LOperand* index, LOperand* length) {
@@ -1157,6 +1234,30 @@ class LDateField: public LTemplateInstruction<1, 1, 1> {
};
+class LSeqStringSetChar: public LTemplateInstruction<1, 3, 0> {
+ public:
+ LSeqStringSetChar(String::Encoding encoding,
+ LOperand* string,
+ LOperand* index,
+ LOperand* value) : encoding_(encoding) {
+ inputs_[0] = string;
+ inputs_[1] = index;
+ inputs_[2] = value;
+ }
+
+ String::Encoding encoding() { return encoding_; }
+ LOperand* string() { return inputs_[0]; }
+ LOperand* index() { return inputs_[1]; }
+ LOperand* value() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(SeqStringSetChar, "seq-string-set-char")
+ DECLARE_HYDROGEN_ACCESSOR(SeqStringSetChar)
+
+ private:
+ String::Encoding encoding_;
+};
+
+
class LThrow: public LTemplateInstruction<0, 2, 0> {
public:
LThrow(LOperand* context, LOperand* value) {
@@ -1290,10 +1391,11 @@ class LArithmeticT: public LTemplateInstruction<1, 3, 0> {
};
-class LReturn: public LTemplateInstruction<0, 1, 0> {
+class LReturn: public LTemplateInstruction<0, 2, 0> {
public:
- explicit LReturn(LOperand* value) {
+ explicit LReturn(LOperand* value, LOperand* context) {
inputs_[0] = value;
+ inputs_[1] = context;
}
DECLARE_CONCRETE_INSTRUCTION(Return, "return")
@@ -1391,7 +1493,6 @@ class LLoadKeyed: public LTemplateInstruction<1, 2, 0> {
inputs_[0] = elements;
inputs_[1] = key;
}
-
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
ElementsKind elements_kind() const {
@@ -1401,23 +1502,29 @@ class LLoadKeyed: public LTemplateInstruction<1, 2, 0> {
return hydrogen()->is_external();
}
+ virtual bool ClobbersDoubleRegisters() const {
+ return !CpuFeatures::IsSupported(SSE2) &&
+ !IsDoubleOrFloatElementsKind(hydrogen()->elements_kind());
+ }
+
DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed")
DECLARE_HYDROGEN_ACCESSOR(LoadKeyed)
+ virtual void PrintDataTo(StringStream* stream);
uint32_t additional_index() const { return hydrogen()->index_offset(); }
+ bool key_is_smi() {
+ return hydrogen()->key()->representation().IsTagged();
+ }
};
-template <class T>
-inline static bool ExternalArrayOpRequiresTemp(T* value) {
- CHECK(value->IsLoadKeyed() || value->IsStoreKeyed());
- Representation key_representation = value->key()->representation();
- ElementsKind elements_kind = value->elements_kind();
-
+inline static bool ExternalArrayOpRequiresTemp(
+ Representation key_representation,
+ ElementsKind elements_kind) {
// Operations that require the key to be divided by two to be converted into
// an index cannot fold the scale operation into a load and need an extra
// temp register to do the work.
- return !value->IsConstant() && key_representation.IsTagged() &&
+ return key_representation.IsTagged() &&
(elements_kind == EXTERNAL_BYTE_ELEMENTS ||
elements_kind == EXTERNAL_UNSIGNED_BYTE_ELEMENTS ||
elements_kind == EXTERNAL_PIXEL_ELEMENTS);
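The temp is needed because a tagged key is a smi, i.e. the int32 value shifted left by one, and a byte-sized external access would need the unsupported scale factor 1/2. Illustrative instruction shape (assumed, comments only):

    // key as smi = value << 1; for byte arrays the needed address is
    //   base + (key >> 1)
    // which cannot be folded into one addressing mode, so instead:
    //   mov     temp, key
    //   sar     temp, 1                  ; untag the smi
    //   movzx_b result, [base + temp]    ; then index with scale 1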
@@ -1573,6 +1680,7 @@ class LThisFunction: public LTemplateInstruction<1, 0, 0> {
class LContext: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(Context, "context")
+ DECLARE_HYDROGEN_ACCESSOR(Context)
};
@@ -1719,7 +1827,8 @@ class LCallFunction: public LTemplateInstruction<1, 2, 0> {
class LCallGlobal: public LTemplateInstruction<1, 1, 0> {
public:
- explicit LCallGlobal(LOperand* context, bool qml_global) : qml_global_(qml_global) {
+ explicit LCallGlobal(LOperand* context, bool qml_global)
+ : qml_global_(qml_global) {
inputs_[0] = context;
}
@@ -1770,6 +1879,25 @@ class LCallNew: public LTemplateInstruction<1, 2, 0> {
};
+class LCallNewArray: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LCallNewArray(LOperand* context, LOperand* constructor) {
+ inputs_[0] = context;
+ inputs_[1] = constructor;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* constructor() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array")
+ DECLARE_HYDROGEN_ACCESSOR(CallNewArray)
+
+ virtual void PrintDataTo(StringStream* stream);
+
+ int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
class LCallRuntime: public LTemplateInstruction<1, 1, 0> {
public:
explicit LCallRuntime(LOperand* context) {
@@ -1824,11 +1952,10 @@ class LNumberTagI: public LTemplateInstruction<1, 1, 0> {
};
-class LNumberTagU: public LTemplateInstruction<1, 1, 1> {
+class LNumberTagU: public LTemplateInstruction<1, 1, 0> {
public:
- explicit LNumberTagU(LOperand* value, LOperand* temp) {
+ explicit LNumberTagU(LOperand* value) {
inputs_[0] = value;
- temps_[0] = temp;
}
LOperand* value() { return inputs_[0]; }
@@ -1848,6 +1975,7 @@ class LNumberTagD: public LTemplateInstruction<1, 1, 1> {
LOperand* temp() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(NumberTagD, "number-tag-d")
+ DECLARE_HYDROGEN_ACCESSOR(Change)
};
@@ -2033,16 +2161,19 @@ class LStoreKeyedGeneric: public LTemplateInstruction<0, 4, 0> {
};
-class LTransitionElementsKind: public LTemplateInstruction<1, 1, 2> {
+class LTransitionElementsKind: public LTemplateInstruction<0, 2, 2> {
public:
LTransitionElementsKind(LOperand* object,
+ LOperand* context,
LOperand* new_map_temp,
LOperand* temp) {
inputs_[0] = object;
+ inputs_[1] = context;
temps_[0] = new_map_temp;
temps_[1] = temp;
}
+ LOperand* context() { return inputs_[1]; }
LOperand* object() { return inputs_[0]; }
LOperand* new_map_temp() { return temps_[0]; }
LOperand* temp() { return temps_[1]; }
@@ -2055,6 +2186,24 @@ class LTransitionElementsKind: public LTemplateInstruction<1, 1, 2> {
Handle<Map> original_map() { return hydrogen()->original_map(); }
Handle<Map> transitioned_map() { return hydrogen()->transitioned_map(); }
+ ElementsKind from_kind() { return hydrogen()->from_kind(); }
+ ElementsKind to_kind() { return hydrogen()->to_kind(); }
+};
+
+
+class LTrapAllocationMemento : public LTemplateInstruction<0, 1, 1> {
+ public:
+ LTrapAllocationMemento(LOperand* object,
+ LOperand* temp) {
+ inputs_[0] = object;
+ temps_[0] = temp;
+ }
+
+ LOperand* object() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(TrapAllocationMemento,
+ "trap-allocation-memento")
};
@@ -2172,8 +2321,10 @@ class LCheckPrototypeMaps: public LTemplateInstruction<1, 0, 1> {
DECLARE_CONCRETE_INSTRUCTION(CheckPrototypeMaps, "check-prototype-maps")
DECLARE_HYDROGEN_ACCESSOR(CheckPrototypeMaps)
- Handle<JSObject> prototype() const { return hydrogen()->prototype(); }
- Handle<JSObject> holder() const { return hydrogen()->holder(); }
+ ZoneList<Handle<JSObject> >* prototypes() const {
+ return hydrogen()->prototypes();
+ }
+ ZoneList<Handle<Map> >* maps() const { return hydrogen()->maps(); }
};
@@ -2253,6 +2404,23 @@ class LAllocateObject: public LTemplateInstruction<1, 1, 1> {
};
+class LAllocate: public LTemplateInstruction<1, 2, 1> {
+ public:
+ LAllocate(LOperand* context, LOperand* size, LOperand* temp) {
+ inputs_[0] = context;
+ inputs_[1] = size;
+ temps_[0] = temp;
+ }
+
+ LOperand* context() { return inputs_[0]; }
+ LOperand* size() { return inputs_[1]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Allocate, "allocate")
+ DECLARE_HYDROGEN_ACCESSOR(Allocate)
+};
+
+
class LFastLiteral: public LTemplateInstruction<1, 1, 0> {
public:
explicit LFastLiteral(LOperand* context) {
@@ -2398,8 +2566,9 @@ class LOsrEntry: public LTemplateInstruction<0, 0, 0> {
// slot, i.e., that must also be restored to the spill slot on OSR entry.
// NULL if the register has no assigned spill slot. Indexed by allocation
// index.
- LOperand* register_spills_[Register::kNumAllocatableRegisters];
- LOperand* double_register_spills_[DoubleRegister::kNumAllocatableRegisters];
+ LOperand* register_spills_[Register::kMaxNumAllocatableRegisters];
+ LOperand* double_register_spills_[
+ DoubleRegister::kMaxNumAllocatableRegisters];
};
@@ -2563,6 +2732,7 @@ class LChunkBuilder BASE_EMBEDDED {
// Methods for getting operands for Use / Define / Temp.
LUnallocated* ToUnallocated(Register reg);
LUnallocated* ToUnallocated(XMMRegister reg);
+ LUnallocated* ToUnallocated(X87TopOfStackRegister reg);
// Methods for setting up define-use relationships.
MUST_USE_RESULT LOperand* Use(HValue* value, LUnallocated* operand);
@@ -2623,6 +2793,8 @@ class LChunkBuilder BASE_EMBEDDED {
template<int I, int T>
LInstruction* DefineFixedDouble(LTemplateInstruction<1, I, T>* instr,
XMMRegister reg);
+ template<int I, int T>
+ LInstruction* DefineX87TOS(LTemplateInstruction<1, I, T>* instr);
// Assigns an environment to an instruction. An instruction which can
// deoptimize must have an environment.
LInstruction* AssignEnvironment(LInstruction* instr);
diff --git a/src/3rdparty/v8/src/ia32/macro-assembler-ia32.cc b/src/3rdparty/v8/src/ia32/macro-assembler-ia32.cc
index 26d0f92..587699f 100644
--- a/src/3rdparty/v8/src/ia32/macro-assembler-ia32.cc
+++ b/src/3rdparty/v8/src/ia32/macro-assembler-ia32.cc
@@ -170,7 +170,7 @@ void MacroAssembler::LoadUint32(XMMRegister dst,
Label done;
cmp(src, Immediate(0));
movdbl(scratch,
- Operand(reinterpret_cast<int32_t>(&kUint32Bias), RelocInfo::NONE));
+ Operand(reinterpret_cast<int32_t>(&kUint32Bias), RelocInfo::NONE32));
cvtsi2sd(dst, src);
j(not_sign, &done, Label::kNear);
addsd(dst, scratch);
@@ -385,7 +385,7 @@ void MacroAssembler::DebugBreak() {
Set(eax, Immediate(0));
mov(ebx, Immediate(ExternalReference(Runtime::kDebugBreak, isolate())));
CEntryStub ces(1);
- call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
+ call(ces.GetCode(isolate()), RelocInfo::DEBUG_BREAK);
}
#endif
@@ -406,7 +406,7 @@ void MacroAssembler::Set(const Operand& dst, const Immediate& x) {
bool MacroAssembler::IsUnsafeImmediate(const Immediate& x) {
static const int kMaxImmediateBits = 17;
- if (x.rmode_ != RelocInfo::NONE) return false;
+ if (!RelocInfo::IsNone(x.rmode_)) return false;
return !is_intn(x.x_, kMaxImmediateBits);
}
@@ -507,7 +507,8 @@ void MacroAssembler::StoreNumberToDoubleElements(
Register scratch1,
XMMRegister scratch2,
Label* fail,
- bool specialize_for_processor) {
+ bool specialize_for_processor,
+ int elements_offset) {
Label smi_value, done, maybe_nan, not_nan, is_nan, have_double_value;
JumpIfSmi(maybe_number, &smi_value, Label::kNear);
@@ -529,12 +530,14 @@ void MacroAssembler::StoreNumberToDoubleElements(
CpuFeatures::Scope use_sse2(SSE2);
movdbl(scratch2, FieldOperand(maybe_number, HeapNumber::kValueOffset));
bind(&have_double_value);
- movdbl(FieldOperand(elements, key, times_4, FixedDoubleArray::kHeaderSize),
+ movdbl(FieldOperand(elements, key, times_4,
+ FixedDoubleArray::kHeaderSize - elements_offset),
scratch2);
} else {
fld_d(FieldOperand(maybe_number, HeapNumber::kValueOffset));
bind(&have_double_value);
- fstp_d(FieldOperand(elements, key, times_4, FixedDoubleArray::kHeaderSize));
+ fstp_d(FieldOperand(elements, key, times_4,
+ FixedDoubleArray::kHeaderSize - elements_offset));
}
jmp(&done);
@@ -561,13 +564,15 @@ void MacroAssembler::StoreNumberToDoubleElements(
if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) {
CpuFeatures::Scope fscope(SSE2);
cvtsi2sd(scratch2, scratch1);
- movdbl(FieldOperand(elements, key, times_4, FixedDoubleArray::kHeaderSize),
+ movdbl(FieldOperand(elements, key, times_4,
+ FixedDoubleArray::kHeaderSize - elements_offset),
scratch2);
} else {
push(scratch1);
fild_s(Operand(esp, 0));
pop(scratch1);
- fstp_d(FieldOperand(elements, key, times_4, FixedDoubleArray::kHeaderSize));
+ fstp_d(FieldOperand(elements, key, times_4,
+ FixedDoubleArray::kHeaderSize - elements_offset));
}
bind(&done);
}
@@ -1236,6 +1241,7 @@ void MacroAssembler::AllocateInNewSpace(int object_size,
Register scratch,
Label* gc_required,
AllocationFlags flags) {
+ ASSERT((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
@@ -1255,6 +1261,19 @@ void MacroAssembler::AllocateInNewSpace(int object_size,
// Load address of new object into result.
LoadAllocationTopHelper(result, scratch, flags);
+ // Align the next allocation. Storing the filler map without checking top is
+ // always safe because the limit of the heap is always aligned.
+ if ((flags & DOUBLE_ALIGNMENT) != 0) {
+ ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
+ Label aligned;
+ test(result, Immediate(kDoubleAlignmentMask));
+ j(zero, &aligned, Label::kNear);
+ mov(Operand(result, 0),
+ Immediate(isolate()->factory()->one_pointer_filler_map()));
+ add(result, Immediate(kDoubleSize / 2));
+ bind(&aligned);
+ }
+
Register top_reg = result_end.is_valid() ? result_end : result;
// Calculate new top and bail out if new space is exhausted.
@@ -1273,26 +1292,31 @@ void MacroAssembler::AllocateInNewSpace(int object_size,
UpdateAllocationTopHelper(top_reg, scratch);
// Tag result if requested.
+ bool tag_result = (flags & TAG_OBJECT) != 0;
if (top_reg.is(result)) {
- if ((flags & TAG_OBJECT) != 0) {
+ if (tag_result) {
sub(result, Immediate(object_size - kHeapObjectTag));
} else {
sub(result, Immediate(object_size));
}
- } else if ((flags & TAG_OBJECT) != 0) {
- add(result, Immediate(kHeapObjectTag));
+ } else if (tag_result) {
+ ASSERT(kHeapObjectTag == 1);
+ inc(result);
}
}
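The DOUBLE_ALIGNMENT blocks above bump a pointer-aligned top to 8-byte alignment by parking a one-pointer filler map in the gap, keeping the heap iterable. A plain-C++ model of that step (WriteFillerAt is hypothetical; constants as on ia32):

    uintptr_t AlignForDouble(uintptr_t top) {
      if ((top & kDoubleAlignmentMask) != 0) {  // only pointer-aligned
        WriteFillerAt(top);                     // one_pointer_filler_map
        top += kDoubleSize / 2;                 // skip 4 bytes
      }
      return top;                               // now 8-byte aligned
    }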
-void MacroAssembler::AllocateInNewSpace(int header_size,
- ScaleFactor element_size,
- Register element_count,
- Register result,
- Register result_end,
- Register scratch,
- Label* gc_required,
- AllocationFlags flags) {
+void MacroAssembler::AllocateInNewSpace(
+ int header_size,
+ ScaleFactor element_size,
+ Register element_count,
+ RegisterValueType element_count_type,
+ Register result,
+ Register result_end,
+ Register scratch,
+ Label* gc_required,
+ AllocationFlags flags) {
+ ASSERT((flags & SIZE_IN_WORDS) == 0);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
@@ -1311,21 +1335,44 @@ void MacroAssembler::AllocateInNewSpace(int header_size,
// Load address of new object into result.
LoadAllocationTopHelper(result, scratch, flags);
+ // Align the next allocation. Storing the filler map without checking top is
+ // always safe because the limit of the heap is always aligned.
+ if ((flags & DOUBLE_ALIGNMENT) != 0) {
+ ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
+ Label aligned;
+ test(result, Immediate(kDoubleAlignmentMask));
+ j(zero, &aligned, Label::kNear);
+ mov(Operand(result, 0),
+ Immediate(isolate()->factory()->one_pointer_filler_map()));
+ add(result, Immediate(kDoubleSize / 2));
+ bind(&aligned);
+ }
+
// Calculate new top and bail out if new space is exhausted.
ExternalReference new_space_allocation_limit =
ExternalReference::new_space_allocation_limit_address(isolate());
// We assume that element_count*element_size + header_size does not
// overflow.
+ if (element_count_type == REGISTER_VALUE_IS_SMI) {
+ STATIC_ASSERT(static_cast<ScaleFactor>(times_2 - 1) == times_1);
+ STATIC_ASSERT(static_cast<ScaleFactor>(times_4 - 1) == times_2);
+ STATIC_ASSERT(static_cast<ScaleFactor>(times_8 - 1) == times_4);
+ ASSERT(element_size >= times_2);
+ ASSERT(kSmiTagSize == 1);
+ element_size = static_cast<ScaleFactor>(element_size - 1);
+ } else {
+ ASSERT(element_count_type == REGISTER_VALUE_IS_INT32);
+ }
lea(result_end, Operand(element_count, element_size, header_size));
add(result_end, result);
j(carry, gc_required);
cmp(result_end, Operand::StaticVariable(new_space_allocation_limit));
j(above, gc_required);
- // Tag result if requested.
if ((flags & TAG_OBJECT) != 0) {
- lea(result, Operand(result, kHeapObjectTag));
+ ASSERT(kHeapObjectTag == 1);
+ inc(result);
}
// Update allocation top.
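The REGISTER_VALUE_IS_SMI branch folds the smi untagging into the address scale: a smi is the int32 count shifted left once, so dropping one scale step cancels the shift. Worked example (comments only):

    //   element_count = 5 as int32, 10 as smi (5 << 1)
    //   two-byte chars: bytes = 5 * times_2 = 10 * times_1
    //   so with a smi count, element_size is demoted times_2 -> times_1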
@@ -1339,6 +1386,7 @@ void MacroAssembler::AllocateInNewSpace(Register object_size,
Register scratch,
Label* gc_required,
AllocationFlags flags) {
+ ASSERT((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
@@ -1357,6 +1405,19 @@ void MacroAssembler::AllocateInNewSpace(Register object_size,
// Load address of new object into result.
LoadAllocationTopHelper(result, scratch, flags);
+ // Align the next allocation. Storing the filler map without checking top is
+ // always safe because the limit of the heap is always aligned.
+ if ((flags & DOUBLE_ALIGNMENT) != 0) {
+ ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
+ Label aligned;
+ test(result, Immediate(kDoubleAlignmentMask));
+ j(zero, &aligned, Label::kNear);
+ mov(Operand(result, 0),
+ Immediate(isolate()->factory()->one_pointer_filler_map()));
+ add(result, Immediate(kDoubleSize / 2));
+ bind(&aligned);
+ }
+
// Calculate new top and bail out if new space is exhausted.
ExternalReference new_space_allocation_limit =
ExternalReference::new_space_allocation_limit_address(isolate());
@@ -1370,7 +1431,8 @@ void MacroAssembler::AllocateInNewSpace(Register object_size,
// Tag result if requested.
if ((flags & TAG_OBJECT) != 0) {
- lea(result, Operand(result, kHeapObjectTag));
+ ASSERT(kHeapObjectTag == 1);
+ inc(result);
}
// Update allocation top.
@@ -1428,6 +1490,7 @@ void MacroAssembler::AllocateTwoByteString(Register result,
AllocateInNewSpace(SeqTwoByteString::kHeaderSize,
times_1,
scratch1,
+ REGISTER_VALUE_IS_INT32,
result,
scratch2,
scratch3,
@@ -1453,16 +1516,17 @@ void MacroAssembler::AllocateAsciiString(Register result,
Label* gc_required) {
// Calculate the number of bytes needed for the characters in the string while
// observing object alignment.
- ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
+ ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
mov(scratch1, length);
ASSERT(kCharSize == 1);
add(scratch1, Immediate(kObjectAlignmentMask));
and_(scratch1, Immediate(~kObjectAlignmentMask));
// Allocate ASCII string in new space.
- AllocateInNewSpace(SeqAsciiString::kHeaderSize,
+ AllocateInNewSpace(SeqOneByteString::kHeaderSize,
times_1,
scratch1,
+ REGISTER_VALUE_IS_INT32,
result,
scratch2,
scratch3,
@@ -1488,7 +1552,7 @@ void MacroAssembler::AllocateAsciiString(Register result,
ASSERT(length > 0);
// Allocate ASCII string in new space.
- AllocateInNewSpace(SeqAsciiString::SizeFor(length),
+ AllocateInNewSpace(SeqOneByteString::SizeFor(length),
result,
scratch1,
scratch2,
@@ -1738,13 +1802,13 @@ void MacroAssembler::TryGetFunctionPrototype(Register function,
void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
ASSERT(AllowThisStubCall(stub)); // Calls are not allowed in some stubs.
- call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
+ call(stub->GetCode(isolate()), RelocInfo::CODE_TARGET, ast_id);
}
void MacroAssembler::TailCallStub(CodeStub* stub) {
ASSERT(allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe());
- jmp(stub->GetCode(), RelocInfo::CODE_TARGET);
+ jmp(stub->GetCode(isolate()), RelocInfo::CODE_TARGET);
}
@@ -1796,7 +1860,8 @@ void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
const Runtime::Function* function = Runtime::FunctionForId(id);
Set(eax, Immediate(function->nargs));
mov(ebx, Immediate(ExternalReference(function, isolate())));
- CEntryStub ces(1, kSaveFPRegs);
+ CEntryStub ces(1, CpuFeatures::IsSupported(SSE2) ? kSaveFPRegs
+ : kDontSaveFPRegs);
CallStub(&ces);
}
@@ -1904,20 +1969,36 @@ void MacroAssembler::PrepareCallApiFunction(int argc) {
void MacroAssembler::CallApiFunctionAndReturn(Address function_address,
int stack_space) {
ExternalReference next_address =
- ExternalReference::handle_scope_next_address();
+ ExternalReference::handle_scope_next_address(isolate());
ExternalReference limit_address =
- ExternalReference::handle_scope_limit_address();
+ ExternalReference::handle_scope_limit_address(isolate());
ExternalReference level_address =
- ExternalReference::handle_scope_level_address();
+ ExternalReference::handle_scope_level_address(isolate());
// Allocate HandleScope in callee-save registers.
mov(ebx, Operand::StaticVariable(next_address));
mov(edi, Operand::StaticVariable(limit_address));
add(Operand::StaticVariable(level_address), Immediate(1));
+ if (FLAG_log_timer_events) {
+ FrameScope frame(this, StackFrame::MANUAL);
+ PushSafepointRegisters();
+ PrepareCallCFunction(0, eax);
+ CallCFunction(ExternalReference::log_enter_external_function(isolate()), 0);
+ PopSafepointRegisters();
+ }
+
// Call the api function.
call(function_address, RelocInfo::RUNTIME_ENTRY);
+ if (FLAG_log_timer_events) {
+ FrameScope frame(this, StackFrame::MANUAL);
+ PushSafepointRegisters();
+ PrepareCallCFunction(0, eax);
+ CallCFunction(ExternalReference::log_leave_external_function(isolate()), 0);
+ PopSafepointRegisters();
+ }
+
if (!kReturnHandlesDirectly) {
// PrepareCallApiFunction saved pointer to the output slot into
// callee-save register esi.
@@ -2016,7 +2097,7 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& ext) {
// Set the entry point and jump to the C entry runtime stub.
mov(ebx, Immediate(ext));
CEntryStub ces(1);
- jmp(ces.GetCode(), RelocInfo::CODE_TARGET);
+ jmp(ces.GetCode(isolate()), RelocInfo::CODE_TARGET);
}
@@ -2328,12 +2409,23 @@ void MacroAssembler::LoadInitialArrayMap(
}
+void MacroAssembler::LoadGlobalContext(Register global_context) {
+ // Load the global or builtins object from the current context.
+ mov(global_context,
+ Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ // Load the native context from the global or builtins object.
+ mov(global_context,
+ FieldOperand(global_context, GlobalObject::kNativeContextOffset));
+}
+
+
void MacroAssembler::LoadGlobalFunction(int index, Register function) {
// Load the global or builtins object from the current context.
mov(function,
Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
// Load the native context from the global or builtins object.
- mov(function, FieldOperand(function, GlobalObject::kNativeContextOffset));
+ mov(function,
+ FieldOperand(function, GlobalObject::kNativeContextOffset));
// Load the function from the native context.
mov(function, Operand(function, Context::SlotOffset(index)));
}
@@ -2913,8 +3005,8 @@ void MacroAssembler::EnsureNotWhite(
// Value now either 4 (if ASCII) or 8 (if UC16), i.e., char-size shifted
// by 2. If we multiply the string length as smi by this, it still
// won't overflow a 32-bit value.
- ASSERT_EQ(SeqAsciiString::kMaxSize, SeqTwoByteString::kMaxSize);
- ASSERT(SeqAsciiString::kMaxSize <=
+ ASSERT_EQ(SeqOneByteString::kMaxSize, SeqTwoByteString::kMaxSize);
+ ASSERT(SeqOneByteString::kMaxSize <=
static_cast<int>(0xffffffffu >> (2 + kSmiTagSize)));
imul(length, FieldOperand(value, String::kLengthOffset));
shr(length, 2 + kSmiTagSize + kSmiShiftSize);
@@ -2981,6 +3073,29 @@ void MacroAssembler::CheckEnumCache(Label* call_runtime) {
j(not_equal, &next);
}
+
+void MacroAssembler::TestJSArrayForAllocationSiteInfo(
+ Register receiver_reg,
+ Register scratch_reg) {
+ Label no_info_available;
+
+ ExternalReference new_space_start =
+ ExternalReference::new_space_start(isolate());
+ ExternalReference new_space_allocation_top =
+ ExternalReference::new_space_allocation_top_address(isolate());
+
+ lea(scratch_reg, Operand(receiver_reg,
+ JSArray::kSize + AllocationSiteInfo::kSize - kHeapObjectTag));
+ cmp(scratch_reg, Immediate(new_space_start));
+ j(less, &no_info_available);
+ cmp(scratch_reg, Operand::StaticVariable(new_space_allocation_top));
+ j(greater, &no_info_available);
+ cmp(MemOperand(scratch_reg, -AllocationSiteInfo::kSize),
+ Immediate(Handle<Map>(isolate()->heap()->allocation_site_info_map())));
+ bind(&no_info_available);
+}
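A rough usage sketch for the new helper: the condition flags read 'equal' when an AllocationSiteInfo immediately follows the array, matching LTrapAllocationMemento's deopt-on-equal use (surrounding names assumed):

    Label ok;
    __ TestJSArrayForAllocationSiteInfo(receiver_reg, scratch_reg);
    __ j(not_equal, &ok);          // no memento: nothing to do
    // memento present: deoptimize or pretransition the array here
    __ bind(&ok);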
+
+
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_IA32
diff --git a/src/3rdparty/v8/src/ia32/macro-assembler-ia32.h b/src/3rdparty/v8/src/ia32/macro-assembler-ia32.h
index b91cfcd..3a6e17b 100644
--- a/src/3rdparty/v8/src/ia32/macro-assembler-ia32.h
+++ b/src/3rdparty/v8/src/ia32/macro-assembler-ia32.h
@@ -35,18 +35,6 @@
namespace v8 {
namespace internal {
-// Flags used for the AllocateInNewSpace functions.
-enum AllocationFlags {
- // No special flags.
- NO_ALLOCATION_FLAGS = 0,
- // Return the pointer to the allocated already tagged as a heap object.
- TAG_OBJECT = 1 << 0,
- // The content of the result register already contains the allocation top in
- // new space.
- RESULT_CONTAINS_TOP = 1 << 1
-};
-
-
// Convenience for platform-independent signatures. We do not normally
// distinguish memory operands from other operands on ia32.
typedef Operand MemOperand;
@@ -55,6 +43,12 @@ enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
+enum RegisterValueType {
+ REGISTER_VALUE_IS_SMI,
+ REGISTER_VALUE_IS_INT32
+};
+
+
bool AreAliased(Register r1, Register r2, Register r3, Register r4);
@@ -255,6 +249,8 @@ class MacroAssembler: public Assembler {
Register map_out,
bool can_have_holes);
+ void LoadGlobalContext(Register global_context);
+
// Load the global function with the given index.
void LoadGlobalFunction(int index, Register function);
@@ -388,7 +384,8 @@ class MacroAssembler: public Assembler {
Register scratch1,
XMMRegister scratch2,
Label* fail,
- bool specialize_for_processor);
+ bool specialize_for_processor,
+ int offset = 0);
// Compare an object's map with the specified map and its transitioned
// elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. FLAGS are set with
@@ -575,6 +572,7 @@ class MacroAssembler: public Assembler {
void AllocateInNewSpace(int header_size,
ScaleFactor element_size,
Register element_count,
+ RegisterValueType element_count_type,
Register result,
Register result_end,
Register scratch,
@@ -788,6 +786,7 @@ class MacroAssembler: public Assembler {
// Push a handle value.
void Push(Handle<Object> handle) { push(Immediate(handle)); }
+ void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); }
Handle<Object> CodeObject() {
ASSERT(!code_object_.is_null());
@@ -862,6 +861,15 @@ class MacroAssembler: public Assembler {
// in eax. Assumes that any other register can be used as a scratch.
void CheckEnumCache(Label* call_runtime);
+ // AllocationSiteInfo support. Arrays may have an associated
+ // AllocationSiteInfo object that can be checked in order to pretransition
+ // the array to another elements kind.
+ // On entry, receiver_reg should point to the array object.
+ // scratch_reg gets clobbered.
+ // If allocation site info is present, the condition flags are set to equal.
+ void TestJSArrayForAllocationSiteInfo(Register receiver_reg,
+ Register scratch_reg);
+
private:
bool generating_stub_;
bool allow_stub_calls_;
@@ -922,9 +930,9 @@ class MacroAssembler: public Assembler {
Operand SafepointRegisterSlot(Register reg);
static int SafepointRegisterStackIndex(int reg_code);
- // Needs access to SafepointRegisterStackIndex for optimized frame
+ // Needs access to SafepointRegisterStackIndex for compiled frame
// traversal.
- friend class OptimizedFrame;
+ friend class StandardFrame;
};
diff --git a/src/3rdparty/v8/src/ia32/regexp-macro-assembler-ia32.cc b/src/3rdparty/v8/src/ia32/regexp-macro-assembler-ia32.cc
index 622dc42..49c75e1 100644
--- a/src/3rdparty/v8/src/ia32/regexp-macro-assembler-ia32.cc
+++ b/src/3rdparty/v8/src/ia32/regexp-macro-assembler-ia32.cc
@@ -217,7 +217,7 @@ void RegExpMacroAssemblerIA32::CheckCharacters(Vector<const uc16> str,
// If input is ASCII, don't even bother calling here if the string to
// match contains a non-ASCII character.
if (mode_ == ASCII) {
- ASSERT(String::IsAscii(str.start(), str.length()));
+ ASSERT(String::IsOneByte(str.start(), str.length()));
}
#endif
int byte_length = str.length() * char_size();
@@ -344,7 +344,19 @@ void RegExpMacroAssemblerIA32::CheckNotBackReferenceIgnoreCase(
__ or_(eax, 0x20); // Convert match character to lower-case.
__ lea(ecx, Operand(eax, -'a'));
__ cmp(ecx, static_cast<int32_t>('z' - 'a')); // Is eax a lowercase letter?
- __ j(above, &fail);
+#ifndef ENABLE_LATIN_1
+ __ j(above, &fail); // Weren't letters anyway.
+#else
+ Label convert_capture;
+ __ j(below_equal, &convert_capture); // In range 'a'-'z'.
+ // Latin-1: Check for values in range [224,254] but not 247.
+ __ sub(ecx, Immediate(224 - 'a'));
+ __ cmp(ecx, Immediate(254 - 224));
+ __ j(above, &fail); // Weren't Latin-1 letters.
+ __ cmp(ecx, Immediate(247 - 224)); // Check for 247.
+ __ j(equal, &fail);
+ __ bind(&convert_capture);
+#endif
// Also convert capture character.
__ movzx_b(ecx, Operand(edx, 0));
__ or_(ecx, 0x20);
@@ -569,7 +581,7 @@ void RegExpMacroAssemblerIA32::CheckBitInTable(
Label* on_bit_set) {
__ mov(eax, Immediate(table));
Register index = current_character();
- if (mode_ != ASCII || kTableMask != String::kMaxAsciiCharCode) {
+ if (mode_ != ASCII || kTableMask != String::kMaxOneByteCharCode) {
__ mov(ebx, kTableSize - 1);
__ and_(ebx, current_character());
index = ebx;
@@ -1197,7 +1209,7 @@ int RegExpMacroAssemblerIA32::CheckStackGuardState(Address* return_address,
Handle<String> subject(frame_entry<String*>(re_frame, kInputString));
// Current string.
- bool is_ascii = subject->IsAsciiRepresentationUnderneath();
+ bool is_ascii = subject->IsOneByteRepresentationUnderneath();
ASSERT(re_code->instruction_start() <= *return_address);
ASSERT(*return_address <=
@@ -1228,7 +1240,7 @@ int RegExpMacroAssemblerIA32::CheckStackGuardState(Address* return_address,
}
// String might have changed.
- if (subject_tmp->IsAsciiRepresentation() != is_ascii) {
+ if (subject_tmp->IsOneByteRepresentation() != is_ascii) {
// If we changed between an ASCII and an UC16 string, the specialized
// code cannot be used, and we need to restart regexp matching from
// scratch (including, potentially, compiling a new version of the code).
diff --git a/src/3rdparty/v8/src/ia32/stub-cache-ia32.cc b/src/3rdparty/v8/src/ia32/stub-cache-ia32.cc
index 11efb72..34ce36d 100644
--- a/src/3rdparty/v8/src/ia32/stub-cache-ia32.cc
+++ b/src/3rdparty/v8/src/ia32/stub-cache-ia32.cc
@@ -141,14 +141,14 @@ static void ProbeTable(Isolate* isolate,
// the property. This function may return false negatives, so miss_label
// must always call a backup property check that is complete.
// This function is safe to call if the receiver has fast properties.
-// Name must be a symbol and receiver must be a heap object.
+// Name must be an internalized string and receiver must be a heap object.
static void GenerateDictionaryNegativeLookup(MacroAssembler* masm,
Label* miss_label,
Register receiver,
Handle<String> name,
Register r0,
Register r1) {
- ASSERT(name->IsSymbol());
+ ASSERT(name->IsInternalizedString());
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->negative_lookups(), 1);
__ IncrementCounter(counters->negative_lookups_miss(), 1);
@@ -376,19 +376,25 @@ void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
Register dst,
Register src,
Handle<JSObject> holder,
- int index) {
- // Adjust for the number of properties stored in the holder.
- index -= holder->map()->inobject_properties();
- if (index < 0) {
- // Get the property straight out of the holder.
- int offset = holder->map()->instance_size() + (index * kPointerSize);
- __ mov(dst, FieldOperand(src, offset));
- } else {
+ PropertyIndex index) {
+ DoGenerateFastPropertyLoad(
+ masm, dst, src, index.is_inobject(holder), index.translate(holder));
+}
+
+
+void StubCompiler::DoGenerateFastPropertyLoad(MacroAssembler* masm,
+ Register dst,
+ Register src,
+ bool inobject,
+ int index) {
+ int offset = index * kPointerSize;
+ if (!inobject) {
// Calculate the offset into the properties array.
- int offset = index * kPointerSize + FixedArray::kHeaderSize;
+ offset = offset + FixedArray::kHeaderSize;
__ mov(dst, FieldOperand(src, JSObject::kPropertiesOffset));
- __ mov(dst, FieldOperand(dst, offset));
+ src = dst;
}
+ __ mov(dst, FieldOperand(src, offset));
}
@@ -487,7 +493,7 @@ static void GenerateFastApiCall(MacroAssembler* masm,
// Pass the additional arguments.
__ mov(Operand(esp, 2 * kPointerSize), edi);
Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
- Handle<Object> call_data(api_call_info->data());
+ Handle<Object> call_data(api_call_info->data(), masm->isolate());
if (masm->isolate()->heap()->InNewSpace(*call_data)) {
__ mov(ecx, api_call_info);
__ mov(ebx, FieldOperand(ecx, CallHandlerInfo::kDataOffset));
@@ -732,6 +738,15 @@ void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) {
}
+void StubCompiler::GenerateStoreMiss(MacroAssembler* masm, Code::Kind kind) {
+ ASSERT(kind == Code::STORE_IC || kind == Code::KEYED_STORE_IC);
+ Handle<Code> code = (kind == Code::STORE_IC)
+ ? masm->isolate()->builtins()->StoreIC_Miss()
+ : masm->isolate()->builtins()->KeyedStoreIC_Miss();
+ __ jmp(code, RelocInfo::CODE_TARGET);
+}
+
+
void StubCompiler::GenerateKeyedLoadMissForceGeneric(MacroAssembler* masm) {
Handle<Code> code =
masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
@@ -922,6 +937,11 @@ static void GenerateCheckPropertyCells(MacroAssembler* masm,
#define __ ACCESS_MASM(masm())
+void StubCompiler::GenerateTailCall(Handle<Code> code) {
+ __ jmp(code, RelocInfo::CODE_TARGET);
+}
+
+
Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
Register object_reg,
Handle<JSObject> holder,
@@ -930,7 +950,9 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
Register scratch2,
Handle<String> name,
int save_at_depth,
- Label* miss) {
+ Label* miss,
+ PrototypeCheckType check) {
+ Handle<JSObject> first = object;
// Make sure there's no overlap between holder and object registers.
ASSERT(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
ASSERT(!scratch2.is(object_reg) && !scratch2.is(holder_reg)
@@ -958,8 +980,8 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
if (!current->HasFastProperties() &&
!current->IsJSGlobalObject() &&
!current->IsJSGlobalProxy()) {
- if (!name->IsSymbol()) {
- name = factory()->LookupSymbol(name);
+ if (!name->IsInternalizedString()) {
+ name = factory()->InternalizeString(name);
}
ASSERT(current->property_dictionary()->FindEntry(*name) ==
StringDictionary::kNotFound);
@@ -977,8 +999,10 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
// Save the map in scratch1 for later.
__ mov(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
}
- __ CheckMap(reg, current_map, miss, DONT_DO_SMI_CHECK,
- ALLOW_ELEMENT_TRANSITION_MAPS);
+ if (!current.is_identical_to(first) || check == CHECK_ALL_MAPS) {
+ __ CheckMap(reg, current_map, miss, DONT_DO_SMI_CHECK,
+ ALLOW_ELEMENT_TRANSITION_MAPS);
+ }
// Check access rights to the global object. This has to happen after
// the map check so that we know that the object is actually a global
@@ -1010,9 +1034,11 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
// Log the check depth.
LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
- // Check the holder map.
- __ CheckMap(reg, Handle<Map>(holder->map()),
- miss, DONT_DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
+ if (!holder.is_identical_to(first) || check == CHECK_ALL_MAPS) {
+ // Check the holder map.
+ __ CheckMap(reg, Handle<Map>(holder->map()),
+ miss, DONT_DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
+ }
// Perform security check for access to the global object.
ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
@@ -1030,128 +1056,140 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
}
-void StubCompiler::GenerateLoadField(Handle<JSObject> object,
- Handle<JSObject> holder,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- int index,
- Handle<String> name,
- Label* miss) {
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, miss);
+void BaseLoadStubCompiler::HandlerFrontendFooter(Label* success,
+ Label* miss) {
+ if (!miss->is_unused()) {
+ __ jmp(success);
+ __ bind(miss);
+ GenerateLoadMiss(masm(), kind());
+ }
+}
- // Check the prototype chain.
- Register reg = CheckPrototypes(
- object, receiver, holder, scratch1, scratch2, scratch3, name, miss);
- // Get the value from the properties.
- GenerateFastPropertyLoad(masm(), eax, reg, holder, index);
- __ ret(0);
-}
+Register BaseLoadStubCompiler::CallbackHandlerFrontend(
+ Handle<JSObject> object,
+ Register object_reg,
+ Handle<JSObject> holder,
+ Handle<String> name,
+ Label* success,
+ Handle<ExecutableAccessorInfo> callback) {
+ Label miss;
+ Register reg = HandlerFrontendHeader(object, object_reg, holder, name, &miss);
-void StubCompiler::GenerateDictionaryLoadCallback(Register receiver,
- Register name_reg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Handle<AccessorInfo> callback,
- Handle<String> name,
- Label* miss) {
- ASSERT(!receiver.is(scratch2));
- ASSERT(!receiver.is(scratch3));
- Register dictionary = scratch1;
- bool must_preserve_dictionary_reg = receiver.is(dictionary);
-
- // Load the properties dictionary.
- if (must_preserve_dictionary_reg) {
- __ push(dictionary);
- }
- __ mov(dictionary, FieldOperand(receiver, JSObject::kPropertiesOffset));
-
- // Probe the dictionary.
- Label probe_done, pop_and_miss;
- StringDictionaryLookupStub::GeneratePositiveLookup(masm(),
- &pop_and_miss,
- &probe_done,
- dictionary,
- name_reg,
- scratch2,
- scratch3);
- __ bind(&pop_and_miss);
- if (must_preserve_dictionary_reg) {
- __ pop(dictionary);
- }
- __ jmp(miss);
- __ bind(&probe_done);
-
- // If probing finds an entry in the dictionary, scratch2 contains the
- // index into the dictionary. Check that the value is the callback.
- Register index = scratch2;
- const int kElementsStartOffset =
- StringDictionary::kHeaderSize +
- StringDictionary::kElementsStartIndex * kPointerSize;
- const int kValueOffset = kElementsStartOffset + kPointerSize;
- __ mov(scratch3,
- Operand(dictionary, index, times_4, kValueOffset - kHeapObjectTag));
- if (must_preserve_dictionary_reg) {
- __ pop(dictionary);
+ if (!holder->HasFastProperties() && !holder->IsJSGlobalObject()) {
+ ASSERT(!reg.is(scratch2()));
+ ASSERT(!reg.is(scratch3()));
+ Register dictionary = scratch1();
+ bool must_preserve_dictionary_reg = reg.is(dictionary);
+
+ // Load the properties dictionary.
+ if (must_preserve_dictionary_reg) {
+ __ push(dictionary);
+ }
+ __ mov(dictionary, FieldOperand(reg, JSObject::kPropertiesOffset));
+
+ // Probe the dictionary.
+ Label probe_done, pop_and_miss;
+ StringDictionaryLookupStub::GeneratePositiveLookup(masm(),
+ &pop_and_miss,
+ &probe_done,
+ dictionary,
+ this->name(),
+ scratch2(),
+ scratch3());
+ __ bind(&pop_and_miss);
+ if (must_preserve_dictionary_reg) {
+ __ pop(dictionary);
+ }
+ __ jmp(&miss);
+ __ bind(&probe_done);
+
+ // If probing finds an entry in the dictionary, scratch2 contains the
+ // index into the dictionary. Check that the value is the callback.
+ Register index = scratch2();
+ const int kElementsStartOffset =
+ StringDictionary::kHeaderSize +
+ StringDictionary::kElementsStartIndex * kPointerSize;
+ const int kValueOffset = kElementsStartOffset + kPointerSize;
+ __ mov(scratch3(),
+ Operand(dictionary, index, times_4, kValueOffset - kHeapObjectTag));
+ if (must_preserve_dictionary_reg) {
+ __ pop(dictionary);
+ }
+ __ cmp(scratch3(), callback);
+ __ j(not_equal, &miss);
}
- __ cmp(scratch3, callback);
- __ j(not_equal, miss);
+
+ HandlerFrontendFooter(success, &miss);
+ return reg;
}
-void StubCompiler::GenerateLoadCallback(Handle<JSObject> object,
- Handle<JSObject> holder,
- Register receiver,
- Register name_reg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Handle<AccessorInfo> callback,
- Handle<String> name,
- Label* miss) {
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, miss);
+void BaseLoadStubCompiler::NonexistentHandlerFrontend(
+ Handle<JSObject> object,
+ Handle<JSObject> last,
+ Handle<String> name,
+ Label* success,
+ Handle<GlobalObject> global) {
+ Label miss;
- // Check that the maps haven't changed.
- Register reg = CheckPrototypes(object, receiver, holder, scratch1,
- scratch2, scratch3, name, miss);
+ Register reg = HandlerFrontendHeader(object, receiver(), last, name, &miss);
- if (!holder->HasFastProperties() && !holder->IsJSGlobalObject()) {
- GenerateDictionaryLoadCallback(
- reg, name_reg, scratch1, scratch2, scratch3, callback, name, miss);
+ // If the last object in the prototype chain is a global object,
+ // check that the global property cell is empty.
+ if (!global.is_null()) {
+ GenerateCheckPropertyCell(masm(), global, name, scratch2(), &miss);
}
+ if (!last->HasFastProperties()) {
+ __ mov(scratch2(), FieldOperand(reg, HeapObject::kMapOffset));
+ __ mov(scratch2(), FieldOperand(scratch2(), Map::kPrototypeOffset));
+ __ cmp(scratch2(), isolate()->factory()->null_value());
+ __ j(not_equal, &miss);
+ }
+
+ HandlerFrontendFooter(success, &miss);
+}
+
+
+void BaseLoadStubCompiler::GenerateLoadField(Register reg,
+ Handle<JSObject> holder,
+ PropertyIndex index) {
+ // Get the value from the properties.
+ GenerateFastPropertyLoad(masm(), eax, reg, holder, index);
+ __ ret(0);
+}
+
+
+void BaseLoadStubCompiler::GenerateLoadCallback(
+ Register reg,
+ Handle<ExecutableAccessorInfo> callback) {
// Insert additional parameters into the stack frame above return address.
- ASSERT(!scratch3.is(reg));
- __ pop(scratch3); // Get return address to place it below.
+ ASSERT(!scratch3().is(reg));
+ __ pop(scratch3()); // Get return address to place it below.
- __ push(receiver); // receiver
- __ mov(scratch2, esp);
- ASSERT(!scratch2.is(reg));
+ __ push(receiver()); // receiver
+ __ mov(scratch2(), esp);
+ ASSERT(!scratch2().is(reg));
__ push(reg); // holder
- // Push data from AccessorInfo.
+ // Push data from ExecutableAccessorInfo.
if (isolate()->heap()->InNewSpace(callback->data())) {
- __ mov(scratch1, Immediate(callback));
- __ push(FieldOperand(scratch1, AccessorInfo::kDataOffset));
+ __ mov(scratch1(), Immediate(callback));
+ __ push(FieldOperand(scratch1(), ExecutableAccessorInfo::kDataOffset));
} else {
- __ push(Immediate(Handle<Object>(callback->data())));
+ __ push(Immediate(Handle<Object>(callback->data(), isolate())));
}
__ push(Immediate(reinterpret_cast<int>(isolate())));
- // Save a pointer to where we pushed the arguments pointer.
- // This will be passed as the const AccessorInfo& to the C++ callback.
- __ push(scratch2);
+ // Save a pointer to where we pushed the arguments pointer. This will be
+ // passed as the const ExecutableAccessorInfo& to the C++ callback.
+ __ push(scratch2());
- __ push(name_reg); // name
+ __ push(name()); // name
__ mov(ebx, esp); // esp points to reference to name (handler).
- __ push(scratch3); // Restore return address.
+ __ push(scratch3()); // Restore return address.
  // 4-element array for v8::Arguments::values_, handler for the name, and a
  // pointer to the values (treated as a smi by the GC).
@@ -1172,44 +1210,22 @@ void StubCompiler::GenerateLoadCallback(Handle<JSObject> object,
}
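
// A hedged picture of the ia32 stack as the pushes above leave it (4-byte
// slots, lowest address first); the pointer at esp[8] is scratch2(), captured
// while esp still pointed at the receiver slot:
struct CallbackStackFrame {   // hypothetical mirror, not a V8 type
  void* return_address;       // esp[0]  restored via push(scratch3())
  void* name;                 // esp[4]  ebx points here (the "handler")
  void* args;                 // esp[8]  == &receiver, the AccessorInfo argument
  void* isolate;              // esp[12]
  void* data;                 // esp[16] from callback->data()
  void* holder;               // esp[20] the holder register (reg)
  void* receiver;             // esp[24]
};
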
-void StubCompiler::GenerateLoadConstant(Handle<JSObject> object,
- Handle<JSObject> holder,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Handle<JSFunction> value,
- Handle<String> name,
- Label* miss) {
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, miss);
-
- // Check that the maps haven't changed.
- CheckPrototypes(
- object, receiver, holder, scratch1, scratch2, scratch3, name, miss);
-
+void BaseLoadStubCompiler::GenerateLoadConstant(Handle<JSFunction> value) {
// Return the constant value.
__ LoadHeapObject(eax, value);
__ ret(0);
}
-void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
- Handle<JSObject> interceptor_holder,
- LookupResult* lookup,
- Register receiver,
- Register name_reg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Handle<String> name,
- Label* miss) {
+void BaseLoadStubCompiler::GenerateLoadInterceptor(
+ Register holder_reg,
+ Handle<JSObject> object,
+ Handle<JSObject> interceptor_holder,
+ LookupResult* lookup,
+ Handle<String> name) {
ASSERT(interceptor_holder->HasNamedInterceptor());
ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined());
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, miss);
-
  // So far the most popular follow-ups for interceptor loads are FIELD
  // and CALLBACKS, so inline only those; other cases may be added later.
@@ -1218,8 +1234,9 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
if (lookup->IsField()) {
compile_followup_inline = true;
} else if (lookup->type() == CALLBACKS &&
- lookup->GetCallbackObject()->IsAccessorInfo()) {
- AccessorInfo* callback = AccessorInfo::cast(lookup->GetCallbackObject());
+ lookup->GetCallbackObject()->IsExecutableAccessorInfo()) {
+ ExecutableAccessorInfo* callback =
+ ExecutableAccessorInfo::cast(lookup->GetCallbackObject());
compile_followup_inline = callback->getter() != NULL &&
callback->IsCompatibleReceiver(*object);
}
@@ -1229,17 +1246,14 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
// Compile the interceptor call, followed by inline code to load the
// property from further up the prototype chain if the call fails.
// Check that the maps haven't changed.
- Register holder_reg = CheckPrototypes(object, receiver, interceptor_holder,
- scratch1, scratch2, scratch3,
- name, miss);
- ASSERT(holder_reg.is(receiver) || holder_reg.is(scratch1));
+ ASSERT(holder_reg.is(receiver()) || holder_reg.is(scratch1()));
// Preserve the receiver register explicitly whenever it is different from
// the holder and it is needed should the interceptor return without any
// result. The CALLBACKS case needs the receiver to be passed into C++ code,
// the FIELD case might cause a miss during the prototype check.
  bool must_perform_prototype_check = *interceptor_holder != lookup->holder();
- bool must_preserve_receiver_reg = !receiver.is(holder_reg) &&
+ bool must_preserve_receiver_reg = !receiver().is(holder_reg) &&
      (lookup->type() == CALLBACKS || must_perform_prototype_check);
// Save necessary data before invoking an interceptor.
@@ -1248,18 +1262,18 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
FrameScope frame_scope(masm(), StackFrame::INTERNAL);
if (must_preserve_receiver_reg) {
- __ push(receiver);
+ __ push(receiver());
}
__ push(holder_reg);
- __ push(name_reg);
+ __ push(this->name());
  // Invoke the interceptor. Note: map checks from the receiver to the
  // interceptor's holder have been compiled before (see the caller of
  // this method).
CompileCallLoadPropertyWithInterceptor(masm(),
- receiver,
+ receiver(),
holder_reg,
- name_reg,
+ this->name(),
interceptor_holder);
// Check if interceptor provided a value for property. If it's
@@ -1273,76 +1287,28 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
// Clobber registers when generating debug-code to provoke errors.
__ bind(&interceptor_failed);
if (FLAG_debug_code) {
- __ mov(receiver, Immediate(BitCast<int32_t>(kZapValue)));
+ __ mov(receiver(), Immediate(BitCast<int32_t>(kZapValue)));
__ mov(holder_reg, Immediate(BitCast<int32_t>(kZapValue)));
- __ mov(name_reg, Immediate(BitCast<int32_t>(kZapValue)));
+ __ mov(this->name(), Immediate(BitCast<int32_t>(kZapValue)));
}
- __ pop(name_reg);
+ __ pop(this->name());
__ pop(holder_reg);
if (must_preserve_receiver_reg) {
- __ pop(receiver);
+ __ pop(receiver());
}
// Leave the internal frame.
}
- // Check that the maps from interceptor's holder to lookup's holder
- // haven't changed. And load lookup's holder into holder_reg.
-  if (must_perform_prototype_check) {
- holder_reg = CheckPrototypes(interceptor_holder,
- holder_reg,
- Handle<JSObject>(lookup->holder()),
- scratch1,
- scratch2,
- scratch3,
- name,
- miss);
- }
-
- if (lookup->IsField()) {
- // We found FIELD property in prototype chain of interceptor's holder.
- // Retrieve a field from field's holder.
- GenerateFastPropertyLoad(masm(), eax, holder_reg,
- Handle<JSObject>(lookup->holder()),
- lookup->GetFieldIndex());
- __ ret(0);
- } else {
- // We found CALLBACKS property in prototype chain of interceptor's
- // holder.
- ASSERT(lookup->type() == CALLBACKS);
- Handle<AccessorInfo> callback(
- AccessorInfo::cast(lookup->GetCallbackObject()));
- ASSERT(callback->getter() != NULL);
-
- // Tail call to runtime.
- // Important invariant in CALLBACKS case: the code above must be
- // structured to never clobber |receiver| register.
- __ pop(scratch2); // return address
- __ push(receiver);
- __ push(holder_reg);
- __ mov(holder_reg, Immediate(callback));
- __ push(FieldOperand(holder_reg, AccessorInfo::kDataOffset));
- __ push(Immediate(reinterpret_cast<int>(isolate())));
- __ push(holder_reg);
- __ push(name_reg);
- __ push(scratch2); // restore return address
-
- ExternalReference ref =
- ExternalReference(IC_Utility(IC::kLoadCallbackProperty),
- masm()->isolate());
- __ TailCallExternalReference(ref, 6, 1);
- }
+ GenerateLoadPostInterceptor(holder_reg, interceptor_holder, name, lookup);
} else { // !compile_followup_inline
// Call the runtime system to load the interceptor.
// Check that the maps haven't changed.
- Register holder_reg =
- CheckPrototypes(object, receiver, interceptor_holder,
- scratch1, scratch2, scratch3, name, miss);
- __ pop(scratch2); // save old return address
- PushInterceptorArguments(masm(), receiver, holder_reg,
- name_reg, interceptor_holder);
- __ push(scratch2); // restore old return address
+ __ pop(scratch2()); // save old return address
+ PushInterceptorArguments(masm(), receiver(), holder_reg,
+ this->name(), interceptor_holder);
+ __ push(scratch2()); // restore old return address
ExternalReference ref =
ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForLoad),
@@ -1423,7 +1389,7 @@ void CallStubCompiler::GenerateMissBranch() {
Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
Handle<JSObject> holder,
- int index,
+ PropertyIndex index,
Handle<String> name) {
// ----------- S t a t e -------------
// -- ecx : name
@@ -1518,7 +1484,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
Label call_builtin;
if (argc == 1) { // Otherwise fall through to call builtin.
- Label attempt_to_grow_elements, with_write_barrier;
+ Label attempt_to_grow_elements, with_write_barrier, check_double;
// Get the elements array of the object.
__ mov(edi, FieldOperand(edx, JSArray::kElementsOffset));
@@ -1526,7 +1492,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
// Check that the elements are in fast mode and writable.
__ cmp(FieldOperand(edi, HeapObject::kMapOffset),
Immediate(factory()->fixed_array_map()));
- __ j(not_equal, &call_builtin);
+ __ j(not_equal, &check_double);
// Get the array's length into eax and calculate new length.
__ mov(eax, FieldOperand(edx, JSArray::kLengthOffset));
@@ -1557,17 +1523,49 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
__ ret((argc + 1) * kPointerSize);
+ __ bind(&check_double);
+
+ // Check that the elements are in double mode.
+ __ cmp(FieldOperand(edi, HeapObject::kMapOffset),
+ Immediate(factory()->fixed_double_array_map()));
+ __ j(not_equal, &call_builtin);
+
+ // Get the array's length into eax and calculate new length.
+ __ mov(eax, FieldOperand(edx, JSArray::kLengthOffset));
+ STATIC_ASSERT(kSmiTagSize == 1);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ add(eax, Immediate(Smi::FromInt(argc)));
+
+ // Get the elements' length into ecx.
+ __ mov(ecx, FieldOperand(edi, FixedArray::kLengthOffset));
+
+ // Check if we could survive without allocation.
+ __ cmp(eax, ecx);
+ __ j(greater, &call_builtin);
+
+ __ mov(ecx, Operand(esp, argc * kPointerSize));
+ __ StoreNumberToDoubleElements(
+ ecx, edi, eax, ecx, xmm0, &call_builtin, true, argc * kDoubleSize);
+
+ // Save new length.
+ __ mov(FieldOperand(edx, JSArray::kLengthOffset), eax);
+ __ ret((argc + 1) * kPointerSize);
+
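
// A hedged C++ restatement of the double fast path above: bail out to the
// builtin when the backing store would have to grow, otherwise store the
// value and save the new JSArray length (argc is 1 on this path):
static bool FastDoublePush(double* elements, int* length, int capacity,
                           double value) {
  if (*length + 1 > capacity) return false;  // jump to &call_builtin
  elements[*length] = value;                 // StoreNumberToDoubleElements
  *length += 1;                              // save new length
  return true;
}
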
__ bind(&with_write_barrier);
__ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
  if (FLAG_smi_only_arrays && !FLAG_trace_elements_transitions) {
Label fast_object, not_fast_object;
__ CheckFastObjectElements(ebx, &not_fast_object, Label::kNear);
__ jmp(&fast_object);
// In case of fast smi-only, convert to fast object, otherwise bail out.
__ bind(&not_fast_object);
__ CheckFastSmiElements(ebx, &call_builtin);
+ __ cmp(FieldOperand(ecx, HeapObject::kMapOffset),
+ Immediate(factory()->heap_number_map()));
+ __ j(equal, &call_builtin);
// edi: elements array
// edx: receiver
// ebx: map
@@ -1579,7 +1577,9 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
&try_holey_map);
ElementsTransitionGenerator::
- GenerateMapChangeElementsTransition(masm());
+ GenerateMapChangeElementsTransition(masm(),
+ DONT_TRACK_ALLOCATION_SITE,
+ NULL);
// Restore edi.
__ mov(edi, FieldOperand(edx, JSArray::kElementsOffset));
__ jmp(&fast_object);
@@ -1591,7 +1591,9 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
edi,
&call_builtin);
ElementsTransitionGenerator::
- GenerateMapChangeElementsTransition(masm());
+ GenerateMapChangeElementsTransition(masm(),
+ DONT_TRACK_ALLOCATION_SITE,
+ NULL);
// Restore edi.
__ mov(edi, FieldOperand(edx, JSArray::kElementsOffset));
__ bind(&fast_object);
@@ -1821,8 +1823,9 @@ Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall(
eax,
&miss);
ASSERT(!object.is_identical_to(holder));
- CheckPrototypes(Handle<JSObject>(JSObject::cast(object->GetPrototype())),
- eax, holder, ebx, edx, edi, name, &miss);
+ CheckPrototypes(
+ Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
+ eax, holder, ebx, edx, edi, name, &miss);
Register receiver = ebx;
Register index = edi;
@@ -1904,8 +1907,9 @@ Handle<Code> CallStubCompiler::CompileStringCharAtCall(
eax,
&miss);
ASSERT(!object.is_identical_to(holder));
- CheckPrototypes(Handle<JSObject>(JSObject::cast(object->GetPrototype())),
- eax, holder, ebx, edx, edi, name, &miss);
+ CheckPrototypes(
+ Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
+ eax, holder, ebx, edx, edi, name, &miss);
Register receiver = eax;
Register index = edi;
@@ -2318,11 +2322,11 @@ Handle<Code> CallStubCompiler::CompileFastApiCall(
}
-Handle<Code> CallStubCompiler::CompileCallConstant(Handle<Object> object,
- Handle<JSObject> holder,
- Handle<JSFunction> function,
- Handle<String> name,
- CheckType check) {
+void CallStubCompiler::CompileHandlerFrontend(Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<String> name,
+ CheckType check,
+ Label* success) {
// ----------- S t a t e -------------
// -- ecx : name
// -- esp[0] : return address
@@ -2330,15 +2334,6 @@ Handle<Code> CallStubCompiler::CompileCallConstant(Handle<Object> object,
// -- ...
// -- esp[(argc + 1) * 4] : receiver
// -----------------------------------
-
- if (HasCustomCallGenerator(function)) {
- Handle<Code> code = CompileCustomCall(object, holder,
- Handle<JSGlobalPropertyCell>::null(),
- function, name);
- // A null handle means bail out to the regular compiler code below.
- if (!code.is_null()) return code;
- }
-
Label miss;
GenerateNameCheck(name, &miss);
@@ -2371,76 +2366,93 @@ Handle<Code> CallStubCompiler::CompileCallConstant(Handle<Object> object,
break;
case STRING_CHECK:
- if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
- // Check that the object is a string or a symbol.
- __ CmpObjectType(edx, FIRST_NONSTRING_TYPE, eax);
- __ j(above_equal, &miss);
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::STRING_FUNCTION_INDEX, eax, &miss);
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype())),
- eax, holder, ebx, edx, edi, name, &miss);
- } else {
- // Calling non-strict non-builtins with a value as the receiver
- // requires boxing.
- __ jmp(&miss);
- }
+ // Check that the object is a string.
+ __ CmpObjectType(edx, FIRST_NONSTRING_TYPE, eax);
+ __ j(above_equal, &miss);
+ // Check that the maps starting from the prototype haven't changed.
+ GenerateDirectLoadGlobalFunctionPrototype(
+ masm(), Context::STRING_FUNCTION_INDEX, eax, &miss);
+ CheckPrototypes(
+ Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
+ eax, holder, ebx, edx, edi, name, &miss);
break;
- case NUMBER_CHECK:
- if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
- Label fast;
- // Check that the object is a smi or a heap number.
- __ JumpIfSmi(edx, &fast);
- __ CmpObjectType(edx, HEAP_NUMBER_TYPE, eax);
- __ j(not_equal, &miss);
- __ bind(&fast);
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::NUMBER_FUNCTION_INDEX, eax, &miss);
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype())),
- eax, holder, ebx, edx, edi, name, &miss);
- } else {
- // Calling non-strict non-builtins with a value as the receiver
- // requires boxing.
- __ jmp(&miss);
- }
+ case SYMBOL_CHECK:
+ // Check that the object is a symbol.
+ __ CmpObjectType(edx, SYMBOL_TYPE, eax);
+ __ j(not_equal, &miss);
break;
- case BOOLEAN_CHECK:
- if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
- Label fast;
- // Check that the object is a boolean.
- __ cmp(edx, factory()->true_value());
- __ j(equal, &fast);
- __ cmp(edx, factory()->false_value());
- __ j(not_equal, &miss);
- __ bind(&fast);
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::BOOLEAN_FUNCTION_INDEX, eax, &miss);
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype())),
- eax, holder, ebx, edx, edi, name, &miss);
- } else {
- // Calling non-strict non-builtins with a value as the receiver
- // requires boxing.
- __ jmp(&miss);
- }
+ case NUMBER_CHECK: {
+ Label fast;
+ // Check that the object is a smi or a heap number.
+ __ JumpIfSmi(edx, &fast);
+ __ CmpObjectType(edx, HEAP_NUMBER_TYPE, eax);
+ __ j(not_equal, &miss);
+ __ bind(&fast);
+ // Check that the maps starting from the prototype haven't changed.
+ GenerateDirectLoadGlobalFunctionPrototype(
+ masm(), Context::NUMBER_FUNCTION_INDEX, eax, &miss);
+ CheckPrototypes(
+ Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
+ eax, holder, ebx, edx, edi, name, &miss);
+ break;
+ }
+ case BOOLEAN_CHECK: {
+ Label fast;
+ // Check that the object is a boolean.
+ __ cmp(edx, factory()->true_value());
+ __ j(equal, &fast);
+ __ cmp(edx, factory()->false_value());
+ __ j(not_equal, &miss);
+ __ bind(&fast);
+ // Check that the maps starting from the prototype haven't changed.
+ GenerateDirectLoadGlobalFunctionPrototype(
+ masm(), Context::BOOLEAN_FUNCTION_INDEX, eax, &miss);
+ CheckPrototypes(
+ Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
+ eax, holder, ebx, edx, edi, name, &miss);
break;
+ }
}
+ __ jmp(success);
+
+ // Handle call cache miss.
+ __ bind(&miss);
+ GenerateMissBranch();
+}
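
// A hedged host-side equivalent of the NUMBER_CHECK guard above, using V8's
// ia32 smi tagging (a clear low bit marks a smi); the raw map compare stands
// in for CmpObjectType(HEAP_NUMBER_TYPE):
static inline bool IsSmi(unsigned tagged) { return (tagged & 1) == 0; }
static inline bool PassesNumberCheck(unsigned tagged, const void* map,
                                     const void* heap_number_map) {
  return IsSmi(tagged) || map == heap_number_map;
}
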
+
+
+void CallStubCompiler::CompileHandlerBackend(Handle<JSFunction> function) {
CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
? CALL_AS_FUNCTION
: CALL_AS_METHOD;
__ InvokeFunction(function, arguments(), JUMP_FUNCTION,
NullCallWrapper(), call_kind);
+}
- // Handle call cache miss.
- __ bind(&miss);
- GenerateMissBranch();
+
+Handle<Code> CallStubCompiler::CompileCallConstant(
+ Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<String> name,
+ CheckType check,
+ Handle<JSFunction> function) {
+ if (HasCustomCallGenerator(function)) {
+ Handle<Code> code = CompileCustomCall(object, holder,
+ Handle<JSGlobalPropertyCell>::null(),
+ function, name);
+ // A null handle means bail out to the regular compiler code below.
+ if (!code.is_null()) return code;
+ }
+
+ Label success;
+
+ CompileHandlerFrontend(object, holder, name, check, &success);
+ __ bind(&success);
+ CompileHandlerBackend(function);
// Return the generated code.
return GetCode(function);
@@ -2604,7 +2616,7 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
Handle<String> name,
Handle<JSObject> receiver,
Handle<JSObject> holder,
- Handle<AccessorInfo> callback) {
+ Handle<ExecutableAccessorInfo> callback) {
// ----------- S t a t e -------------
// -- eax : value
// -- ecx : name
@@ -2867,7 +2879,9 @@ Handle<Code> KeyedStoreStubCompiler::CompileStoreElement(
ElementsKind elements_kind = receiver_map->elements_kind();
bool is_jsarray = receiver_map->instance_type() == JS_ARRAY_TYPE;
Handle<Code> stub =
- KeyedStoreElementStub(is_jsarray, elements_kind, grow_mode_).GetCode();
+ KeyedStoreElementStub(is_jsarray,
+ elements_kind,
+ grow_mode_).GetCode(isolate());
__ DispatchMap(edx, receiver_map, stub, DO_SMI_CHECK);
@@ -2910,89 +2924,49 @@ Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
__ jmp(miss_ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(Code::NORMAL, factory()->empty_string(), MEGAMORPHIC);
+ return GetCode(Code::NORMAL, factory()->empty_string(), POLYMORPHIC);
}
-Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<String> name,
- Handle<JSObject> object,
- Handle<JSObject> last) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label miss;
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(edx, &miss);
-
- ASSERT(last->IsGlobalObject() || last->HasFastProperties());
-
- // Check the maps of the full prototype chain. Also check that
- // global property cells up to (but not including) the last object
- // in the prototype chain are empty.
- CheckPrototypes(object, edx, last, ebx, eax, edi, name, &miss);
+Handle<Code> LoadStubCompiler::CompileLoadNonexistent(
+ Handle<JSObject> object,
+ Handle<JSObject> last,
+ Handle<String> name,
+ Handle<GlobalObject> global) {
+ Label success;
- // If the last object in the prototype chain is a global object,
- // check that the global property cell is empty.
- if (last->IsGlobalObject()) {
- GenerateCheckPropertyCell(
- masm(), Handle<GlobalObject>::cast(last), name, eax, &miss);
- }
+ NonexistentHandlerFrontend(object, last, name, &success, global);
+ __ bind(&success);
// Return undefined if maps of the full prototype chain are still the
// same and no global property with this name contains a value.
__ mov(eax, isolate()->factory()->undefined_value());
__ ret(0);
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::LOAD_IC);
-
// Return the generated code.
- return GetCode(Code::NONEXISTENT, factory()->empty_string());
+ return GetCode(Code::HANDLER_FRAGMENT, Code::NONEXISTENT, name);
}
-Handle<Code> LoadStubCompiler::CompileLoadField(Handle<JSObject> object,
- Handle<JSObject> holder,
- int index,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label miss;
-
- GenerateLoadField(object, holder, edx, ebx, eax, edi, index, name, &miss);
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::LOAD_IC);
-
- // Return the generated code.
- return GetCode(Code::FIELD, name);
+Register* LoadStubCompiler::registers() {
+ // receiver, name, scratch1, scratch2, scratch3, scratch4.
+ static Register registers[] = { edx, ecx, ebx, eax, edi, no_reg };
+ return registers;
}
-Handle<Code> LoadStubCompiler::CompileLoadCallback(
- Handle<String> name,
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<AccessorInfo> callback) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label miss;
+Register* KeyedLoadStubCompiler::registers() {
+ // receiver, name, scratch1, scratch2, scratch3, scratch4.
+ static Register registers[] = { edx, ecx, ebx, eax, edi, no_reg };
+ return registers;
+}
- GenerateLoadCallback(object, holder, edx, ecx, ebx, eax, edi, no_reg,
- callback, name, &miss);
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::LOAD_IC);
- // Return the generated code.
- return GetCode(Code::CALLBACKS, name);
+void KeyedLoadStubCompiler::GenerateNameCheck(Handle<String> name,
+ Register name_reg,
+ Label* miss) {
+ __ cmp(name_reg, Immediate(name));
+ __ j(not_equal, miss);
}
@@ -3033,322 +3007,44 @@ void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
#define __ ACCESS_MASM(masm())
-Handle<Code> LoadStubCompiler::CompileLoadViaGetter(
- Handle<String> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- Handle<JSFunction> getter) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label miss;
-
- // Check that the maps haven't changed.
- __ JumpIfSmi(edx, &miss);
- CheckPrototypes(receiver, edx, holder, ebx, eax, edi, name, &miss);
-
- GenerateLoadViaGetter(masm(), getter);
-
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::LOAD_IC);
-
- // Return the generated code.
- return GetCode(Code::CALLBACKS, name);
-}
-
-
-Handle<Code> LoadStubCompiler::CompileLoadConstant(Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<JSFunction> value,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label miss;
-
- GenerateLoadConstant(object, holder, edx, ebx, eax, edi, value, name, &miss);
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::LOAD_IC);
-
- // Return the generated code.
- return GetCode(Code::CONSTANT_FUNCTION, name);
-}
-
-
-Handle<Code> LoadStubCompiler::CompileLoadInterceptor(Handle<JSObject> receiver,
- Handle<JSObject> holder,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label miss;
-
- LookupResult lookup(isolate());
- LookupPostInterceptor(holder, name, &lookup);
-
- // TODO(368): Compile in the whole chain: all the interceptors in
- // prototypes and ultimate answer.
- GenerateLoadInterceptor(receiver, holder, &lookup, edx, ecx, eax, ebx, edi,
- name, &miss);
-
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::LOAD_IC);
-
- // Return the generated code.
- return GetCode(Code::INTERCEPTOR, name);
-}
-
-
Handle<Code> LoadStubCompiler::CompileLoadGlobal(
Handle<JSObject> object,
- Handle<GlobalObject> holder,
+ Handle<GlobalObject> global,
Handle<JSGlobalPropertyCell> cell,
Handle<String> name,
bool is_dont_delete) {
- // ----------- S t a t e -------------
- // -- ecx : name
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label miss;
-
- // Check that the maps haven't changed.
- __ JumpIfSmi(edx, &miss);
- CheckPrototypes(object, edx, holder, ebx, eax, edi, name, &miss);
+ Label success, miss;
+ __ CheckMap(receiver(), Handle<Map>(object->map()), &miss, DO_SMI_CHECK);
+ HandlerFrontendHeader(
+ object, receiver(), Handle<JSObject>::cast(global), name, &miss);
// Get the value from the cell.
if (Serializer::enabled()) {
- __ mov(ebx, Immediate(cell));
- __ mov(ebx, FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset));
+ __ mov(eax, Immediate(cell));
+ __ mov(eax, FieldOperand(eax, JSGlobalPropertyCell::kValueOffset));
} else {
- __ mov(ebx, Operand::Cell(cell));
+ __ mov(eax, Operand::Cell(cell));
}
// Check for deleted property if property can actually be deleted.
if (!is_dont_delete) {
- __ cmp(ebx, factory()->the_hole_value());
+ __ cmp(eax, factory()->the_hole_value());
__ j(equal, &miss);
} else if (FLAG_debug_code) {
- __ cmp(ebx, factory()->the_hole_value());
+ __ cmp(eax, factory()->the_hole_value());
__ Check(not_equal, "DontDelete cells can't contain the hole");
}
+ HandlerFrontendFooter(&success, &miss);
+ __ bind(&success);
+
Counters* counters = isolate()->counters();
__ IncrementCounter(counters->named_load_global_stub(), 1);
- __ mov(eax, ebx);
+ // The code above already loads the result into the return register.
__ ret(0);
- __ bind(&miss);
- __ IncrementCounter(counters->named_load_global_stub_miss(), 1);
- GenerateLoadMiss(masm(), Code::LOAD_IC);
-
- // Return the generated code.
- return GetCode(Code::NORMAL, name);
-}
-
-
-Handle<Code> KeyedLoadStubCompiler::CompileLoadField(Handle<String> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- int index) {
- // ----------- S t a t e -------------
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label miss;
-
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->keyed_load_field(), 1);
-
- // Check that the name has not changed.
- __ cmp(ecx, Immediate(name));
- __ j(not_equal, &miss);
-
- GenerateLoadField(receiver, holder, edx, ebx, eax, edi, index, name, &miss);
-
- __ bind(&miss);
- __ DecrementCounter(counters->keyed_load_field(), 1);
- GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
-
- // Return the generated code.
- return GetCode(Code::FIELD, name);
-}
-
-
-Handle<Code> KeyedLoadStubCompiler::CompileLoadCallback(
- Handle<String> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- Handle<AccessorInfo> callback) {
- // ----------- S t a t e -------------
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label miss;
-
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->keyed_load_callback(), 1);
-
- // Check that the name has not changed.
- __ cmp(ecx, Immediate(name));
- __ j(not_equal, &miss);
-
- GenerateLoadCallback(receiver, holder, edx, ecx, ebx, eax, edi, no_reg,
- callback, name, &miss);
-
- __ bind(&miss);
- __ DecrementCounter(counters->keyed_load_callback(), 1);
- GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
-
- // Return the generated code.
- return GetCode(Code::CALLBACKS, name);
-}
-
-
-Handle<Code> KeyedLoadStubCompiler::CompileLoadConstant(
- Handle<String> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- Handle<JSFunction> value) {
- // ----------- S t a t e -------------
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label miss;
-
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->keyed_load_constant_function(), 1);
-
- // Check that the name has not changed.
- __ cmp(ecx, Immediate(name));
- __ j(not_equal, &miss);
-
- GenerateLoadConstant(
- receiver, holder, edx, ebx, eax, edi, value, name, &miss);
- __ bind(&miss);
- __ DecrementCounter(counters->keyed_load_constant_function(), 1);
- GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
-
- // Return the generated code.
- return GetCode(Code::CONSTANT_FUNCTION, name);
-}
-
-
-Handle<Code> KeyedLoadStubCompiler::CompileLoadInterceptor(
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label miss;
-
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->keyed_load_interceptor(), 1);
-
- // Check that the name has not changed.
- __ cmp(ecx, Immediate(name));
- __ j(not_equal, &miss);
-
- LookupResult lookup(isolate());
- LookupPostInterceptor(holder, name, &lookup);
- GenerateLoadInterceptor(receiver, holder, &lookup, edx, ecx, eax, ebx, edi,
- name, &miss);
- __ bind(&miss);
- __ DecrementCounter(counters->keyed_load_interceptor(), 1);
- GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
-
- // Return the generated code.
- return GetCode(Code::INTERCEPTOR, name);
-}
-
-
-Handle<Code> KeyedLoadStubCompiler::CompileLoadArrayLength(
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label miss;
-
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->keyed_load_array_length(), 1);
-
- // Check that the name has not changed.
- __ cmp(ecx, Immediate(name));
- __ j(not_equal, &miss);
-
- GenerateLoadArrayLength(masm(), edx, eax, &miss);
- __ bind(&miss);
- __ DecrementCounter(counters->keyed_load_array_length(), 1);
- GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
-
- // Return the generated code.
- return GetCode(Code::CALLBACKS, name);
-}
-
-
-Handle<Code> KeyedLoadStubCompiler::CompileLoadStringLength(
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label miss;
-
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->keyed_load_string_length(), 1);
-
- // Check that the name has not changed.
- __ cmp(ecx, Immediate(name));
- __ j(not_equal, &miss);
-
- GenerateLoadStringLength(masm(), edx, eax, ebx, &miss, true);
- __ bind(&miss);
- __ DecrementCounter(counters->keyed_load_string_length(), 1);
- GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
-
- // Return the generated code.
- return GetCode(Code::CALLBACKS, name);
-}
-
-
-Handle<Code> KeyedLoadStubCompiler::CompileLoadFunctionPrototype(
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label miss;
-
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->keyed_load_function_prototype(), 1);
-
- // Check that the name has not changed.
- __ cmp(ecx, Immediate(name));
- __ j(not_equal, &miss);
-
- GenerateLoadFunctionPrototype(masm(), edx, eax, ebx, &miss);
- __ bind(&miss);
- __ DecrementCounter(counters->keyed_load_function_prototype(), 1);
- GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
-
// Return the generated code.
- return GetCode(Code::CALLBACKS, name);
+ return GetCode(Code::IC_FRAGMENT, Code::NORMAL, name);
}
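
// A hedged sketch of the fast path above: read the cell's value slot and
// miss on the hole unless the property is DontDelete, in which case the
// hole is asserted impossible:
static void* LoadGlobalCellValue(void** value_slot, void* the_hole,
                                 bool is_dont_delete, bool* miss) {
  void* value = *value_slot;
  *miss = !is_dont_delete && value == the_hole;
  return value;  // the stub already has it in the return register (eax)
}
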
@@ -3361,41 +3057,53 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadElement(
// -----------------------------------
ElementsKind elements_kind = receiver_map->elements_kind();
- Handle<Code> stub = KeyedLoadElementStub(elements_kind).GetCode();
-
- __ DispatchMap(edx, receiver_map, stub, DO_SMI_CHECK);
+ if (receiver_map->has_fast_elements() ||
+ receiver_map->has_external_array_elements()) {
+ Handle<Code> stub = KeyedLoadFastElementStub(
+ receiver_map->instance_type() == JS_ARRAY_TYPE,
+ elements_kind).GetCode(isolate());
+ __ DispatchMap(edx, receiver_map, stub, DO_SMI_CHECK);
+ } else {
+ Handle<Code> stub =
+ KeyedLoadDictionaryElementStub().GetCode(isolate());
+ __ DispatchMap(edx, receiver_map, stub, DO_SMI_CHECK);
+ }
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
// Return the generated code.
- return GetCode(Code::NORMAL, factory()->empty_string());
+ return GetCode(Code::IC_FRAGMENT, Code::NORMAL, factory()->empty_string());
}
-Handle<Code> KeyedLoadStubCompiler::CompileLoadPolymorphic(
+Handle<Code> BaseLoadStubCompiler::CompilePolymorphicIC(
MapHandleList* receiver_maps,
- CodeHandleList* handler_ics) {
- // ----------- S t a t e -------------
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
+ CodeHandleList* handlers,
+ Handle<String> name,
+ Code::StubType type,
+ IcCheckType check) {
Label miss;
- __ JumpIfSmi(edx, &miss);
- Register map_reg = ebx;
- __ mov(map_reg, FieldOperand(edx, HeapObject::kMapOffset));
+ if (check == PROPERTY) {
+ GenerateNameCheck(name, this->name(), &miss);
+ }
+
+ __ JumpIfSmi(receiver(), &miss);
+ Register map_reg = scratch1();
+ __ mov(map_reg, FieldOperand(receiver(), HeapObject::kMapOffset));
int receiver_count = receiver_maps->length();
for (int current = 0; current < receiver_count; ++current) {
__ cmp(map_reg, receiver_maps->at(current));
- __ j(equal, handler_ics->at(current));
+ __ j(equal, handlers->at(current));
}
__ bind(&miss);
- GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+ GenerateLoadMiss(masm(), kind());
// Return the generated code.
- return GetCode(Code::NORMAL, factory()->empty_string(), MEGAMORPHIC);
+ InlineCacheState state =
+ receiver_maps->length() > 1 ? POLYMORPHIC : MONOMORPHIC;
+ return GetCode(Code::IC_FRAGMENT, type, name, state);
}
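
// A hedged C++ mirror of the dispatch loop above: compare the receiver's map
// against each cached map and branch to the matching handler, else miss:
static int PolymorphicDispatch(const void* map, const void* const* maps,
                               int receiver_count) {
  for (int current = 0; current < receiver_count; ++current) {
    if (maps[current] == map) return current;  // j(equal, handlers->at(current))
  }
  return -1;  // fall through to the miss label
}
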
@@ -3500,7 +3208,8 @@ Handle<Code> ConstructStubCompiler::CompileConstructStub(
__ mov(Operand(edx, i * kPointerSize), ebx);
} else {
// Set the property to the constant value.
- Handle<Object> constant(shared->GetThisPropertyAssignmentConstant(i));
+ Handle<Object> constant(shared->GetThisPropertyAssignmentConstant(i),
+ isolate());
__ mov(Operand(edx, i * kPointerSize), Immediate(constant));
}
}
@@ -3624,157 +3333,6 @@ static void GenerateSmiKeyCheck(MacroAssembler* masm,
}
-void KeyedLoadStubCompiler::GenerateLoadExternalArray(
- MacroAssembler* masm,
- ElementsKind elements_kind) {
- // ----------- S t a t e -------------
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label miss_force_generic, failed_allocation, slow;
-
- // This stub is meant to be tail-jumped to, the receiver must already
- // have been verified by the caller to not be a smi.
-
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, ecx, eax, xmm0, xmm1, &miss_force_generic);
-
- // Check that the index is in range.
- __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
- __ cmp(ecx, FieldOperand(ebx, ExternalArray::kLengthOffset));
- // Unsigned comparison catches both negative and too-large values.
- __ j(above_equal, &miss_force_generic);
- __ mov(ebx, FieldOperand(ebx, ExternalArray::kExternalPointerOffset));
- // ebx: base pointer of external storage
- switch (elements_kind) {
- case EXTERNAL_BYTE_ELEMENTS:
- __ SmiUntag(ecx); // Untag the index.
- __ movsx_b(eax, Operand(ebx, ecx, times_1, 0));
- break;
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- case EXTERNAL_PIXEL_ELEMENTS:
- __ SmiUntag(ecx); // Untag the index.
- __ movzx_b(eax, Operand(ebx, ecx, times_1, 0));
- break;
- case EXTERNAL_SHORT_ELEMENTS:
- __ movsx_w(eax, Operand(ebx, ecx, times_1, 0));
- break;
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- __ movzx_w(eax, Operand(ebx, ecx, times_1, 0));
- break;
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- case EXTERNAL_INT_ELEMENTS:
- __ mov(eax, Operand(ebx, ecx, times_2, 0));
- break;
- case EXTERNAL_FLOAT_ELEMENTS:
- __ fld_s(Operand(ebx, ecx, times_2, 0));
- break;
- case EXTERNAL_DOUBLE_ELEMENTS:
- __ fld_d(Operand(ebx, ecx, times_4, 0));
- break;
- default:
- UNREACHABLE();
- break;
- }
-
- // For integer array types:
- // eax: value
- // For floating-point array type:
- // FP(0): value
-
- if (elements_kind == EXTERNAL_INT_ELEMENTS ||
- elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) {
- // For the Int and UnsignedInt array types, we need to see whether
- // the value can be represented in a Smi. If not, we need to convert
- // it to a HeapNumber.
- Label box_int;
- if (elements_kind == EXTERNAL_INT_ELEMENTS) {
- __ cmp(eax, 0xc0000000);
- __ j(sign, &box_int);
- } else {
- ASSERT_EQ(EXTERNAL_UNSIGNED_INT_ELEMENTS, elements_kind);
- // The test is different for unsigned int values. Since we need
- // the value to be in the range of a positive smi, we can't
- // handle either of the top two bits being set in the value.
- __ test(eax, Immediate(0xc0000000));
- __ j(not_zero, &box_int);
- }
-
- __ SmiTag(eax);
- __ ret(0);
-
- __ bind(&box_int);
-
- // Allocate a HeapNumber for the int and perform int-to-double
- // conversion.
- if (elements_kind == EXTERNAL_INT_ELEMENTS) {
- __ push(eax);
- __ fild_s(Operand(esp, 0));
- __ pop(eax);
- } else {
- ASSERT_EQ(EXTERNAL_UNSIGNED_INT_ELEMENTS, elements_kind);
- // Need to zero-extend the value.
- // There's no fild variant for unsigned values, so zero-extend
- // to a 64-bit int manually.
- __ push(Immediate(0));
- __ push(eax);
- __ fild_d(Operand(esp, 0));
- __ pop(eax);
- __ pop(eax);
- }
- // FP(0): value
- __ AllocateHeapNumber(eax, ebx, edi, &failed_allocation);
- // Set the value.
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- __ ret(0);
- } else if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
- elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
- // For the floating-point array type, we need to always allocate a
- // HeapNumber.
- __ AllocateHeapNumber(eax, ebx, edi, &failed_allocation);
- // Set the value.
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- __ ret(0);
- } else {
- __ SmiTag(eax);
- __ ret(0);
- }
-
- // If we fail allocation of the HeapNumber, we still have a value on
- // top of the FPU stack. Remove it.
- __ bind(&failed_allocation);
- __ fstp(0);
- // Fall through to slow case.
-
- // Slow case: Jump to runtime.
- __ bind(&slow);
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->keyed_load_external_array_slow(), 1);
-
- // ----------- S t a t e -------------
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
-
- Handle<Code> ic = masm->isolate()->builtins()->KeyedLoadIC_Slow();
- __ jmp(ic, RelocInfo::CODE_TARGET);
-
- // ----------- S t a t e -------------
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
-
- // Miss case: Jump to runtime.
- __ bind(&miss_force_generic);
- Handle<Code> miss_ic =
- masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
- __ jmp(miss_ic, RelocInfo::CODE_TARGET);
-}
-
-
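
// A hedged note on the boxing test in the loader deleted above: a 32-bit
// value fits an ia32 smi only if it survives the one-bit tag shift, i.e.
// signed values must lie in [-2^30, 2^30) and unsigned values must have the
// top two bits clear:
#include <stdint.h>
static inline bool FitsInSmi(int32_t v) {
  return v >= -(1 << 30) && v < (1 << 30);  // cmp eax, 0xc0000000 / j(sign)
}
static inline bool FitsInUnsignedSmi(uint32_t v) {
  return (v & 0xc0000000u) == 0;            // test eax, 0xc0000000
}
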
void KeyedStoreStubCompiler::GenerateStoreExternalArray(
MacroAssembler* masm,
ElementsKind elements_kind) {
@@ -3974,106 +3532,6 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
}
-void KeyedLoadStubCompiler::GenerateLoadFastElement(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label miss_force_generic;
-
- // This stub is meant to be tail-jumped to, the receiver must already
- // have been verified by the caller to not be a smi.
-
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, ecx, eax, xmm0, xmm1, &miss_force_generic);
-
- // Get the elements array.
- __ mov(eax, FieldOperand(edx, JSObject::kElementsOffset));
- __ AssertFastElements(eax);
-
- // Check that the key is within bounds.
- __ cmp(ecx, FieldOperand(eax, FixedArray::kLengthOffset));
- __ j(above_equal, &miss_force_generic);
-
- // Load the result and make sure it's not the hole.
- __ mov(ebx, Operand(eax, ecx, times_2,
- FixedArray::kHeaderSize - kHeapObjectTag));
- __ cmp(ebx, masm->isolate()->factory()->the_hole_value());
- __ j(equal, &miss_force_generic);
- __ mov(eax, ebx);
- __ ret(0);
-
- __ bind(&miss_force_generic);
- Handle<Code> miss_ic =
- masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
- __ jmp(miss_ic, RelocInfo::CODE_TARGET);
-}
-
-
-void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(
- MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- ecx : key
- // -- edx : receiver
- // -- esp[0] : return address
- // -----------------------------------
- Label miss_force_generic, slow_allocate_heapnumber;
-
- // This stub is meant to be tail-jumped to, the receiver must already
- // have been verified by the caller to not be a smi.
-
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, ecx, eax, xmm0, xmm1, &miss_force_generic);
-
- // Get the elements array.
- __ mov(eax, FieldOperand(edx, JSObject::kElementsOffset));
- __ AssertFastElements(eax);
-
- // Check that the key is within bounds.
- __ cmp(ecx, FieldOperand(eax, FixedDoubleArray::kLengthOffset));
- __ j(above_equal, &miss_force_generic);
-
- // Check for the hole
- uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
- __ cmp(FieldOperand(eax, ecx, times_4, offset), Immediate(kHoleNanUpper32));
- __ j(equal, &miss_force_generic);
-
- // Always allocate a heap number for the result.
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope use_sse2(SSE2);
- __ movdbl(xmm0, FieldOperand(eax, ecx, times_4,
- FixedDoubleArray::kHeaderSize));
- } else {
- __ fld_d(FieldOperand(eax, ecx, times_4, FixedDoubleArray::kHeaderSize));
- }
- __ AllocateHeapNumber(eax, ebx, edi, &slow_allocate_heapnumber);
- // Set the value.
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope use_sse2(SSE2);
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
- } else {
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- }
- __ ret(0);
-
- __ bind(&slow_allocate_heapnumber);
- // A value was pushed on the floating point stack before the allocation, if
- // the allocation fails it needs to be removed.
- if (!CpuFeatures::IsSupported(SSE2)) {
- __ fstp(0);
- }
- Handle<Code> slow_ic =
- masm->isolate()->builtins()->KeyedLoadIC_Slow();
- __ jmp(slow_ic, RelocInfo::CODE_TARGET);
-
- __ bind(&miss_force_generic);
- Handle<Code> miss_ic =
- masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
- __ jmp(miss_ic, RelocInfo::CODE_TARGET);
-}
-
-
void KeyedStoreStubCompiler::GenerateStoreFastElement(
MacroAssembler* masm,
bool is_js_array,
@@ -4316,13 +3774,22 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
// ecx: key
// edx: receiver
// edi: elements
- // Initialize the new FixedDoubleArray. Leave elements unitialized for
- // efficiency, they are guaranteed to be initialized before use.
+ // Initialize the new FixedDoubleArray.
__ mov(FieldOperand(edi, JSObject::kMapOffset),
Immediate(masm->isolate()->factory()->fixed_double_array_map()));
__ mov(FieldOperand(edi, FixedDoubleArray::kLengthOffset),
Immediate(Smi::FromInt(JSArray::kPreallocatedArrayElements)));
+ __ StoreNumberToDoubleElements(eax, edi, ecx, ebx, xmm0,
+ &transition_elements_kind, true);
+
+ for (int i = 1; i < JSArray::kPreallocatedArrayElements; i++) {
+ int offset = FixedDoubleArray::OffsetOfElementAt(i);
+ __ mov(FieldOperand(edi, offset), Immediate(kHoleNanLower32));
+ __ mov(FieldOperand(edi, offset + kPointerSize),
+ Immediate(kHoleNanUpper32));
+ }
+
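
// A hedged note on the loop above: kHoleNanLower32 and kHoleNanUpper32 are
// the two halves of the hole NaN, the bit pattern V8 reserves to mean "no
// element here"; each preallocated double slot gets both words:
#include <stdint.h>
static inline uint64_t HoleNanBits(uint32_t kHoleNanLower32,
                                   uint32_t kHoleNanUpper32) {
  return (static_cast<uint64_t>(kHoleNanUpper32) << 32) | kHoleNanLower32;
}
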
// Install the new backing store in the JSArray.
__ mov(FieldOperand(edx, JSObject::kElementsOffset), edi);
__ RecordWriteField(edx, JSObject::kElementsOffset, edi, ebx,
@@ -4332,7 +3799,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
__ add(FieldOperand(edx, JSArray::kLengthOffset),
Immediate(Smi::FromInt(1)));
__ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
- __ jmp(&finish_store);
+ __ ret(0);
__ bind(&check_capacity);
// eax: value
diff --git a/src/3rdparty/v8/src/ic-inl.h b/src/3rdparty/v8/src/ic-inl.h
index 49b6ef9..9439792 100644
--- a/src/3rdparty/v8/src/ic-inl.h
+++ b/src/3rdparty/v8/src/ic-inl.h
@@ -43,7 +43,8 @@ Address IC::address() const {
Address result = Assembler::target_address_from_return_address(pc());
#ifdef ENABLE_DEBUGGER_SUPPORT
- Debug* debug = Isolate::Current()->debug();
+ ASSERT(Isolate::Current() == isolate());
+ Debug* debug = isolate()->debug();
// First check if any break points are active if not just return the address
// of the call.
if (!debug->has_break_points()) return result;
@@ -106,8 +107,9 @@ InlineCacheHolderFlag IC::GetCodeCacheForObject(Object* object,
return GetCodeCacheForObject(JSObject::cast(object), holder);
}
// If the object is a value, we use the prototype map for the cache.
- ASSERT(object->IsString() || object->IsNumber() || object->IsBoolean());
- return PROTOTYPE_MAP;
+ ASSERT(object->IsString() || object->IsSymbol() ||
+ object->IsNumber() || object->IsBoolean());
+ return DELEGATE_MAP;
}
@@ -122,14 +124,16 @@ InlineCacheHolderFlag IC::GetCodeCacheForObject(JSObject* object,
!object->HasFastProperties() &&
!object->IsJSGlobalProxy() &&
!object->IsJSGlobalObject()) {
- return PROTOTYPE_MAP;
+ return DELEGATE_MAP;
}
return OWN_MAP;
}
-JSObject* IC::GetCodeCacheHolder(Object* object, InlineCacheHolderFlag holder) {
- Object* map_owner = (holder == OWN_MAP ? object : object->GetPrototype());
+JSObject* IC::GetCodeCacheHolder(Isolate* isolate,
+ Object* object,
+ InlineCacheHolderFlag holder) {
+ Object* map_owner = holder == OWN_MAP ? object : object->GetDelegate(isolate);
ASSERT(map_owner->IsJSObject());
return JSObject::cast(map_owner);
}
diff --git a/src/3rdparty/v8/src/ic.cc b/src/3rdparty/v8/src/ic.cc
index fe31ef1..a9163db 100644
--- a/src/3rdparty/v8/src/ic.cc
+++ b/src/3rdparty/v8/src/ic.cc
@@ -43,16 +43,17 @@ namespace internal {
char IC::TransitionMarkFromState(IC::State state) {
switch (state) {
case UNINITIALIZED: return '0';
- case PREMONOMORPHIC: return 'P';
+ case PREMONOMORPHIC: return '.';
case MONOMORPHIC: return '1';
case MONOMORPHIC_PROTOTYPE_FAILURE: return '^';
- case MEGAMORPHIC: return IsGeneric() ? 'G' : 'N';
+ case POLYMORPHIC: return 'P';
+ case MEGAMORPHIC: return 'N';
+ case GENERIC: return 'G';
// We never see the debugger states here, because the state is
// computed from the original code - not the patched code. Let
// these cases fall through to the unreachable code below.
- case DEBUG_BREAK: break;
- case DEBUG_PREPARE_STEP_IN: break;
+ case DEBUG_STUB: break;
}
UNREACHABLE();
return 0;
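
// A legend for the one-character trace marks, read off the switch above:
//   '0' uninitialized   '.' premonomorphic   '1' monomorphic
//   '^' monomorphic prototype failure        'P' polymorphic
//   'N' megamorphic     'G' generic
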
@@ -63,15 +64,14 @@ void IC::TraceIC(const char* type,
State old_state,
Code* new_target) {
if (FLAG_trace_ic) {
- State new_state = StateFrom(new_target,
- HEAP->undefined_value(),
- HEAP->undefined_value());
+ Object* undef = new_target->GetHeap()->undefined_value();
+ State new_state = StateFrom(new_target, undef, undef);
PrintF("[%s in ", type);
- StackFrameIterator it;
+ Isolate* isolate = new_target->GetIsolate();
+ StackFrameIterator it(isolate);
while (it.frame()->fp() != this->fp()) it.Advance();
StackFrame* raw_frame = it.frame();
if (raw_frame->is_internal()) {
- Isolate* isolate = new_target->GetIsolate();
Code* apply_builtin = isolate->builtins()->builtin(
Builtins::kFunctionApply);
if (raw_frame->unchecked_code() == apply_builtin) {
@@ -80,7 +80,7 @@ void IC::TraceIC(const char* type,
raw_frame = it.frame();
}
}
- JavaScriptFrame::PrintTop(stdout, false, true);
+ JavaScriptFrame::PrintTop(isolate, stdout, false, true);
bool new_can_grow =
Code::GetKeyedAccessGrowMode(new_target->extra_ic_state()) ==
ALLOW_JSARRAY_GROWTH;
@@ -93,42 +93,41 @@ void IC::TraceIC(const char* type,
}
}
-#define TRACE_GENERIC_IC(type, reason) \
+#define TRACE_GENERIC_IC(isolate, type, reason) \
do { \
if (FLAG_trace_ic) { \
PrintF("[%s patching generic stub in ", type); \
- JavaScriptFrame::PrintTop(stdout, false, true); \
+ JavaScriptFrame::PrintTop(isolate, stdout, false, true); \
PrintF(" (%s)]\n", reason); \
} \
} while (false)
#else
-#define TRACE_GENERIC_IC(type, reason)
+#define TRACE_GENERIC_IC(isolate, type, reason)
#endif // DEBUG
#define TRACE_IC(type, name, old_state, new_target) \
ASSERT((TraceIC(type, name, old_state, new_target), true))
IC::IC(FrameDepth depth, Isolate* isolate) : isolate_(isolate) {
- ASSERT(isolate == Isolate::Current());
- // To improve the performance of the (much used) IC code, we unfold
- // a few levels of the stack frame iteration code. This yields a
- // ~35% speedup when running DeltaBlue with the '--nouse-ic' flag.
+ // To improve the performance of the (much used) IC code, we unfold a few
+ // levels of the stack frame iteration code. This yields a ~35% speedup when
+ // running DeltaBlue and a ~25% speedup of gbemu with the '--nouse-ic' flag.
const Address entry =
Isolate::c_entry_fp(isolate->thread_local_top());
Address* pc_address =
reinterpret_cast<Address*>(entry + ExitFrameConstants::kCallerPCOffset);
Address fp = Memory::Address_at(entry + ExitFrameConstants::kCallerFPOffset);
- // If there's another JavaScript frame on the stack, we need to look
- // one frame further down the stack to find the frame pointer and
- // the return address stack slot.
+ // If there's another JavaScript frame on the stack or a
+ // StubFailureTrampoline, we need to look one frame further down the stack to
+ // find the frame pointer and the return address stack slot.
if (depth == EXTRA_CALL_FRAME) {
const int kCallerPCOffset = StandardFrameConstants::kCallerPCOffset;
pc_address = reinterpret_cast<Address*>(fp + kCallerPCOffset);
fp = Memory::Address_at(fp + StandardFrameConstants::kCallerFPOffset);
}
#ifdef DEBUG
- StackFrameIterator it;
+ StackFrameIterator it(isolate);
for (int i = 0; i < depth + 1; i++) it.Advance();
StackFrame* frame = it.frame();
ASSERT(fp == frame->fp() && pc_address == frame->pc_address());
@@ -140,11 +139,11 @@ IC::IC(FrameDepth depth, Isolate* isolate) : isolate_(isolate) {
#ifdef ENABLE_DEBUGGER_SUPPORT
Address IC::OriginalCodeAddress() const {
- HandleScope scope;
+ HandleScope scope(isolate());
// Compute the JavaScript frame for the frame pointer of this IC
// structure. We need this to be able to find the function
// corresponding to the frame.
- StackFrameIterator it;
+ StackFrameIterator it(isolate());
while (it.frame()->fp() != this->fp()) it.Advance();
JavaScriptFrame* frame = JavaScriptFrame::cast(it.frame());
// Find the function on the stack and both the active code for the
@@ -169,42 +168,23 @@ Address IC::OriginalCodeAddress() const {
#endif
-static bool HasNormalObjectsInPrototypeChain(Isolate* isolate,
- LookupResult* lookup,
- Object* receiver) {
- Object* end = lookup->IsProperty()
- ? lookup->holder() : Object::cast(isolate->heap()->null_value());
- for (Object* current = receiver;
- current != end;
- current = current->GetPrototype()) {
- if (current->IsJSObject() &&
- !JSObject::cast(current)->HasFastProperties() &&
- !current->IsJSGlobalProxy() &&
- !current->IsJSGlobalObject()) {
- return true;
- }
- }
-
- return false;
-}
-
-
static bool TryRemoveInvalidPrototypeDependentStub(Code* target,
Object* receiver,
Object* name) {
InlineCacheHolderFlag cache_holder =
Code::ExtractCacheHolderFromFlags(target->flags());
+ Isolate* isolate = target->GetIsolate();
if (cache_holder == OWN_MAP && !receiver->IsJSObject()) {
// The stub was generated for JSObject but called for non-JSObject.
// IC::GetCodeCacheHolder is not applicable.
return false;
- } else if (cache_holder == PROTOTYPE_MAP &&
- receiver->GetPrototype()->IsNull()) {
+ } else if (cache_holder == DELEGATE_MAP &&
+ receiver->GetPrototype(isolate)->IsNull()) {
// IC::GetCodeCacheHolder is not applicable.
return false;
}
- Map* map = IC::GetCodeCacheHolder(receiver, cache_holder)->map();
+ Map* map = IC::GetCodeCacheHolder(isolate, receiver, cache_holder)->map();
// Decide whether the inline cache failed because of changes to the
// receiver itself or changes to one of its prototypes.
@@ -273,7 +253,7 @@ RelocInfo::Mode IC::ComputeMode() {
if (info->pc() == addr) return info->rmode();
}
UNREACHABLE();
- return RelocInfo::NONE;
+ return RelocInfo::NONE32;
}
@@ -310,7 +290,8 @@ void IC::PostPatching(Address address, Code* target, Code* old_target) {
if (FLAG_type_info_threshold == 0 && !FLAG_watch_ic_patching) {
return;
}
- Code* host = target->GetHeap()->isolate()->
+ Isolate* isolate = target->GetHeap()->isolate();
+ Code* host = isolate->
inner_pointer_to_code_cache()->GetCacheEntry(address)->code;
if (host->kind() != Code::FUNCTION) return;
@@ -333,7 +314,7 @@ void IC::PostPatching(Address address, Code* target, Code* old_target) {
}
if (FLAG_watch_ic_patching) {
host->set_profiler_ticks(0);
- Isolate::Current()->runtime_profiler()->NotifyICChanged();
+ isolate->runtime_profiler()->NotifyICChanged();
}
// TODO(2029): When an optimized function is patched, it would
// be nice to propagate the corresponding type information to its
@@ -345,15 +326,13 @@ void IC::Clear(Address address) {
Code* target = GetTargetAtAddress(address);
// Don't clear debug break inline cache as it will remove the break point.
- if (target->ic_state() == DEBUG_BREAK) return;
+ if (target->is_debug_break()) return;
switch (target->kind()) {
case Code::LOAD_IC: return LoadIC::Clear(address, target);
- case Code::KEYED_LOAD_IC:
- return KeyedLoadIC::Clear(address, target);
+ case Code::KEYED_LOAD_IC: return KeyedLoadIC::Clear(address, target);
case Code::STORE_IC: return StoreIC::Clear(address, target);
- case Code::KEYED_STORE_IC:
- return KeyedStoreIC::Clear(address, target);
+ case Code::KEYED_STORE_IC: return KeyedStoreIC::Clear(address, target);
case Code::CALL_IC: return CallIC::Clear(address, target);
case Code::KEYED_CALL_IC: return KeyedCallIC::Clear(address, target);
case Code::COMPARE_IC: return CompareIC::Clear(address, target);
@@ -385,13 +364,13 @@ void KeyedLoadIC::Clear(Address address, Code* target) {
// Make sure to also clear the map used in inline fast cases. If we
// do not clear these maps, cached code can keep objects alive
// through the embedded maps.
- SetTargetAtAddress(address, initialize_stub());
+ SetTargetAtAddress(address, *initialize_stub());
}
void LoadIC::Clear(Address address, Code* target) {
if (target->ic_state() == UNINITIALIZED) return;
- SetTargetAtAddress(address, initialize_stub());
+ SetTargetAtAddress(address, *initialize_stub());
}
@@ -399,8 +378,8 @@ void StoreIC::Clear(Address address, Code* target) {
if (target->ic_state() == UNINITIALIZED) return;
SetTargetAtAddress(address,
(Code::GetStrictMode(target->extra_ic_state()) == kStrictMode)
- ? initialize_stub_strict()
- : initialize_stub());
+ ? *initialize_stub_strict()
+ : *initialize_stub());
}
@@ -408,17 +387,19 @@ void KeyedStoreIC::Clear(Address address, Code* target) {
if (target->ic_state() == UNINITIALIZED) return;
SetTargetAtAddress(address,
(Code::GetStrictMode(target->extra_ic_state()) == kStrictMode)
- ? initialize_stub_strict()
- : initialize_stub());
+ ? *initialize_stub_strict()
+ : *initialize_stub());
}
void CompareIC::Clear(Address address, Code* target) {
- // Only clear ICCompareStubs, we currently cannot clear generic CompareStubs.
- if (target->major_key() != CodeStub::CompareIC) return;
+ ASSERT(target->major_key() == CodeStub::CompareIC);
+ CompareIC::State handler_state;
+ Token::Value op;
+ ICCompareStub::DecodeMinorKey(target->stub_info(), NULL, NULL,
+ &handler_state, &op);
// Only clear CompareICs that can retain objects.
- if (target->compare_state() != KNOWN_OBJECTS) return;
- Token::Value op = CompareIC::ComputeOperation(target);
+ if (handler_state != KNOWN_OBJECT) return;
SetTargetAtAddress(address, GetRawUninitialized(op));
PatchInlinedSmiCode(address, DISABLE_INLINED_SMI_CHECK);
}
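
The rewritten CompareIC::Clear above no longer reads a dedicated compare_state off the code object; it decodes the token operation and handler state from the stub's packed minor key, passing NULL for outputs it does not need. A minimal standalone sketch of that bit-packing idiom, with two assumed 8-bit fields rather than V8's actual minor-key layout:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    // Two illustrative 8-bit fields packed into one stub key.
    static uint32_t EncodeMinorKey(uint32_t op, uint32_t state) {
      return (op & 0xFF) | ((state & 0xFF) << 8);
    }

    static void DecodeMinorKey(uint32_t key, uint32_t* op, uint32_t* state) {
      if (op != NULL) *op = key & 0xFF;              // NULL means "not wanted"
      if (state != NULL) *state = (key >> 8) & 0xFF;
    }

    int main() {
      uint32_t op = 0, state = 0;
      DecodeMinorKey(EncodeMinorKey(7, 3), &op, &state);
      assert(op == 7 && state == 3);
      DecodeMinorKey(EncodeMinorKey(7, 3), NULL, &state);  // skip the op output
      return 0;
    }
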
@@ -454,7 +435,7 @@ static void LookupForRead(Handle<Object> object,
return;
}
- Handle<Object> proto(holder->GetPrototype());
+ Handle<Object> proto(holder->GetPrototype(), name->GetIsolate());
if (proto->IsNull()) {
ASSERT(!lookup->IsFound());
return;
@@ -472,7 +453,7 @@ Handle<Object> CallICBase::TryCallAsFunction(Handle<Object> object) {
// Patch the receiver and use the delegate as the function to
// invoke. This is used for invoking objects as if they were functions.
const int argc = target()->arguments_count();
- StackFrameLocator locator;
+ StackFrameLocator locator(isolate());
JavaScriptFrame* frame = locator.FindJavaScriptFrame(0);
int index = frame->ComputeExpressionsCount() - (argc + 1);
frame->SetExpression(index, *object);
@@ -485,7 +466,8 @@ Handle<Object> CallICBase::TryCallAsFunction(Handle<Object> object) {
void CallICBase::ReceiverToObjectIfRequired(Handle<Object> callee,
Handle<Object> object) {
while (callee->IsJSFunctionProxy()) {
- callee = Handle<Object>(JSFunctionProxy::cast(*callee)->call_trap());
+ callee = Handle<Object>(JSFunctionProxy::cast(*callee)->call_trap(),
+ isolate());
}
if (callee->IsJSFunction()) {
@@ -500,7 +482,7 @@ void CallICBase::ReceiverToObjectIfRequired(Handle<Object> callee,
if (object->IsString() || object->IsNumber() || object->IsBoolean()) {
// Change the receiver to the result of calling ToObject on it.
const int argc = this->target()->arguments_count();
- StackFrameLocator locator;
+ StackFrameLocator locator(isolate());
JavaScriptFrame* frame = locator.FindJavaScriptFrame(0);
int index = frame->ComputeExpressionsCount() - (argc + 1);
frame->SetExpression(index, *isolate()->factory()->ToObject(object));
@@ -540,7 +522,7 @@ MaybeObject* CallICBase::LoadFunction(State state,
if (!lookup.IsFound()) {
// If the object does not have the requested property, check which
// exception we need to throw.
- return IsContextual(object)
+ return IsUndeclaredGlobal(object)
? ReferenceError("not_defined", name)
: TypeError("undefined_method", object, name);
}
@@ -559,7 +541,7 @@ MaybeObject* CallICBase::LoadFunction(State state,
if (lookup.IsInterceptor() && attr == ABSENT) {
// If the object does not have the requested property, check which
// exception we need to throw.
- return IsContextual(object)
+ return IsUndeclaredGlobal(object)
? ReferenceError("not_defined", name)
: TypeError("undefined_method", object, name);
}
@@ -646,7 +628,7 @@ Handle<Code> CallICBase::ComputeMonomorphicStub(LookupResult* lookup,
Handle<JSObject> holder(lookup->holder());
switch (lookup->type()) {
case FIELD: {
- int index = lookup->GetFieldIndex();
+ PropertyIndex index = lookup->GetFieldIndex();
return isolate()->stub_cache()->ComputeCallField(
argc, kind_, extra_state, name, object, holder, index);
}
@@ -699,17 +681,8 @@ void CallICBase::UpdateCaches(LookupResult* lookup,
// Bail out if we didn't find a result.
if (!lookup->IsProperty() || !lookup->IsCacheable()) return;
- if (lookup->holder() != *object &&
- HasNormalObjectsInPrototypeChain(
- isolate(), lookup, object->GetPrototype())) {
- // Suppress optimization for prototype chains with slow properties objects
- // in the middle.
- return;
- }
-
// Compute the number of arguments.
int argc = target()->arguments_count();
- bool had_proto_failure = false;
Handle<Code> code;
if (state == UNINITIALIZED) {
// This is the first time we execute this inline cache.
@@ -726,7 +699,7 @@ void CallICBase::UpdateCaches(LookupResult* lookup,
TryRemoveInvalidPrototypeDependentStub(target(),
*object,
*name)) {
- had_proto_failure = true;
+ state = MONOMORPHIC_PROTOTYPE_FAILURE;
code = ComputeMonomorphicStub(lookup, state, extra_ic_state,
object, name);
} else {
@@ -742,22 +715,39 @@ void CallICBase::UpdateCaches(LookupResult* lookup,
if (code.is_null()) return;
// Patch the call site depending on the state of the cache.
- if (state == UNINITIALIZED ||
- state == PREMONOMORPHIC ||
- state == MONOMORPHIC ||
- state == MONOMORPHIC_PROTOTYPE_FAILURE) {
- set_target(*code);
- } else if (state == MEGAMORPHIC) {
- // Cache code holding map should be consistent with
- // GenerateMonomorphicCacheProbe. It is not the map which holds the stub.
- Handle<JSObject> cache_object = object->IsJSObject()
- ? Handle<JSObject>::cast(object)
- : Handle<JSObject>(JSObject::cast(object->GetPrototype()));
- // Update the stub cache.
- isolate()->stub_cache()->Set(*name, cache_object->map(), *code);
- }
-
- if (had_proto_failure) state = MONOMORPHIC_PROTOTYPE_FAILURE;
+ switch (state) {
+ case UNINITIALIZED:
+ case MONOMORPHIC_PROTOTYPE_FAILURE:
+ case PREMONOMORPHIC:
+ set_target(*code);
+ break;
+ case MONOMORPHIC:
+ if (code->ic_state() != MONOMORPHIC) {
+ Map* map = target()->FindFirstMap();
+ if (map != NULL) {
+ UpdateMegamorphicCache(map, *name, target());
+ }
+ }
+ set_target(*code);
+ break;
+ case MEGAMORPHIC: {
+ // Cache code holding map should be consistent with
+ // GenerateMonomorphicCacheProbe. It is not the map which holds the stub.
+ Handle<JSObject> cache_object = object->IsJSObject()
+ ? Handle<JSObject>::cast(object)
+ : Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate())));
+ // Update the stub cache.
+ UpdateMegamorphicCache(cache_object->map(), *name, *code);
+ break;
+ }
+ case DEBUG_STUB:
+ break;
+ case POLYMORPHIC:
+ case GENERIC:
+ UNREACHABLE();
+ break;
+ }
+
TRACE_IC(kind_ == Code::CALL_IC ? "CallIC" : "KeyedCallIC",
name, state, target());
}
@@ -766,7 +756,7 @@ void CallICBase::UpdateCaches(LookupResult* lookup,
MaybeObject* KeyedCallIC::LoadFunction(State state,
Handle<Object> object,
Handle<Object> key) {
- if (key->IsSymbol()) {
+ if (key->IsInternalizedString()) {
return CallICBase::LoadFunction(state,
Code::kNoExtraICState,
object,
@@ -777,25 +767,26 @@ MaybeObject* KeyedCallIC::LoadFunction(State state,
return TypeError("non_object_property_call", object, key);
}
- if (FLAG_use_ic && state != MEGAMORPHIC && object->IsHeapObject()) {
+ bool use_ic = FLAG_use_ic && !object->IsAccessCheckNeeded();
+ ASSERT(!(use_ic && object->IsJSGlobalProxy()));
+
+ if (use_ic && state != MEGAMORPHIC) {
int argc = target()->arguments_count();
- Handle<Map> map =
- isolate()->factory()->non_strict_arguments_elements_map();
- if (object->IsJSObject() &&
- Handle<JSObject>::cast(object)->elements()->map() == *map) {
- Handle<Code> code = isolate()->stub_cache()->ComputeCallArguments(
- argc, Code::KEYED_CALL_IC);
- set_target(*code);
- TRACE_IC("KeyedCallIC", key, state, target());
- } else if (!object->IsAccessCheckNeeded()) {
- Handle<Code> code = isolate()->stub_cache()->ComputeCallMegamorphic(
- argc, Code::KEYED_CALL_IC, Code::kNoExtraICState);
- set_target(*code);
- TRACE_IC("KeyedCallIC", key, state, target());
+ Handle<Code> stub = isolate()->stub_cache()->ComputeCallMegamorphic(
+ argc, Code::KEYED_CALL_IC, Code::kNoExtraICState);
+ if (object->IsJSObject()) {
+ Handle<JSObject> receiver = Handle<JSObject>::cast(object);
+ if (receiver->elements()->map() ==
+ isolate()->heap()->non_strict_arguments_elements_map()) {
+ stub = isolate()->stub_cache()->ComputeCallArguments(argc);
+ }
}
+ ASSERT(!stub.is_null());
+ set_target(*stub);
+ TRACE_IC("KeyedCallIC", key, state, target());
}
- Handle<Object> result = GetProperty(object, key);
+ Handle<Object> result = GetProperty(isolate(), object, key);
RETURN_IF_EMPTY_HANDLE(isolate(), result);
// Make receiver an object if the callee requires it. Strict mode or builtin
@@ -826,17 +817,18 @@ MaybeObject* LoadIC::Load(State state,
// objects is read-only and therefore always returns the length of
// the underlying string value. See ECMA-262 15.5.5.1.
if ((object->IsString() || object->IsStringWrapper()) &&
- name->Equals(isolate()->heap()->length_symbol())) {
+ name->Equals(isolate()->heap()->length_string())) {
Handle<Code> stub;
if (state == UNINITIALIZED) {
stub = pre_monomorphic_stub();
} else if (state == PREMONOMORPHIC) {
- stub = object->IsString()
- ? isolate()->builtins()->LoadIC_StringLength()
- : isolate()->builtins()->LoadIC_StringWrapperLength();
+ StringLengthStub string_length_stub(kind(), !object->IsString());
+ stub = string_length_stub.GetCode(isolate());
} else if (state == MONOMORPHIC && object->IsStringWrapper()) {
- stub = isolate()->builtins()->LoadIC_StringWrapperLength();
+ StringLengthStub string_length_stub(kind(), true);
+ stub = string_length_stub.GetCode(isolate());
} else if (state != MEGAMORPHIC) {
+ ASSERT(state != GENERIC);
stub = megamorphic_stub();
}
if (!stub.is_null()) {
@@ -847,20 +839,22 @@ MaybeObject* LoadIC::Load(State state,
}
// Get the string if we have a string wrapper object.
Handle<Object> string = object->IsJSValue()
- ? Handle<Object>(Handle<JSValue>::cast(object)->value())
+ ? Handle<Object>(Handle<JSValue>::cast(object)->value(), isolate())
: object;
return Smi::FromInt(String::cast(*string)->length());
}
// Use specialized code for getting the length of arrays.
if (object->IsJSArray() &&
- name->Equals(isolate()->heap()->length_symbol())) {
+ name->Equals(isolate()->heap()->length_string())) {
Handle<Code> stub;
if (state == UNINITIALIZED) {
stub = pre_monomorphic_stub();
} else if (state == PREMONOMORPHIC) {
- stub = isolate()->builtins()->LoadIC_ArrayLength();
+ ArrayLengthStub array_length_stub(kind());
+ stub = array_length_stub.GetCode(isolate());
} else if (state != MEGAMORPHIC) {
+ ASSERT(state != GENERIC);
stub = megamorphic_stub();
}
if (!stub.is_null()) {
@@ -874,14 +868,16 @@ MaybeObject* LoadIC::Load(State state,
// Use specialized code for getting prototype of functions.
if (object->IsJSFunction() &&
- name->Equals(isolate()->heap()->prototype_symbol()) &&
+ name->Equals(isolate()->heap()->prototype_string()) &&
Handle<JSFunction>::cast(object)->should_have_prototype()) {
Handle<Code> stub;
if (state == UNINITIALIZED) {
stub = pre_monomorphic_stub();
} else if (state == PREMONOMORPHIC) {
- stub = isolate()->builtins()->LoadIC_FunctionPrototype();
+ FunctionPrototypeStub function_prototype_stub(kind());
+ stub = function_prototype_stub.GetCode(isolate());
} else if (state != MEGAMORPHIC) {
+ ASSERT(state != GENERIC);
stub = megamorphic_stub();
}
if (!stub.is_null()) {
@@ -895,9 +891,13 @@ MaybeObject* LoadIC::Load(State state,
}
// Check if the name is trivially convertible to an index and get
- // the element if so.
+ // the element or char if so.
uint32_t index;
- if (name->AsArrayIndex(&index)) return object->GetElement(index);
+ if (kind() == Code::KEYED_LOAD_IC && name->AsArrayIndex(&index)) {
+ // Rewrite to the generic keyed load stub.
+ if (FLAG_use_ic) set_target(*generic_stub());
+ return Runtime::GetElementOrCharAt(isolate(), object, index);
+ }
// Named lookup in the object.
LookupResult lookup(isolate());
@@ -905,7 +905,7 @@ MaybeObject* LoadIC::Load(State state,
// If we did not find a property, check if we need to throw an exception.
if (!lookup.IsFound()) {
- if (IsContextual(object)) {
+ if (IsUndeclaredGlobal(object)) {
return ReferenceError("not_defined", name);
}
LOG(isolate(), SuspectReadEvent(*name, *object));
@@ -924,7 +924,7 @@ MaybeObject* LoadIC::Load(State state,
RETURN_IF_EMPTY_HANDLE(isolate(), result);
// If the property is not present, check if we need to throw an
// exception.
- if (attr == ABSENT && IsContextual(object)) {
+ if (attr == ABSENT && IsUndeclaredGlobal(object)) {
return ReferenceError("not_defined", name);
}
return *result;
@@ -935,6 +935,173 @@ MaybeObject* LoadIC::Load(State state,
}
+static bool AddOneReceiverMapIfMissing(MapHandleList* receiver_maps,
+ Handle<Map> new_receiver_map) {
+ ASSERT(!new_receiver_map.is_null());
+ for (int current = 0; current < receiver_maps->length(); ++current) {
+ if (!receiver_maps->at(current).is_null() &&
+ receiver_maps->at(current).is_identical_to(new_receiver_map)) {
+ return false;
+ }
+ }
+ receiver_maps->Add(new_receiver_map);
+ return true;
+}
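
AddOneReceiverMapIfMissing is the dedup-append primitive that every polymorphic path below leans on; its boolean result is how callers distinguish a genuinely new receiver map from the same map missing twice. A standalone analog using std::vector, with a plain pointer standing in for a Map handle:

    #include <cassert>
    #include <cstddef>
    #include <vector>

    typedef const void* MapRef;  // stand-in for Handle<Map>

    static bool AddOneIfMissing(std::vector<MapRef>* maps, MapRef new_map) {
      for (std::size_t i = 0; i < maps->size(); ++i) {
        if ((*maps)[i] == new_map) return false;  // already recorded
      }
      maps->push_back(new_map);
      return true;  // a genuinely new receiver map
    }

    int main() {
      int a = 0, b = 0;
      std::vector<MapRef> maps;
      assert(AddOneIfMissing(&maps, &a));
      assert(AddOneIfMissing(&maps, &b));
      assert(!AddOneIfMissing(&maps, &a));  // a repeat: callers go generic
      return 0;
    }
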
+
+
+bool IC::UpdatePolymorphicIC(State state,
+ StrictModeFlag strict_mode,
+ Handle<JSObject> receiver,
+ Handle<String> name,
+ Handle<Code> code) {
+ if (code->type() == Code::NORMAL) return false;
+ if (target()->ic_state() == MONOMORPHIC &&
+ target()->type() == Code::NORMAL) {
+ return false;
+ }
+ MapHandleList receiver_maps;
+ CodeHandleList handlers;
+ target()->FindAllMaps(&receiver_maps);
+ int number_of_maps = receiver_maps.length();
+ if (number_of_maps == 0 || number_of_maps >= 4) return false;
+
+ target()->FindAllCode(&handlers, receiver_maps.length());
+
+ if (!AddOneReceiverMapIfMissing(&receiver_maps,
+ Handle<Map>(receiver->map()))) {
+ return false;
+ }
+
+ handlers.Add(code);
+ Handle<Code> ic = isolate()->stub_cache()->ComputePolymorphicIC(
+ &receiver_maps, &handlers, name);
+ set_target(*ic);
+ return true;
+}
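
What UpdatePolymorphicIC assembles, a bounded list of receiver maps plus one handler per map, behaves like a linear map-check dispatch at the call site. A sketch of that shape with stand-in types; the bound of four mirrors the number_of_maps guard above:

    #include <cstddef>
    #include <vector>

    struct Receiver;                             // opaque stand-in types
    typedef const void* MapRef;
    typedef int (*Handler)(Receiver* receiver);

    struct PolymorphicStub {
      std::vector<MapRef> maps;       // at most four entries, as guarded above
      std::vector<Handler> handlers;  // one handler compiled per map

      int Dispatch(MapRef map, Receiver* receiver, Handler miss) const {
        for (std::size_t i = 0; i < maps.size(); ++i) {
          if (maps[i] == map) return handlers[i](receiver);  // map check hit
        }
        return miss(receiver);  // unseen receiver map: take the miss path
      }
    };

    static int HandleA(Receiver*) { return 1; }
    static int Miss(Receiver*) { return -1; }

    int main() {
      int map_a = 0, map_b = 0;
      PolymorphicStub stub;
      stub.maps.push_back(&map_a);
      stub.handlers.push_back(HandleA);
      return (stub.Dispatch(&map_a, NULL, Miss) == 1 &&
              stub.Dispatch(&map_b, NULL, Miss) == -1) ? 0 : 1;
    }
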
+
+
+void LoadIC::UpdateMonomorphicIC(Handle<JSObject> receiver,
+ Handle<Code> handler,
+ Handle<String> name) {
+ if (handler->type() == Code::NORMAL) return set_target(*handler);
+ Handle<Code> ic = isolate()->stub_cache()->ComputeMonomorphicIC(
+ receiver, handler, name);
+ set_target(*ic);
+}
+
+
+void KeyedLoadIC::UpdateMonomorphicIC(Handle<JSObject> receiver,
+ Handle<Code> handler,
+ Handle<String> name) {
+ if (handler->type() == Code::NORMAL) return set_target(*handler);
+ Handle<Code> ic = isolate()->stub_cache()->ComputeKeyedMonomorphicIC(
+ receiver, handler, name);
+ set_target(*ic);
+}
+
+
+void IC::PatchCache(State state,
+ StrictModeFlag strict_mode,
+ Handle<JSObject> receiver,
+ Handle<String> name,
+ Handle<Code> code) {
+ switch (state) {
+ case UNINITIALIZED:
+ case PREMONOMORPHIC:
+ case MONOMORPHIC_PROTOTYPE_FAILURE:
+ UpdateMonomorphicIC(receiver, code, name);
+ break;
+ case MONOMORPHIC:
+ // Only move to megamorphic if the target changes.
+ if (target() != *code) {
+ if (target()->is_load_stub()) {
+ if (UpdatePolymorphicIC(state, strict_mode, receiver, name, code)) {
+ break;
+ }
+ }
+ // We are transitioning from the monomorphic to the megamorphic case.
+ // Place the stub compiled for the receiver into the stub cache.
+ Map* map = target()->FindFirstMap();
+ if (map != NULL) {
+ UpdateMegamorphicCache(map, *name, target());
+ }
+ UpdateMegamorphicCache(receiver->map(), *name, *code);
+ set_target((strict_mode == kStrictMode)
+ ? *megamorphic_stub_strict()
+ : *megamorphic_stub());
+ }
+ break;
+ case MEGAMORPHIC:
+ // Update the stub cache.
+ UpdateMegamorphicCache(receiver->map(), *name, *code);
+ break;
+ case POLYMORPHIC:
+ if (target()->is_load_stub()) {
+ if (UpdatePolymorphicIC(state, strict_mode, receiver, name, code)) {
+ break;
+ }
+ MapHandleList receiver_maps;
+ CodeHandleList handlers;
+ target()->FindAllMaps(&receiver_maps);
+ target()->FindAllCode(&handlers, receiver_maps.length());
+ for (int i = 0; i < receiver_maps.length(); i++) {
+ UpdateMegamorphicCache(*receiver_maps.at(i), *name, *handlers.at(i));
+ }
+ UpdateMegamorphicCache(receiver->map(), *name, *code);
+ set_target(*megamorphic_stub());
+ } else {
+ // When trying to patch a polymorphic keyed load/store element stub
+ // with anything other than another polymorphic stub, go generic.
+ set_target((strict_mode == kStrictMode)
+ ? *generic_stub_strict()
+ : *generic_stub());
+ }
+ break;
+ case DEBUG_STUB:
+ break;
+ case GENERIC:
+ UNREACHABLE();
+ break;
+ }
+}
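
PatchCache is now the single place the IC state machine lives for loads and stores. Reduced to a transition function on a miss, the policy looks like the sketch below; DEBUG_STUB, the strict-mode stub variants, and the keyed go-generic branch are deliberately omitted, so treat this as a simplification rather than the full switch:

    #include <cassert>

    enum State { UNINITIALIZED, PREMONOMORPHIC, MONOMORPHIC_PROTOTYPE_FAILURE,
                 MONOMORPHIC, POLYMORPHIC, MEGAMORPHIC };

    // can_grow_polymorphic stands for "UpdatePolymorphicIC accepted the new
    // (map, handler) pair", i.e. a load stub with fewer than four maps.
    static State OnMiss(State s, bool can_grow_polymorphic) {
      switch (s) {
        case UNINITIALIZED:
        case PREMONOMORPHIC:
        case MONOMORPHIC_PROTOTYPE_FAILURE:
          return MONOMORPHIC;
        case MONOMORPHIC:
        case POLYMORPHIC:
          return can_grow_polymorphic ? POLYMORPHIC : MEGAMORPHIC;
        case MEGAMORPHIC:
          return MEGAMORPHIC;  // fixed point: only the stub cache is updated
      }
      return MEGAMORPHIC;  // not reached
    }

    int main() {
      assert(OnMiss(UNINITIALIZED, false) == MONOMORPHIC);
      assert(OnMiss(MONOMORPHIC, true) == POLYMORPHIC);
      assert(OnMiss(POLYMORPHIC, false) == MEGAMORPHIC);
      assert(OnMiss(MEGAMORPHIC, true) == MEGAMORPHIC);
      return 0;
    }
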
+
+
+static void GetReceiverMapsForStub(Handle<Code> stub,
+ MapHandleList* result) {
+ ASSERT(stub->is_inline_cache_stub());
+ switch (stub->ic_state()) {
+ case MONOMORPHIC: {
+ Map* map = stub->FindFirstMap();
+ if (map != NULL) {
+ result->Add(Handle<Map>(map));
+ }
+ break;
+ }
+ case POLYMORPHIC: {
+ AssertNoAllocation no_allocation;
+ int mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
+ for (RelocIterator it(*stub, mask); !it.done(); it.next()) {
+ RelocInfo* info = it.rinfo();
+ Handle<Object> object(info->target_object(), stub->GetIsolate());
+ if (object->IsString()) break;
+ ASSERT(object->IsMap());
+ AddOneReceiverMapIfMissing(result, Handle<Map>::cast(object));
+ }
+ break;
+ }
+ case MEGAMORPHIC:
+ break;
+ case UNINITIALIZED:
+ case PREMONOMORPHIC:
+ case MONOMORPHIC_PROTOTYPE_FAILURE:
+ case GENERIC:
+ case DEBUG_STUB:
+ UNREACHABLE();
+ break;
+ }
+}
+
+
void LoadIC::UpdateCaches(LookupResult* lookup,
State state,
Handle<Object> object,
@@ -945,138 +1112,99 @@ void LoadIC::UpdateCaches(LookupResult* lookup,
// Loading properties from values is not common, so don't try to
// deal with non-JS objects here.
if (!object->IsJSObject()) return;
- Handle<JSObject> receiver = Handle<JSObject>::cast(object);
-
- if (HasNormalObjectsInPrototypeChain(isolate(), lookup, *object)) return;
- // Compute the code stub for this load.
+ Handle<JSObject> receiver = Handle<JSObject>::cast(object);
Handle<Code> code;
if (state == UNINITIALIZED) {
// This is the first time we execute this inline cache.
// Set the target to the pre monomorphic stub to delay
// setting the monomorphic state.
code = pre_monomorphic_stub();
- } else if (!lookup->IsProperty()) {
- // Nonexistent property. The result is undefined.
- code = isolate()->stub_cache()->ComputeLoadNonexistent(name, receiver);
} else {
- // Compute monomorphic stub.
- Handle<JSObject> holder(lookup->holder());
- switch (lookup->type()) {
- case FIELD:
- code = isolate()->stub_cache()->ComputeLoadField(
- name, receiver, holder, lookup->GetFieldIndex());
- break;
- case CONSTANT_FUNCTION: {
- Handle<JSFunction> constant(lookup->GetConstantFunction());
- code = isolate()->stub_cache()->ComputeLoadConstant(
- name, receiver, holder, constant);
- break;
- }
- case NORMAL:
- if (holder->IsGlobalObject()) {
- Handle<GlobalObject> global = Handle<GlobalObject>::cast(holder);
- Handle<JSGlobalPropertyCell> cell(global->GetPropertyCell(lookup));
- code = isolate()->stub_cache()->ComputeLoadGlobal(
- name, receiver, global, cell, lookup->IsDontDelete());
- } else {
- // There is only one shared stub for loading normalized
- // properties. It does not traverse the prototype chain, so the
- // property must be found in the receiver for the stub to be
- // applicable.
- if (!holder.is_identical_to(receiver)) return;
- code = isolate()->stub_cache()->ComputeLoadNormal();
- }
- break;
- case CALLBACKS: {
-#ifdef _WIN32_WCE
- // Disable optimization for wince as the calling convention looks different.
- return;
-#endif
- Handle<Object> callback(lookup->GetCallbackObject());
- if (callback->IsAccessorInfo()) {
- Handle<AccessorInfo> info = Handle<AccessorInfo>::cast(callback);
- if (v8::ToCData<Address>(info->getter()) == 0) return;
- if (!info->IsCompatibleReceiver(*receiver)) return;
- code = isolate()->stub_cache()->ComputeLoadCallback(
- name, receiver, holder, info);
- } else if (callback->IsAccessorPair()) {
- Handle<Object> getter(Handle<AccessorPair>::cast(callback)->getter());
- if (!getter->IsJSFunction()) return;
- if (holder->IsGlobalObject()) return;
- if (!holder->HasFastProperties()) return;
- code = isolate()->stub_cache()->ComputeLoadViaGetter(
- name, receiver, holder, Handle<JSFunction>::cast(getter));
- } else {
- ASSERT(callback->IsForeign());
- // No IC support for old-style native accessors.
- return;
- }
- break;
- }
- case INTERCEPTOR:
- ASSERT(HasInterceptorGetter(*holder));
- code = isolate()->stub_cache()->ComputeLoadInterceptor(
- name, receiver, holder);
- break;
- default:
- return;
- }
- }
-
- // Patch the call site depending on the state of the cache.
- if (state == UNINITIALIZED ||
- state == PREMONOMORPHIC ||
- state == MONOMORPHIC_PROTOTYPE_FAILURE) {
- set_target(*code);
- } else if (state == MONOMORPHIC) {
- // We are transitioning from monomorphic to megamorphic case.
- // Place the current monomorphic stub and stub compiled for
- // the receiver into stub cache.
- Map* map = target()->FindFirstMap();
- if (map != NULL) {
- isolate()->stub_cache()->Set(*name, map, target());
- }
- isolate()->stub_cache()->Set(*name, receiver->map(), *code);
-
- set_target(*megamorphic_stub());
- } else if (state == MEGAMORPHIC) {
- // Cache code holding map should be consistent with
- // GenerateMonomorphicCacheProbe.
- isolate()->stub_cache()->Set(*name, receiver->map(), *code);
+ code = ComputeLoadHandler(lookup, receiver, name);
+ if (code.is_null()) return;
}
+ PatchCache(state, kNonStrictMode, receiver, name, code);
TRACE_IC("LoadIC", name, state, target());
}
-Handle<Code> KeyedLoadIC::GetElementStubWithoutMapCheck(
- bool is_js_array,
- ElementsKind elements_kind,
- KeyedAccessGrowMode grow_mode) {
- ASSERT(grow_mode == DO_NOT_ALLOW_JSARRAY_GROWTH);
- return KeyedLoadElementStub(elements_kind).GetCode();
+void IC::UpdateMegamorphicCache(Map* map, String* name, Code* code) {
+ // Cache code holding map should be consistent with
+ // GenerateMonomorphicCacheProbe.
+ isolate()->stub_cache()->Set(name, map, code);
}
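
UpdateMegamorphicCache centralizes the isolate()->stub_cache()->Set() calls that the old patching code scattered around. Conceptually the megamorphic stub cache is a fixed-size hash table keyed on (name, map) in which a collision simply evicts the previous entry; the toy below collapses V8's primary/secondary table pair into one direct-mapped table, and its hash mix is illustrative, not V8's probe function:

    #include <cstdint>

    struct StubCache {
      enum { kSize = 2048 };
      struct Entry { const void* name; const void* map; const void* code; };
      Entry entries[kSize];

      static unsigned Hash(const void* name, const void* map) {
        uintptr_t h = reinterpret_cast<uintptr_t>(name) ^
                      (reinterpret_cast<uintptr_t>(map) >> 3);
        return static_cast<unsigned>(h % kSize);
      }
      void Set(const void* name, const void* map, const void* code) {
        Entry e = { name, map, code };
        entries[Hash(name, map)] = e;  // collisions evict silently
      }
      const void* Get(const void* name, const void* map) const {
        const Entry& e = entries[Hash(name, map)];
        return (e.name == name && e.map == map) ? e.code : 0;
      }
    };

    int main() {
      static StubCache cache;  // static storage: entries start zeroed
      int name = 0, map = 0, code = 0;
      cache.Set(&name, &map, &code);
      return cache.Get(&name, &map) == &code ? 0 : 1;
    }
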
-Handle<Code> KeyedLoadIC::ComputePolymorphicStub(
- MapHandleList* receiver_maps,
- StrictModeFlag strict_mode,
- KeyedAccessGrowMode growth_mode) {
- CodeHandleList handler_ics(receiver_maps->length());
- for (int i = 0; i < receiver_maps->length(); ++i) {
- Handle<Map> receiver_map = receiver_maps->at(i);
- Handle<Code> cached_stub = ComputeMonomorphicStubWithoutMapCheck(
- receiver_map, strict_mode, growth_mode);
- handler_ics.Add(cached_stub);
+Handle<Code> LoadIC::ComputeLoadHandler(LookupResult* lookup,
+ Handle<JSObject> receiver,
+ Handle<String> name) {
+ if (!lookup->IsProperty()) {
+ // Nonexistent property. The result is undefined.
+ return isolate()->stub_cache()->ComputeLoadNonexistent(name, receiver);
}
- KeyedLoadStubCompiler compiler(isolate());
- Handle<Code> code = compiler.CompileLoadPolymorphic(
- receiver_maps, &handler_ics);
- isolate()->counters()->keyed_load_polymorphic_stubs()->Increment();
- PROFILE(isolate(),
- CodeCreateEvent(Logger::KEYED_LOAD_MEGAMORPHIC_IC_TAG, *code, 0));
- return code;
+
+ // Compute monomorphic stub.
+ Handle<JSObject> holder(lookup->holder());
+ switch (lookup->type()) {
+ case FIELD:
+ return isolate()->stub_cache()->ComputeLoadField(
+ name, receiver, holder, lookup->GetFieldIndex());
+ case CONSTANT_FUNCTION: {
+ Handle<JSFunction> constant(lookup->GetConstantFunction());
+ return isolate()->stub_cache()->ComputeLoadConstant(
+ name, receiver, holder, constant);
+ }
+ case NORMAL:
+ if (holder->IsGlobalObject()) {
+ Handle<GlobalObject> global = Handle<GlobalObject>::cast(holder);
+ Handle<JSGlobalPropertyCell> cell(global->GetPropertyCell(lookup));
+ return isolate()->stub_cache()->ComputeLoadGlobal(
+ name, receiver, global, cell, lookup->IsDontDelete());
+ }
+ // There is only one shared stub for loading normalized
+ // properties. It does not traverse the prototype chain, so the
+ // property must be found in the receiver for the stub to be
+ // applicable.
+ if (!holder.is_identical_to(receiver)) break;
+ return isolate()->stub_cache()->ComputeLoadNormal(name, receiver);
+ case CALLBACKS: {
+#ifdef _WIN32_WCE
+ // Disable this optimization on WinCE; its calling convention differs.
+ break;  // fall through to the null-handle return below
+#endif
+ Handle<Object> callback(lookup->GetCallbackObject(), isolate());
+ if (callback->IsExecutableAccessorInfo()) {
+ Handle<ExecutableAccessorInfo> info =
+ Handle<ExecutableAccessorInfo>::cast(callback);
+ if (v8::ToCData<Address>(info->getter()) == 0) break;
+ if (!info->IsCompatibleReceiver(*receiver)) break;
+ return isolate()->stub_cache()->ComputeLoadCallback(
+ name, receiver, holder, info);
+ } else if (callback->IsAccessorPair()) {
+ Handle<Object> getter(Handle<AccessorPair>::cast(callback)->getter(),
+ isolate());
+ if (!getter->IsJSFunction()) break;
+ if (holder->IsGlobalObject()) break;
+ if (!holder->HasFastProperties()) break;
+ return isolate()->stub_cache()->ComputeLoadViaGetter(
+ name, receiver, holder, Handle<JSFunction>::cast(getter));
+ }
+ // TODO(dcarney): Handle correctly.
+ if (callback->IsDeclaredAccessorInfo()) break;
+ ASSERT(callback->IsForeign());
+ // No IC support for old-style native accessors.
+ break;
+ }
+ case INTERCEPTOR:
+ ASSERT(HasInterceptorGetter(*holder));
+ return isolate()->stub_cache()->ComputeLoadInterceptor(
+ name, receiver, holder);
+ default:
+ break;
+ }
+ return Handle<Code>::null();
}
@@ -1086,125 +1214,103 @@ static Handle<Object> TryConvertKey(Handle<Object> key, Isolate* isolate) {
if (key->IsHeapNumber()) {
double value = Handle<HeapNumber>::cast(key)->value();
if (isnan(value)) {
- key = isolate->factory()->nan_symbol();
+ key = isolate->factory()->nan_string();
} else {
int int_value = FastD2I(value);
if (value == int_value && Smi::IsValid(int_value)) {
- key = Handle<Smi>(Smi::FromInt(int_value));
+ key = Handle<Smi>(Smi::FromInt(int_value), isolate);
}
}
} else if (key->IsUndefined()) {
- key = isolate->factory()->undefined_symbol();
+ key = isolate->factory()->undefined_string();
}
return key;
}
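
TryConvertKey above canonicalizes heap-number keys so the keyed ICs only ever see a smi or a fixed name: every NaN collapses to the shared "nan" string and an integral double in smi range becomes a smi, while anything else keeps its heap-number key. A standalone analog of the numeric cases, assuming the 31-bit smi range of 32-bit targets:

    #include <cstdint>
    #include <limits>

    enum KeyKind { KEY_UNCHANGED, KEY_NAN_NAME, KEY_SMI };

    static KeyKind CanonicalizeNumberKey(double value, int32_t* smi_out) {
      if (value != value) return KEY_NAN_NAME;  // NaN: use the shared name
      const double kSmiMax = (1 << 30) - 1;     // assumed 31-bit smi range
      const double kSmiMin = -(1 << 30);
      if (value >= kSmiMin && value <= kSmiMax) {
        int32_t int_value = static_cast<int32_t>(value);  // in range, no UB
        if (value == static_cast<double>(int_value)) {
          *smi_out = int_value;
          return KEY_SMI;
        }
      }
      return KEY_UNCHANGED;  // e.g. 1.5 or 1e30 stays a heap-number key
    }

    int main() {
      int32_t smi = 0;
      double nan = std::numeric_limits<double>::quiet_NaN();
      bool ok = CanonicalizeNumberKey(42.0, &smi) == KEY_SMI && smi == 42;
      ok = ok && CanonicalizeNumberKey(nan, &smi) == KEY_NAN_NAME;
      ok = ok && CanonicalizeNumberKey(1.5, &smi) == KEY_UNCHANGED;
      return ok ? 0 : 1;
    }
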
-MaybeObject* KeyedLoadIC::Load(State state,
- Handle<Object> object,
- Handle<Object> key,
- bool force_generic_stub) {
- // Check for values that can be converted into a symbol directly or
- // is representable as a smi.
- key = TryConvertKey(key, isolate());
+Handle<Code> KeyedLoadIC::LoadElementStub(Handle<JSObject> receiver) {
+ State ic_state = target()->ic_state();
- if (key->IsSymbol()) {
- Handle<String> name = Handle<String>::cast(key);
+ // Don't handle megamorphic property accesses for INTERCEPTORS or CALLBACKS
+ // via megamorphic stubs, since they don't have a map in their relocation info
+ // and so the stubs can't be harvested for the object needed for a map check.
+ if (target()->type() != Code::NORMAL) {
+ TRACE_GENERIC_IC(isolate(), "KeyedIC", "non-NORMAL target type");
+ return generic_stub();
+ }
- // If the object is undefined or null it's illegal to try to get any
- // of its properties; throw a TypeError in that case.
- if (object->IsUndefined() || object->IsNull()) {
- return TypeError("non_object_property_load", object, name);
- }
+ Handle<Map> receiver_map(receiver->map());
+ MapHandleList target_receiver_maps;
+ if (ic_state == UNINITIALIZED || ic_state == PREMONOMORPHIC) {
+ // Optimistically assume that ICs that haven't reached the MONOMORPHIC state
+ // yet will do so and stay there.
+ return isolate()->stub_cache()->ComputeKeyedLoadElement(receiver_map);
+ }
- if (FLAG_use_ic) {
- // TODO(1073): don't ignore the current stub state.
-
- // Use specialized code for getting the length of strings.
- if (object->IsString() &&
- name->Equals(isolate()->heap()->length_symbol())) {
- Handle<String> string = Handle<String>::cast(object);
- Handle<Code> code =
- isolate()->stub_cache()->ComputeKeyedLoadStringLength(name, string);
- ASSERT(!code.is_null());
- set_target(*code);
- TRACE_IC("KeyedLoadIC", name, state, target());
- return Smi::FromInt(string->length());
- }
+ if (target() == *string_stub()) {
+ target_receiver_maps.Add(isolate()->factory()->string_map());
+ } else {
+ GetReceiverMapsForStub(Handle<Code>(target()), &target_receiver_maps);
+ if (target_receiver_maps.length() == 0) {
+ return isolate()->stub_cache()->ComputeKeyedLoadElement(receiver_map);
+ }
+ }
- // Use specialized code for getting the length of arrays.
- if (object->IsJSArray() &&
- name->Equals(isolate()->heap()->length_symbol())) {
- Handle<JSArray> array = Handle<JSArray>::cast(object);
- Handle<Code> code =
- isolate()->stub_cache()->ComputeKeyedLoadArrayLength(name, array);
- ASSERT(!code.is_null());
- set_target(*code);
- TRACE_IC("KeyedLoadIC", name, state, target());
- return array->length();
- }
+ // The first time a receiver is seen that is a transitioned version of the
+ // previous monomorphic receiver type, assume the new ElementsKind is the
+ // monomorphic type. This benefits global arrays that only transition
+ // once, and all call sites accessing them are faster if they remain
+ // monomorphic. If this optimistic assumption is not true, the IC will
+ // miss again and it will become polymorphic and support both the
+ // untransitioned and transitioned maps.
+ if (ic_state == MONOMORPHIC &&
+ IsMoreGeneralElementsKindTransition(
+ target_receiver_maps.at(0)->elements_kind(),
+ receiver->GetElementsKind())) {
+ return isolate()->stub_cache()->ComputeKeyedLoadElement(receiver_map);
+ }
- // Use specialized code for getting prototype of functions.
- if (object->IsJSFunction() &&
- name->Equals(isolate()->heap()->prototype_symbol()) &&
- Handle<JSFunction>::cast(object)->should_have_prototype()) {
- Handle<JSFunction> function = Handle<JSFunction>::cast(object);
- Handle<Code> code =
- isolate()->stub_cache()->ComputeKeyedLoadFunctionPrototype(
- name, function);
- ASSERT(!code.is_null());
- set_target(*code);
- TRACE_IC("KeyedLoadIC", name, state, target());
- return Accessors::FunctionGetPrototype(*object, 0);
- }
- }
+ ASSERT(ic_state != GENERIC);
- // Check if the name is trivially convertible to an index and get
- // the element or char if so.
- uint32_t index = 0;
- if (name->AsArrayIndex(&index)) {
- // Rewrite to the generic keyed load stub.
- if (FLAG_use_ic) set_target(*generic_stub());
- return Runtime::GetElementOrCharAt(isolate(), object, index);
- }
+ // Determine the list of receiver maps that this call site has seen,
+ // adding the map that was just encountered.
+ if (!AddOneReceiverMapIfMissing(&target_receiver_maps, receiver_map)) {
+ // If the miss wasn't due to an unseen map, a polymorphic stub
+ // won't help; use the generic stub.
+ TRACE_GENERIC_IC(isolate(), "KeyedIC", "same map added twice");
+ return generic_stub();
+ }
- // Named lookup.
- LookupResult lookup(isolate());
- LookupForRead(object, name, &lookup);
+ // If the maximum number of receiver maps has been exceeded, use the generic
+ // version of the IC.
+ if (target_receiver_maps.length() > kMaxKeyedPolymorphism) {
+ TRACE_GENERIC_IC(isolate(), "KeyedIC", "max polymorph exceeded");
+ return generic_stub();
+ }
- // If we did not find a property, check if we need to throw an exception.
- if (!lookup.IsFound() && IsContextual(object)) {
- return ReferenceError("not_defined", name);
- }
+ return isolate()->stub_cache()->ComputeLoadElementPolymorphic(
+ &target_receiver_maps);
+}
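
LoadElementStub now owns the keyed-load escalation policy in one place: stay monomorphic while the receiver map only widens its elements kind, grow a polymorphic map list, and fall back to the generic stub once the same map misses twice or the list outgrows kMaxKeyedPolymorphism. That decision tree, condensed; the bound of four is an assumed value for kMaxKeyedPolymorphism:

    #include <cstddef>
    #include <vector>

    typedef const void* MapRef;
    enum StubChoice { STUB_MONOMORPHIC, STUB_POLYMORPHIC, STUB_GENERIC };

    // seen is the map list harvested from the current stub; new_map is the
    // receiver map that just missed.
    static StubChoice SelectStub(std::vector<MapRef>* seen, MapRef new_map) {
      const std::size_t kMaxKeyedPolymorphism = 4;  // assumed value
      for (std::size_t i = 0; i < seen->size(); ++i) {
        if ((*seen)[i] == new_map) return STUB_GENERIC;  // missed twice
      }
      seen->push_back(new_map);
      if (seen->size() == 1) return STUB_MONOMORPHIC;
      if (seen->size() > kMaxKeyedPolymorphism) return STUB_GENERIC;
      return STUB_POLYMORPHIC;
    }

    int main() {
      int a = 0, b = 0;
      std::vector<MapRef> seen;
      StubChoice first = SelectStub(&seen, &a);   // monomorphic
      StubChoice second = SelectStub(&seen, &b);  // polymorphic
      StubChoice repeat = SelectStub(&seen, &a);  // generic
      return (first == STUB_MONOMORPHIC && second == STUB_POLYMORPHIC &&
              repeat == STUB_GENERIC) ? 0 : 1;
    }
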
- if (FLAG_use_ic) {
- UpdateCaches(&lookup, state, object, name);
- }
- PropertyAttributes attr;
- if (lookup.IsInterceptor()) {
- // Get the property.
- Handle<Object> result =
- Object::GetProperty(object, object, &lookup, name, &attr);
- RETURN_IF_EMPTY_HANDLE(isolate(), result);
- // If the property is not present, check if we need to throw an
- // exception.
- if (attr == ABSENT && IsContextual(object)) {
- return ReferenceError("not_defined", name);
- }
- return *result;
- }
+MaybeObject* KeyedLoadIC::Load(State state,
+ Handle<Object> object,
+ Handle<Object> key,
+ ICMissMode miss_mode) {
+ // Check for values that can be converted into an internalized string directly
+ // or are representable as a smi.
+ key = TryConvertKey(key, isolate());
- return object->GetProperty(*object, &lookup, *name, &attr);
+ if (key->IsInternalizedString()) {
+ return LoadIC::Load(state, object, Handle<String>::cast(key));
}
- // Do not use ICs for objects that require access checks (including
- // the global object).
bool use_ic = FLAG_use_ic && !object->IsAccessCheckNeeded();
+ ASSERT(!(use_ic && object->IsJSGlobalProxy()));
if (use_ic) {
Handle<Code> stub = generic_stub();
- if (!force_generic_stub) {
+ if (miss_mode != MISS_FORCE_GENERIC) {
if (object->IsString() && key->IsNumber()) {
if (state == UNINITIALIZED) {
stub = string_stub();
@@ -1217,90 +1323,60 @@ MaybeObject* KeyedLoadIC::Load(State state,
} else if (receiver->HasIndexedInterceptor()) {
stub = indexed_interceptor_stub();
} else if (key->IsSmi() && (target() != *non_strict_arguments_stub())) {
- stub = ComputeStub(receiver, LOAD, kNonStrictMode, stub);
+ stub = LoadElementStub(receiver);
}
}
} else {
- TRACE_GENERIC_IC("KeyedLoadIC", "force generic");
+ TRACE_GENERIC_IC(isolate(), "KeyedLoadIC", "force generic");
}
- if (!stub.is_null()) set_target(*stub);
+ ASSERT(!stub.is_null());
+ set_target(*stub);
+ TRACE_IC("KeyedLoadIC", key, state, target());
}
- TRACE_IC("KeyedLoadIC", key, state, target());
- // Get the property.
return Runtime::GetObjectProperty(isolate(), object, key);
}
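
The tail of KeyedLoadIC::Load shows the miss protocol in miniature: the miss handler first patches the call site to a better stub, then still performs this access through the runtime so the caller gets its value either way. A toy self-patching call site under stand-in types:

    #include <cassert>

    struct CallSite;
    typedef int (*Stub)(CallSite* site, int key);
    struct CallSite { Stub target; };

    static int FastStub(CallSite*, int key) { return key * 2; }  // cached path

    static int MissStub(CallSite* site, int key) {
      site->target = FastStub;  // 1) rewrite the call site to a better stub
      return key * 2;           // 2) still complete the access in the runtime
    }

    int main() {
      CallSite site = { MissStub };
      assert(site.target(&site, 21) == 42);  // first call misses and patches
      assert(site.target == FastStub);       // later calls hit the fast stub
      return 0;
    }
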
-void KeyedLoadIC::UpdateCaches(LookupResult* lookup,
- State state,
- Handle<Object> object,
- Handle<String> name) {
+Handle<Code> KeyedLoadIC::ComputeLoadHandler(LookupResult* lookup,
+ Handle<JSObject> receiver,
+ Handle<String> name) {
// Bail out if we didn't find a result.
- if (!lookup->IsProperty() || !lookup->IsCacheable()) return;
+ if (!lookup->IsProperty()) return Handle<Code>::null();
- if (!object->IsJSObject()) return;
- Handle<JSObject> receiver = Handle<JSObject>::cast(object);
-
- if (HasNormalObjectsInPrototypeChain(isolate(), lookup, *object)) return;
-
- // Compute the code stub for this load.
- Handle<Code> code;
-
- if (state == UNINITIALIZED) {
- // This is the first time we execute this inline cache.
- // Set the target to the pre monomorphic stub to delay
- // setting the monomorphic state.
- code = pre_monomorphic_stub();
- } else {
- // Compute a monomorphic stub.
- Handle<JSObject> holder(lookup->holder());
- switch (lookup->type()) {
- case FIELD:
- code = isolate()->stub_cache()->ComputeKeyedLoadField(
- name, receiver, holder, lookup->GetFieldIndex());
- break;
- case CONSTANT_FUNCTION: {
- Handle<JSFunction> constant(lookup->GetConstantFunction());
- code = isolate()->stub_cache()->ComputeKeyedLoadConstant(
- name, receiver, holder, constant);
- break;
- }
- case CALLBACKS: {
- Handle<Object> callback_object(lookup->GetCallbackObject());
- if (!callback_object->IsAccessorInfo()) return;
- Handle<AccessorInfo> callback =
- Handle<AccessorInfo>::cast(callback_object);
- if (v8::ToCData<Address>(callback->getter()) == 0) return;
- if (!callback->IsCompatibleReceiver(*receiver)) return;
- code = isolate()->stub_cache()->ComputeKeyedLoadCallback(
- name, receiver, holder, callback);
- break;
- }
- case INTERCEPTOR:
- ASSERT(HasInterceptorGetter(lookup->holder()));
- code = isolate()->stub_cache()->ComputeKeyedLoadInterceptor(
- name, receiver, holder);
- break;
- default:
- // Always rewrite to the generic case so that we do not
- // repeatedly try to rewrite.
- code = generic_stub();
- break;
+ // Compute a monomorphic stub.
+ Handle<JSObject> holder(lookup->holder());
+ switch (lookup->type()) {
+ case FIELD:
+ return isolate()->stub_cache()->ComputeKeyedLoadField(
+ name, receiver, holder, lookup->GetFieldIndex());
+ case CONSTANT_FUNCTION: {
+ Handle<JSFunction> constant(lookup->GetConstantFunction());
+ return isolate()->stub_cache()->ComputeKeyedLoadConstant(
+ name, receiver, holder, constant);
}
+ case CALLBACKS: {
+ Handle<Object> callback_object(lookup->GetCallbackObject(), isolate());
+ // TODO(dcarney): Handle DeclaredAccessorInfo correctly.
+ if (!callback_object->IsExecutableAccessorInfo()) break;
+ Handle<ExecutableAccessorInfo> callback =
+ Handle<ExecutableAccessorInfo>::cast(callback_object);
+ if (v8::ToCData<Address>(callback->getter()) == 0) break;
+ if (!callback->IsCompatibleReceiver(*receiver)) break;
+ return isolate()->stub_cache()->ComputeKeyedLoadCallback(
+ name, receiver, holder, callback);
+ }
+ case INTERCEPTOR:
+ ASSERT(HasInterceptorGetter(lookup->holder()));
+ return isolate()->stub_cache()->ComputeKeyedLoadInterceptor(
+ name, receiver, holder);
+ default:
+ // Always rewrite to the generic case so that we do not
+ // repeatedly try to rewrite.
+ return generic_stub();
}
-
- // Patch the call site depending on the state of the cache. Make
- // sure to always rewrite from monomorphic to megamorphic.
- ASSERT(state != MONOMORPHIC_PROTOTYPE_FAILURE);
- if (state == UNINITIALIZED || state == PREMONOMORPHIC) {
- set_target(*code);
- } else if (state == MONOMORPHIC) {
- set_target(*megamorphic_stub());
- }
-
- TRACE_IC("KeyedLoadIC", name, state, target());
+ return Handle<Code>::null();
}
@@ -1346,30 +1422,30 @@ MaybeObject* StoreIC::Store(State state,
StrictModeFlag strict_mode,
Handle<Object> object,
Handle<String> name,
- Handle<Object> value) {
- if (!object->IsJSObject()) {
- // Handle proxies.
- if (object->IsJSProxy()) {
- return JSProxy::cast(*object)->
- SetProperty(*name, *value, NONE, strict_mode);
- }
+ Handle<Object> value,
+ JSReceiver::StoreFromKeyed store_mode) {
+ // Handle proxies.
+ if (object->IsJSProxy()) {
+ return JSProxy::cast(*object)->
+ SetProperty(*name, *value, NONE, strict_mode);
+ }
- // If the object is undefined or null it's illegal to try to set any
- // properties on it; throw a TypeError in that case.
- if (object->IsUndefined() || object->IsNull()) {
- return TypeError("non_object_property_store", object, name);
- }
+ // If the object is undefined or null it's illegal to try to set any
+ // properties on it; throw a TypeError in that case.
+ if (object->IsUndefined() || object->IsNull()) {
+ return TypeError("non_object_property_store", object, name);
+ }
- // The length property of string values is read-only. Throw in strict mode.
- if (strict_mode == kStrictMode && object->IsString() &&
- name->Equals(isolate()->heap()->length_symbol())) {
- return TypeError("strict_read_only_property", object, name);
- }
- // Ignore other stores where the receiver is not a JSObject.
- // TODO(1475): Must check prototype chains of object wrappers.
- return *value;
+ // The length property of string values is read-only. Throw in strict mode.
+ if (strict_mode == kStrictMode && object->IsString() &&
+ name->Equals(isolate()->heap()->length_string())) {
+ return TypeError("strict_read_only_property", object, name);
}
+ // Ignore other stores where the receiver is not a JSObject.
+ // TODO(1475): Must check prototype chains of object wrappers.
+ if (!object->IsJSObject()) return *value;
+
Handle<JSObject> receiver = Handle<JSObject>::cast(object);
// Check if the given name is an array index.
@@ -1383,66 +1459,51 @@ MaybeObject* StoreIC::Store(State state,
// Observed objects are always modified through the runtime.
if (FLAG_harmony_observation && receiver->map()->is_observed()) {
- return receiver->SetProperty(*name, *value, NONE, strict_mode);
+ return receiver->SetProperty(*name, *value, NONE, strict_mode, store_mode);
}
// Use specialized code for setting the length of arrays with fast
- // properties. Slow properties might indicate redefinition of the
- // length property.
- if (receiver->IsJSArray() &&
- name->Equals(isolate()->heap()->length_symbol()) &&
+ // properties. Slow properties might indicate redefinition of the length
+ // property.
+ if (FLAG_use_ic &&
+ receiver->IsJSArray() &&
+ name->Equals(isolate()->heap()->length_string()) &&
Handle<JSArray>::cast(receiver)->AllowsSetElementsLength() &&
receiver->HasFastProperties()) {
-#ifdef DEBUG
- if (FLAG_trace_ic) PrintF("[StoreIC : +#length /array]\n");
-#endif
- Handle<Code> stub = (strict_mode == kStrictMode)
- ? isolate()->builtins()->StoreIC_ArrayLength_Strict()
- : isolate()->builtins()->StoreIC_ArrayLength();
+ Handle<Code> stub =
+ StoreArrayLengthStub(kind(), strict_mode).GetCode(isolate());
set_target(*stub);
- return receiver->SetProperty(*name, *value, NONE, strict_mode);
+ TRACE_IC("StoreIC", name, state, *stub);
+ return receiver->SetProperty(*name, *value, NONE, strict_mode, store_mode);
}
- // Lookup the property locally in the receiver.
- if (!receiver->IsJSGlobalProxy()) {
- LookupResult lookup(isolate());
-
- if (LookupForWrite(receiver, name, &lookup)) {
- if (FLAG_use_ic) { // Generate a stub for this store.
- UpdateCaches(&lookup, state, strict_mode, receiver, name, value);
- }
- } else {
- // Strict mode doesn't allow setting non-existent global property
- // or an assignment to a read only property.
- if (strict_mode == kStrictMode) {
- if (lookup.IsProperty() && lookup.IsReadOnly()) {
- return TypeError("strict_read_only_property", object, name);
- } else if (IsContextual(object)) {
- return ReferenceError("not_defined", name);
- }
- }
+ if (receiver->IsJSGlobalProxy()) {
+ if (FLAG_use_ic && kind() != Code::KEYED_STORE_IC) {
+ // Generate a generic stub that goes to the runtime when we see a global
+ // proxy as receiver.
+ Handle<Code> stub = (strict_mode == kStrictMode)
+ ? global_proxy_stub_strict()
+ : global_proxy_stub();
+ set_target(*stub);
+ TRACE_IC("StoreIC", name, state, *stub);
}
+ return receiver->SetProperty(*name, *value, NONE, strict_mode, store_mode);
}
- if (receiver->IsJSGlobalProxy()) {
- // TODO(ulan): find out why we patch this site even with --no-use-ic
- // Generate a generic stub that goes to the runtime when we see a global
- // proxy as receiver.
- Handle<Code> stub = (strict_mode == kStrictMode)
- ? global_proxy_stub_strict()
- : global_proxy_stub();
- if (target() != *stub) {
- set_target(*stub);
- TRACE_IC("StoreIC", name, state, target());
+ LookupResult lookup(isolate());
+ if (LookupForWrite(receiver, name, &lookup)) {
+ if (FLAG_use_ic) {
+ UpdateCaches(&lookup, state, strict_mode, receiver, name, value);
}
+ } else if (strict_mode == kStrictMode &&
+ !(lookup.IsProperty() && lookup.IsReadOnly()) &&
+ IsUndeclaredGlobal(object)) {
+ // Strict mode doesn't allow setting non-existent global property.
+ return ReferenceError("not_defined", name);
}
// Set the property.
- return receiver->SetProperty(*name,
- *value,
- NONE,
- strict_mode,
- JSReceiver::CERTAINLY_NOT_STORE_FROM_KEYED);
+ return receiver->SetProperty(*name, *value, NONE, strict_mode, store_mode);
}
@@ -1459,24 +1520,25 @@ void StoreIC::UpdateCaches(LookupResult* lookup,
// These are not cacheable, so we never see such LookupResults here.
ASSERT(!lookup->IsHandler());
- // If the property has a non-field type allowing map transitions
- // where there is extra room in the object, we leave the IC in its
- // current state.
- PropertyType type = lookup->type();
+ Handle<Code> code =
+ ComputeStoreMonomorphic(lookup, strict_mode, receiver, name);
+ if (code.is_null()) return;
+
+ PatchCache(state, strict_mode, receiver, name, code);
+ TRACE_IC("StoreIC", name, state, target());
+}
- // Compute the code stub for this store; used for rewriting to
- // monomorphic state and making sure that the code stub is in the
- // stub cache.
+
+Handle<Code> StoreIC::ComputeStoreMonomorphic(LookupResult* lookup,
+ StrictModeFlag strict_mode,
+ Handle<JSObject> receiver,
+ Handle<String> name) {
Handle<JSObject> holder(lookup->holder());
- Handle<Code> code;
- switch (type) {
+ switch (lookup->type()) {
case FIELD:
- code = isolate()->stub_cache()->ComputeStoreField(name,
- receiver,
- lookup->GetFieldIndex(),
- Handle<Map>::null(),
- strict_mode);
- break;
+ return isolate()->stub_cache()->ComputeStoreField(
+ name, receiver, lookup->GetFieldIndex().field_index(),
+ Handle<Map>::null(), strict_mode);
case NORMAL:
if (receiver->IsGlobalObject()) {
// The stub generated for the global object picks the value directly
@@ -1484,44 +1546,43 @@ void StoreIC::UpdateCaches(LookupResult* lookup,
// global object.
Handle<GlobalObject> global = Handle<GlobalObject>::cast(receiver);
Handle<JSGlobalPropertyCell> cell(global->GetPropertyCell(lookup));
- code = isolate()->stub_cache()->ComputeStoreGlobal(
+ return isolate()->stub_cache()->ComputeStoreGlobal(
name, global, cell, strict_mode);
- } else {
- if (!holder.is_identical_to(receiver)) return;
- code = isolate()->stub_cache()->ComputeStoreNormal(strict_mode);
}
- break;
+ if (!holder.is_identical_to(receiver)) break;
+ return isolate()->stub_cache()->ComputeStoreNormal(strict_mode);
case CALLBACKS: {
- Handle<Object> callback(lookup->GetCallbackObject());
- if (callback->IsAccessorInfo()) {
- Handle<AccessorInfo> info = Handle<AccessorInfo>::cast(callback);
- if (v8::ToCData<Address>(info->setter()) == 0) return;
- if (!holder->HasFastProperties()) return;
- if (!info->IsCompatibleReceiver(*receiver)) return;
- code = isolate()->stub_cache()->ComputeStoreCallback(
+ Handle<Object> callback(lookup->GetCallbackObject(), isolate());
+ if (callback->IsExecutableAccessorInfo()) {
+ Handle<ExecutableAccessorInfo> info =
+ Handle<ExecutableAccessorInfo>::cast(callback);
+ if (v8::ToCData<Address>(info->setter()) == 0) break;
+ if (!holder->HasFastProperties()) break;
+ if (!info->IsCompatibleReceiver(*receiver)) break;
+ return isolate()->stub_cache()->ComputeStoreCallback(
name, receiver, holder, info, strict_mode);
} else if (callback->IsAccessorPair()) {
- Handle<Object> setter(Handle<AccessorPair>::cast(callback)->setter());
- if (!setter->IsJSFunction()) return;
- if (holder->IsGlobalObject()) return;
- if (!holder->HasFastProperties()) return;
- code = isolate()->stub_cache()->ComputeStoreViaSetter(
+ Handle<Object> setter(Handle<AccessorPair>::cast(callback)->setter(),
+ isolate());
+ if (!setter->IsJSFunction()) break;
+ if (holder->IsGlobalObject()) break;
+ if (!holder->HasFastProperties()) break;
+ return isolate()->stub_cache()->ComputeStoreViaSetter(
name, receiver, holder, Handle<JSFunction>::cast(setter),
strict_mode);
- } else {
- ASSERT(callback->IsForeign());
- // No IC support for old-style native accessors.
- return;
}
+ // TODO(dcarney): Handle correctly.
+ if (callback->IsDeclaredAccessorInfo()) break;
+ ASSERT(callback->IsForeign());
+ // No IC support for old-style native accessors.
break;
}
case INTERCEPTOR:
ASSERT(!receiver->GetNamedInterceptor()->setter()->IsUndefined());
- code = isolate()->stub_cache()->ComputeStoreInterceptor(
+ return isolate()->stub_cache()->ComputeStoreInterceptor(
name, receiver, strict_mode);
- break;
case CONSTANT_FUNCTION:
- return;
+ break;
case TRANSITION: {
Handle<Map> transition(lookup->GetTransitionTarget());
int descriptor = transition->LastAdded();
@@ -1529,80 +1590,24 @@ void StoreIC::UpdateCaches(LookupResult* lookup,
DescriptorArray* target_descriptors = transition->instance_descriptors();
PropertyDetails details = target_descriptors->GetDetails(descriptor);
- if (details.type() != FIELD || details.attributes() != NONE) return;
+ if (details.type() != FIELD || details.attributes() != NONE) break;
int field_index = target_descriptors->GetFieldIndex(descriptor);
- code = isolate()->stub_cache()->ComputeStoreField(
+ return isolate()->stub_cache()->ComputeStoreField(
name, receiver, field_index, transition, strict_mode);
-
- break;
}
case NONEXISTENT:
case HANDLER:
UNREACHABLE();
- return;
- }
-
- // Patch the call site depending on the state of the cache.
- if (state == UNINITIALIZED || state == MONOMORPHIC_PROTOTYPE_FAILURE) {
- set_target(*code);
- } else if (state == MONOMORPHIC) {
- // Only move to megamorphic if the target changes.
- if (target() != *code) {
- set_target((strict_mode == kStrictMode)
- ? megamorphic_stub_strict()
- : megamorphic_stub());
- }
- } else if (state == MEGAMORPHIC) {
- // Update the stub cache.
- isolate()->stub_cache()->Set(*name, receiver->map(), *code);
- }
-
- TRACE_IC("StoreIC", name, state, target());
-}
-
-
-static bool AddOneReceiverMapIfMissing(MapHandleList* receiver_maps,
- Handle<Map> new_receiver_map) {
- ASSERT(!new_receiver_map.is_null());
- for (int current = 0; current < receiver_maps->length(); ++current) {
- if (!receiver_maps->at(current).is_null() &&
- receiver_maps->at(current).is_identical_to(new_receiver_map)) {
- return false;
- }
- }
- receiver_maps->Add(new_receiver_map);
- return true;
-}
-
-
-void KeyedIC::GetReceiverMapsForStub(Handle<Code> stub,
- MapHandleList* result) {
- ASSERT(stub->is_inline_cache_stub());
- if (!string_stub().is_null() && stub.is_identical_to(string_stub())) {
- return result->Add(isolate()->factory()->string_map());
- } else if (stub->is_keyed_load_stub() || stub->is_keyed_store_stub()) {
- if (stub->ic_state() == MONOMORPHIC) {
- result->Add(Handle<Map>(stub->FindFirstMap()));
- } else {
- ASSERT(stub->ic_state() == MEGAMORPHIC);
- AssertNoAllocation no_allocation;
- int mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
- for (RelocIterator it(*stub, mask); !it.done(); it.next()) {
- RelocInfo* info = it.rinfo();
- Handle<Object> object(info->target_object());
- ASSERT(object->IsMap());
- AddOneReceiverMapIfMissing(result, Handle<Map>::cast(object));
- }
- }
+ break;
}
+ return Handle<Code>::null();
}
-Handle<Code> KeyedIC::ComputeStub(Handle<JSObject> receiver,
- StubKind stub_kind,
- StrictModeFlag strict_mode,
- Handle<Code> generic_stub) {
+Handle<Code> KeyedStoreIC::StoreElementStub(Handle<JSObject> receiver,
+ StubKind stub_kind,
+ StrictModeFlag strict_mode) {
State ic_state = target()->ic_state();
KeyedAccessGrowMode grow_mode = IsGrowStubKind(stub_kind)
? ALLOW_JSARRAY_GROWTH
@@ -1612,66 +1617,70 @@ Handle<Code> KeyedIC::ComputeStub(Handle<JSObject> receiver,
// via megamorphic stubs, since they don't have a map in their relocation info
// and so the stubs can't be harvested for the object needed for a map check.
if (target()->type() != Code::NORMAL) {
- TRACE_GENERIC_IC("KeyedIC", "non-NORMAL target type");
- return generic_stub;
+ TRACE_GENERIC_IC(isolate(), "KeyedIC", "non-NORMAL target type");
+ return strict_mode == kStrictMode ? generic_stub_strict() : generic_stub();
}
- bool monomorphic = false;
- bool is_transition_stub = IsTransitionStubKind(stub_kind);
Handle<Map> receiver_map(receiver->map());
- Handle<Map> monomorphic_map = receiver_map;
MapHandleList target_receiver_maps;
if (ic_state == UNINITIALIZED || ic_state == PREMONOMORPHIC) {
// Optimistically assume that ICs that haven't reached the MONOMORPHIC state
// yet will do so and stay there.
- monomorphic = true;
- } else {
- GetReceiverMapsForStub(Handle<Code>(target()), &target_receiver_maps);
- if (ic_state == MONOMORPHIC && (is_transition_stub || stub_kind == LOAD)) {
- // The first time a receiver is seen that is a transitioned version of the
- // previous monomorphic receiver type, assume the new ElementsKind is the
- // monomorphic type. This benefits global arrays that only transition
- // once, and all call sites accessing them are faster if they remain
- // monomorphic. If this optimistic assumption is not true, the IC will
- // miss again and it will become polymorphic and support both the
- // untransitioned and transitioned maps.
- monomorphic = IsMoreGeneralElementsKindTransition(
- target_receiver_maps.at(0)->elements_kind(),
- receiver->GetElementsKind());
- }
+ Handle<Map> monomorphic_map = ComputeTransitionedMap(receiver, stub_kind);
+ stub_kind = GetNoTransitionStubKind(stub_kind);
+ return isolate()->stub_cache()->ComputeKeyedStoreElement(
+ monomorphic_map, stub_kind, strict_mode, grow_mode);
}
- if (monomorphic) {
- if (is_transition_stub) {
- monomorphic_map = ComputeTransitionedMap(receiver, stub_kind);
- ASSERT(*monomorphic_map != *receiver_map);
- stub_kind = GetNoTransitionStubKind(stub_kind);
- }
- return ComputeMonomorphicStub(
- monomorphic_map, stub_kind, strict_mode, generic_stub);
+ GetReceiverMapsForStub(Handle<Code>(target()), &target_receiver_maps);
+ if (target_receiver_maps.length() == 0) {
+ // Optimistically assume that ICs that haven't reached the MONOMORPHIC state
+ // yet will do so and stay there.
+ stub_kind = GetNoTransitionStubKind(stub_kind);
+ return isolate()->stub_cache()->ComputeKeyedStoreElement(
+ receiver_map, stub_kind, strict_mode, grow_mode);
+ }
+ // The first time a receiver is seen that is a transitioned version of the
+ // previous monomorphic receiver type, assume the new ElementsKind is the
+ // monomorphic type. This benefits global arrays that only transition
+ // once, and all call sites accessing them are faster if they remain
+ // monomorphic. If this optimistic assumption is not true, the IC will
+ // miss again and it will become polymorphic and support both the
+ // untransitioned and transitioned maps.
+ if (ic_state == MONOMORPHIC &&
+ IsTransitionStubKind(stub_kind) &&
+ IsMoreGeneralElementsKindTransition(
+ target_receiver_maps.at(0)->elements_kind(),
+ receiver->GetElementsKind())) {
+ Handle<Map> monomorphic_map = ComputeTransitionedMap(receiver, stub_kind);
+ ASSERT(*monomorphic_map != *receiver_map);
+ stub_kind = GetNoTransitionStubKind(stub_kind);
+ return isolate()->stub_cache()->ComputeKeyedStoreElement(
+ monomorphic_map, stub_kind, strict_mode, grow_mode);
}
- ASSERT(target() != *generic_stub);
- // Determine the list of receiver maps that this call site has seen,
- // adding the map that was just encountered.
+ ASSERT(ic_state != GENERIC);
+
bool map_added =
AddOneReceiverMapIfMissing(&target_receiver_maps, receiver_map);
+
if (IsTransitionStubKind(stub_kind)) {
Handle<Map> new_map = ComputeTransitionedMap(receiver, stub_kind);
map_added |= AddOneReceiverMapIfMissing(&target_receiver_maps, new_map);
}
+
if (!map_added) {
// If the miss wasn't due to an unseen map, a polymorphic stub
// won't help, use the generic stub.
- TRACE_GENERIC_IC("KeyedIC", "same map added twice");
- return generic_stub;
+ TRACE_GENERIC_IC(isolate(), "KeyedIC", "same map added twice");
+ return strict_mode == kStrictMode ? generic_stub_strict() : generic_stub();
}
// If the maximum number of receiver maps has been exceeded, use the generic
// version of the IC.
if (target_receiver_maps.length() > kMaxKeyedPolymorphism) {
- TRACE_GENERIC_IC("KeyedIC", "max polymorph exceeded");
- return generic_stub;
+ TRACE_GENERIC_IC(isolate(), "KeyedIC", "max polymorph exceeded");
+ return strict_mode == kStrictMode ? generic_stub_strict() : generic_stub();
}
if ((Code::GetKeyedAccessGrowMode(target()->extra_ic_state()) ==
@@ -1679,136 +1688,43 @@ Handle<Code> KeyedIC::ComputeStub(Handle<JSObject> receiver,
grow_mode = ALLOW_JSARRAY_GROWTH;
}
- Handle<PolymorphicCodeCache> cache =
- isolate()->factory()->polymorphic_code_cache();
- Code::ExtraICState extra_state = Code::ComputeExtraICState(grow_mode,
- strict_mode);
- Code::Flags flags = Code::ComputeFlags(kind(), MEGAMORPHIC, extra_state);
- Handle<Object> probe = cache->Lookup(&target_receiver_maps, flags);
- if (probe->IsCode()) return Handle<Code>::cast(probe);
-
- Handle<Code> stub =
- ComputePolymorphicStub(&target_receiver_maps, strict_mode, grow_mode);
- PolymorphicCodeCache::Update(cache, &target_receiver_maps, flags, stub);
- return stub;
-}
-
-
-Handle<Code> KeyedIC::ComputeMonomorphicStubWithoutMapCheck(
- Handle<Map> receiver_map,
- StrictModeFlag strict_mode,
- KeyedAccessGrowMode grow_mode) {
- if ((receiver_map->instance_type() & kNotStringTag) == 0) {
- ASSERT(!string_stub().is_null());
- return string_stub();
- } else {
- ASSERT(receiver_map->has_dictionary_elements() ||
- receiver_map->has_fast_smi_or_object_elements() ||
- receiver_map->has_fast_double_elements() ||
- receiver_map->has_external_array_elements());
- bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
- return GetElementStubWithoutMapCheck(is_js_array,
- receiver_map->elements_kind(),
- grow_mode);
- }
-}
-
-
-Handle<Code> KeyedIC::ComputeMonomorphicStub(Handle<Map> receiver_map,
- StubKind stub_kind,
- StrictModeFlag strict_mode,
- Handle<Code> generic_stub) {
- ElementsKind elements_kind = receiver_map->elements_kind();
- if (IsFastElementsKind(elements_kind) ||
- IsExternalArrayElementsKind(elements_kind) ||
- IsDictionaryElementsKind(elements_kind)) {
- return isolate()->stub_cache()->ComputeKeyedLoadOrStoreElement(
- receiver_map, stub_kind, strict_mode);
- } else {
- return generic_stub;
- }
+ return isolate()->stub_cache()->ComputeStoreElementPolymorphic(
+ &target_receiver_maps, grow_mode, strict_mode);
}
-Handle<Map> KeyedIC::ComputeTransitionedMap(Handle<JSObject> receiver,
- StubKind stub_kind) {
+Handle<Map> KeyedStoreIC::ComputeTransitionedMap(Handle<JSObject> receiver,
+ StubKind stub_kind) {
switch (stub_kind) {
- case KeyedIC::STORE_TRANSITION_SMI_TO_OBJECT:
- case KeyedIC::STORE_TRANSITION_DOUBLE_TO_OBJECT:
- case KeyedIC::STORE_AND_GROW_TRANSITION_SMI_TO_OBJECT:
- case KeyedIC::STORE_AND_GROW_TRANSITION_DOUBLE_TO_OBJECT:
+ case STORE_TRANSITION_SMI_TO_OBJECT:
+ case STORE_TRANSITION_DOUBLE_TO_OBJECT:
+ case STORE_AND_GROW_TRANSITION_SMI_TO_OBJECT:
+ case STORE_AND_GROW_TRANSITION_DOUBLE_TO_OBJECT:
return JSObject::GetElementsTransitionMap(receiver, FAST_ELEMENTS);
- case KeyedIC::STORE_TRANSITION_SMI_TO_DOUBLE:
- case KeyedIC::STORE_AND_GROW_TRANSITION_SMI_TO_DOUBLE:
+ case STORE_TRANSITION_SMI_TO_DOUBLE:
+ case STORE_AND_GROW_TRANSITION_SMI_TO_DOUBLE:
return JSObject::GetElementsTransitionMap(receiver, FAST_DOUBLE_ELEMENTS);
- case KeyedIC::STORE_TRANSITION_HOLEY_SMI_TO_OBJECT:
- case KeyedIC::STORE_TRANSITION_HOLEY_DOUBLE_TO_OBJECT:
- case KeyedIC::STORE_AND_GROW_TRANSITION_HOLEY_SMI_TO_OBJECT:
- case KeyedIC::STORE_AND_GROW_TRANSITION_HOLEY_DOUBLE_TO_OBJECT:
+ case STORE_TRANSITION_HOLEY_SMI_TO_OBJECT:
+ case STORE_TRANSITION_HOLEY_DOUBLE_TO_OBJECT:
+ case STORE_AND_GROW_TRANSITION_HOLEY_SMI_TO_OBJECT:
+ case STORE_AND_GROW_TRANSITION_HOLEY_DOUBLE_TO_OBJECT:
return JSObject::GetElementsTransitionMap(receiver,
FAST_HOLEY_ELEMENTS);
- case KeyedIC::STORE_TRANSITION_HOLEY_SMI_TO_DOUBLE:
- case KeyedIC::STORE_AND_GROW_TRANSITION_HOLEY_SMI_TO_DOUBLE:
+ case STORE_TRANSITION_HOLEY_SMI_TO_DOUBLE:
+ case STORE_AND_GROW_TRANSITION_HOLEY_SMI_TO_DOUBLE:
return JSObject::GetElementsTransitionMap(receiver,
FAST_HOLEY_DOUBLE_ELEMENTS);
- case KeyedIC::LOAD:
- case KeyedIC::STORE_NO_TRANSITION:
- case KeyedIC::STORE_AND_GROW_NO_TRANSITION:
- UNREACHABLE();
- break;
+ case STORE_NO_TRANSITION:
+ case STORE_AND_GROW_NO_TRANSITION:
+ return Handle<Map>(receiver->map());
}
return Handle<Map>::null();
}
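
ComputeTransitionedMap is a pure table from stub kind to target elements kind. A condensed, compilable sketch of the same mapping, with hypothetical stand-in enums and no map handles (grow and holey variants collapse onto the cases shown):

    #include <cassert>

    enum StubKind {
      STORE_NO_TRANSITION,
      STORE_TRANSITION_SMI_TO_OBJECT,
      STORE_TRANSITION_SMI_TO_DOUBLE,
      STORE_TRANSITION_DOUBLE_TO_OBJECT
    };

    enum ElementsKind { FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS };

    // Returns the elements kind the receiver's map should transition to,
    // or the current kind when no transition is requested.
    static ElementsKind TargetKind(StubKind kind, ElementsKind current) {
      switch (kind) {
        case STORE_TRANSITION_SMI_TO_DOUBLE:    return FAST_DOUBLE_ELEMENTS;
        case STORE_TRANSITION_SMI_TO_OBJECT:
        case STORE_TRANSITION_DOUBLE_TO_OBJECT: return FAST_ELEMENTS;
        case STORE_NO_TRANSITION:               return current;
      }
      return current;  // unreachable with a well-formed stub kind
    }

    int main() {
      assert(TargetKind(STORE_TRANSITION_SMI_TO_DOUBLE, FAST_SMI_ELEMENTS) ==
             FAST_DOUBLE_ELEMENTS);
      assert(TargetKind(STORE_NO_TRANSITION, FAST_ELEMENTS) == FAST_ELEMENTS);
      return 0;
    }
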
-Handle<Code> KeyedStoreIC::GetElementStubWithoutMapCheck(
- bool is_js_array,
- ElementsKind elements_kind,
- KeyedAccessGrowMode grow_mode) {
- return KeyedStoreElementStub(is_js_array, elements_kind, grow_mode).GetCode();
-}
-
-
-Handle<Code> KeyedStoreIC::ComputePolymorphicStub(
- MapHandleList* receiver_maps,
- StrictModeFlag strict_mode,
- KeyedAccessGrowMode grow_mode) {
- // Collect MONOMORPHIC stubs for all target_receiver_maps.
- CodeHandleList handler_ics(receiver_maps->length());
- MapHandleList transitioned_maps(receiver_maps->length());
- for (int i = 0; i < receiver_maps->length(); ++i) {
- Handle<Map> receiver_map(receiver_maps->at(i));
- Handle<Code> cached_stub;
- Handle<Map> transitioned_map =
- receiver_map->FindTransitionedMap(receiver_maps);
- if (!transitioned_map.is_null()) {
- cached_stub = ElementsTransitionAndStoreStub(
- receiver_map->elements_kind(), // original elements_kind
- transitioned_map->elements_kind(),
- receiver_map->instance_type() == JS_ARRAY_TYPE, // is_js_array
- strict_mode, grow_mode).GetCode();
- } else {
- cached_stub = ComputeMonomorphicStubWithoutMapCheck(receiver_map,
- strict_mode,
- grow_mode);
- }
- ASSERT(!cached_stub.is_null());
- handler_ics.Add(cached_stub);
- transitioned_maps.Add(transitioned_map);
- }
- KeyedStoreStubCompiler compiler(isolate(), strict_mode, grow_mode);
- Handle<Code> code = compiler.CompileStorePolymorphic(
- receiver_maps, &handler_ics, &transitioned_maps);
- isolate()->counters()->keyed_store_polymorphic_stubs()->Increment();
- PROFILE(isolate(),
- CodeCreateEvent(Logger::KEYED_STORE_MEGAMORPHIC_IC_TAG, *code, 0));
- return code;
-}
-
-
-KeyedIC::StubKind KeyedStoreIC::GetStubKind(Handle<JSObject> receiver,
- Handle<Object> key,
- Handle<Object> value) {
+KeyedStoreIC::StubKind KeyedStoreIC::GetStubKind(Handle<JSObject> receiver,
+ Handle<Object> key,
+ Handle<Object> value) {
ASSERT(key->IsSmi());
int index = Smi::cast(*key)->value();
bool allow_growth = receiver->IsJSArray() &&
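
The hunk above starts GetStubKind, whose core decision is that a store at index == length of a JSArray may grow the array in place. A simplified sketch of that decision, assuming the inputs have already been extracted from the receiver and key:

    #include <cassert>

    enum StubKind { STORE_NO_TRANSITION, STORE_AND_GROW_NO_TRANSITION };

    static StubKind GetStubKind(bool is_js_array, int length, int index) {
      // A store exactly one past the end of a JSArray can extend the
      // backing store in place, so it gets the grow variant of the stub.
      bool allow_growth = is_js_array && index == length;
      return allow_growth ? STORE_AND_GROW_NO_TRANSITION
                          : STORE_NO_TRANSITION;
    }

    int main() {
      assert(GetStubKind(true, 3, 3) == STORE_AND_GROW_NO_TRANSITION);  // a[3], len 3
      assert(GetStubKind(true, 3, 1) == STORE_NO_TRANSITION);           // in bounds
      assert(GetStubKind(false, 3, 3) == STORE_NO_TRANSITION);          // not a JSArray
      return 0;
    }
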
@@ -1877,54 +1793,20 @@ MaybeObject* KeyedStoreIC::Store(State state,
Handle<Object> object,
Handle<Object> key,
Handle<Object> value,
- bool force_generic) {
- // Check for values that can be converted into a symbol directly or
- // is representable as a smi.
+ ICMissMode miss_mode) {
+  // Check for values that can be converted into an internalized string
+  // directly or are representable as a smi.
key = TryConvertKey(key, isolate());
- if (key->IsSymbol()) {
- Handle<String> name = Handle<String>::cast(key);
-
- // Handle proxies.
- if (object->IsJSProxy()) {
- return JSProxy::cast(*object)->SetProperty(
- *name, *value, NONE, strict_mode);
- }
-
- // If the object is undefined or null it's illegal to try to set any
- // properties on it; throw a TypeError in that case.
- if (object->IsUndefined() || object->IsNull()) {
- return TypeError("non_object_property_store", object, name);
- }
-
- // Ignore stores where the receiver is not a JSObject.
- if (!object->IsJSObject()) return *value;
- Handle<JSObject> receiver = Handle<JSObject>::cast(object);
-
- // Check if the given name is an array index.
- uint32_t index;
- if (name->AsArrayIndex(&index)) {
- Handle<Object> result =
- JSObject::SetElement(receiver, index, value, NONE, strict_mode);
- RETURN_IF_EMPTY_HANDLE(isolate(), result);
- return *value;
- }
-
- // Update inline cache and stub cache.
- if (FLAG_use_ic && !receiver->IsJSGlobalProxy() &&
- !(FLAG_harmony_observation && receiver->map()->is_observed())) {
- LookupResult lookup(isolate());
- if (LookupForWrite(receiver, name, &lookup)) {
- UpdateCaches(&lookup, state, strict_mode, receiver, name, value);
- }
- }
-
- // Set the property.
- return receiver->SetProperty(*name, *value, NONE, strict_mode);
+ if (key->IsInternalizedString()) {
+ return StoreIC::Store(state,
+ strict_mode,
+ object,
+ Handle<String>::cast(key),
+ value,
+ JSReceiver::MAY_BE_STORE_FROM_KEYED);
}
- // Do not use ICs for objects that require access checks (including
- // the global object), or are observed.
bool use_ic = FLAG_use_ic && !object->IsAccessCheckNeeded() &&
!(FLAG_harmony_observation && object->IsJSObject() &&
JSObject::cast(*object)->map()->is_observed());
@@ -1934,60 +1816,42 @@ MaybeObject* KeyedStoreIC::Store(State state,
Handle<Code> stub = (strict_mode == kStrictMode)
? generic_stub_strict()
: generic_stub();
- if (object->IsJSObject()) {
- Handle<JSObject> receiver = Handle<JSObject>::cast(object);
- if (receiver->elements()->map() ==
- isolate()->heap()->non_strict_arguments_elements_map()) {
- stub = non_strict_arguments_stub();
- } else if (!force_generic) {
- if (key->IsSmi() && (target() != *non_strict_arguments_stub())) {
+ if (miss_mode != MISS_FORCE_GENERIC) {
+ if (object->IsJSObject()) {
+ Handle<JSObject> receiver = Handle<JSObject>::cast(object);
+ if (receiver->elements()->map() ==
+ isolate()->heap()->non_strict_arguments_elements_map()) {
+ stub = non_strict_arguments_stub();
+ } else if (key->IsSmi() && (target() != *non_strict_arguments_stub())) {
StubKind stub_kind = GetStubKind(receiver, key, value);
- stub = ComputeStub(receiver, stub_kind, strict_mode, stub);
+ stub = StoreElementStub(receiver, stub_kind, strict_mode);
}
- } else {
- TRACE_GENERIC_IC("KeyedStoreIC", "force generic");
}
+ } else {
+ TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "force generic");
}
- if (!stub.is_null()) set_target(*stub);
+ ASSERT(!stub.is_null());
+ set_target(*stub);
+ TRACE_IC("KeyedStoreIC", key, state, target());
}
- TRACE_IC("KeyedStoreIC", key, state, target());
-
- // Set the property.
return Runtime::SetObjectProperty(
      isolate(), object, key, value, NONE, strict_mode);
}
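
The rewritten Store body funnels every miss into one of a few stubs. A compilable sketch of just that dispatch, with the stub identities reduced to labels (hypothetical names standing in for the real builtins):

    #include <cassert>
    #include <string>

    enum ICMissMode { MISS, MISS_FORCE_GENERIC };

    static std::string PickStub(ICMissMode mode,
                                bool is_js_object,
                                bool has_arguments_elements,
                                bool key_is_smi) {
      if (mode == MISS_FORCE_GENERIC) return "generic";  // traced, no IC work
      if (!is_js_object) return "generic";
      if (has_arguments_elements) return "non_strict_arguments";
      if (key_is_smi) return "element_stub";  // via StoreElementStub
      return "generic";
    }

    int main() {
      assert(PickStub(MISS, true, false, true) == "element_stub");
      assert(PickStub(MISS, true, true, true) == "non_strict_arguments");
      assert(PickStub(MISS_FORCE_GENERIC, true, false, true) == "generic");
      return 0;
    }
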
-void KeyedStoreIC::UpdateCaches(LookupResult* lookup,
- State state,
- StrictModeFlag strict_mode,
- Handle<JSObject> receiver,
- Handle<String> name,
- Handle<Object> value) {
- ASSERT(!receiver->IsJSGlobalProxy());
- ASSERT(StoreICableLookup(lookup));
- ASSERT(lookup->IsFound());
-
- // These are not cacheable, so we never see such LookupResults here.
- ASSERT(!lookup->IsHandler());
-
+Handle<Code> KeyedStoreIC::ComputeStoreMonomorphic(LookupResult* lookup,
+ StrictModeFlag strict_mode,
+ Handle<JSObject> receiver,
+ Handle<String> name) {
// If the property has a non-field type allowing map transitions
// where there is extra room in the object, we leave the IC in its
// current state.
- PropertyType type = lookup->type();
-
- // Compute the code stub for this store; used for rewriting to
- // monomorphic state and making sure that the code stub is in the
- // stub cache.
- Handle<Code> code;
-
- switch (type) {
+ switch (lookup->type()) {
case FIELD:
- code = isolate()->stub_cache()->ComputeKeyedStoreField(
- name, receiver, lookup->GetFieldIndex(),
+ return isolate()->stub_cache()->ComputeKeyedStoreField(
+ name, receiver, lookup->GetFieldIndex().field_index(),
Handle<Map>::null(), strict_mode);
- break;
case TRANSITION: {
Handle<Map> transition(lookup->GetTransitionTarget());
int descriptor = transition->LastAdded();
@@ -1997,9 +1861,8 @@ void KeyedStoreIC::UpdateCaches(LookupResult* lookup,
if (details.type() == FIELD && details.attributes() == NONE) {
int field_index = target_descriptors->GetFieldIndex(descriptor);
- code = isolate()->stub_cache()->ComputeKeyedStoreField(
+ return isolate()->stub_cache()->ComputeKeyedStoreField(
name, receiver, field_index, transition, strict_mode);
- break;
}
// fall through.
}
@@ -2009,30 +1872,15 @@ void KeyedStoreIC::UpdateCaches(LookupResult* lookup,
case INTERCEPTOR:
// Always rewrite to the generic case so that we do not
// repeatedly try to rewrite.
- code = (strict_mode == kStrictMode)
+ return (strict_mode == kStrictMode)
? generic_stub_strict()
: generic_stub();
- break;
case HANDLER:
case NONEXISTENT:
UNREACHABLE();
- return;
- }
-
- ASSERT(!code.is_null());
-
- // Patch the call site depending on the state of the cache. Make
- // sure to always rewrite from monomorphic to megamorphic.
- ASSERT(state != MONOMORPHIC_PROTOTYPE_FAILURE);
- if (state == UNINITIALIZED || state == PREMONOMORPHIC) {
- set_target(*code);
- } else if (state == MONOMORPHIC) {
- set_target((strict_mode == kStrictMode)
- ? *megamorphic_stub_strict()
- : *megamorphic_stub());
+ break;
}
-
- TRACE_IC("KeyedStoreIC", name, state, target());
+ return Handle<Code>::null();
}
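
ComputeStoreMonomorphic now returns the handler instead of patching in place; the decision itself is a switch over the lookup result. A standalone sketch of that switch, with stub identities reduced to strings and the transition details collapsed into one flag:

    #include <cassert>
    #include <string>

    enum PropertyType { FIELD, TRANSITION, NORMAL, CALLBACKS, INTERCEPTOR };

    // Returns the stub the IC would install; "" mirrors returning
    // Handle<Code>::null(), i.e. leave the IC in its current state.
    static std::string ComputeStoreMonomorphic(PropertyType type,
                                               bool transition_adds_plain_field) {
      switch (type) {
        case FIELD:
          return "keyed_store_field";
        case TRANSITION:
          // Only transitions that add an ordinary writable field are
          // cacheable as a field store with a new map...
          if (transition_adds_plain_field) return "keyed_store_field+map";
          // ...anything else falls through to the generic case.
        case NORMAL:
        case CALLBACKS:
        case INTERCEPTOR:
          return "generic";  // rewrite once so we never retry
      }
      return "";
    }

    int main() {
      assert(ComputeStoreMonomorphic(FIELD, false) == "keyed_store_field");
      assert(ComputeStoreMonomorphic(TRANSITION, true) == "keyed_store_field+map");
      assert(ComputeStoreMonomorphic(CALLBACKS, false) == "generic");
      return 0;
    }
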
@@ -2054,13 +1902,12 @@ RUNTIME_FUNCTION(MaybeObject*, CallIC_Miss) {
extra_ic_state,
args.at<Object>(0),
args.at<String>(1));
- // Result could be a function or a failure.
- JSFunction* raw_function = NULL;
+ JSFunction* raw_function;
if (!maybe_result->To(&raw_function)) return maybe_result;
// The first time the inline cache is updated may be the first time the
- // function it references gets called. If the function is lazily compiled
- // then the first call will trigger a compilation. We check for this case
+ // function it references gets called. If the function is lazily compiled
+ // then the first call will trigger a compilation. We check for this case
// and we do the compilation immediately, instead of waiting for the stub
// currently attached to the JSFunction object to trigger compilation.
if (raw_function->is_compiled()) return raw_function;
@@ -2095,7 +1942,7 @@ RUNTIME_FUNCTION(MaybeObject*, KeyedCallIC_Miss) {
RUNTIME_FUNCTION(MaybeObject*, LoadIC_Miss) {
HandleScope scope(isolate);
ASSERT(args.length() == 2);
- LoadIC ic(isolate);
+ LoadIC ic(IC::NO_EXTRA_FRAME, isolate);
IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
return ic.Load(state, args.at<Object>(0), args.at<String>(1));
}
@@ -2105,24 +1952,36 @@ RUNTIME_FUNCTION(MaybeObject*, LoadIC_Miss) {
RUNTIME_FUNCTION(MaybeObject*, KeyedLoadIC_Miss) {
HandleScope scope(isolate);
ASSERT(args.length() == 2);
- KeyedLoadIC ic(isolate);
+ KeyedLoadIC ic(IC::NO_EXTRA_FRAME, isolate);
+ IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
+ return ic.Load(state, args.at<Object>(0), args.at<Object>(1), MISS);
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, KeyedLoadIC_MissFromStubFailure) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 2);
+ KeyedLoadIC ic(IC::EXTRA_CALL_FRAME, isolate);
IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
- return ic.Load(state, args.at<Object>(0), args.at<Object>(1), false);
+ return ic.Load(state, args.at<Object>(0), args.at<Object>(1), MISS);
}
RUNTIME_FUNCTION(MaybeObject*, KeyedLoadIC_MissForceGeneric) {
HandleScope scope(isolate);
ASSERT(args.length() == 2);
- KeyedLoadIC ic(isolate);
+ KeyedLoadIC ic(IC::NO_EXTRA_FRAME, isolate);
IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
- return ic.Load(state, args.at<Object>(0), args.at<Object>(1), true);
+ return ic.Load(state,
+ args.at<Object>(0),
+ args.at<Object>(1),
+ MISS_FORCE_GENERIC);
}
// Used from ic-<arch>.cc.
RUNTIME_FUNCTION(MaybeObject*, StoreIC_Miss) {
- HandleScope scope;
+ HandleScope scope(isolate);
ASSERT(args.length() == 3);
StoreIC ic(isolate);
IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
@@ -2136,7 +1995,7 @@ RUNTIME_FUNCTION(MaybeObject*, StoreIC_Miss) {
RUNTIME_FUNCTION(MaybeObject*, StoreIC_ArrayLength) {
- NoHandleAllocation nha;
+ NoHandleAllocation nha(isolate);
ASSERT(args.length() == 2);
JSArray* receiver = JSArray::cast(args[0]);
@@ -2148,14 +2007,14 @@ RUNTIME_FUNCTION(MaybeObject*, StoreIC_ArrayLength) {
#ifdef DEBUG
// The length property has to be a writable callback property.
LookupResult debug_lookup(isolate);
- receiver->LocalLookup(isolate->heap()->length_symbol(), &debug_lookup);
+ receiver->LocalLookup(isolate->heap()->length_string(), &debug_lookup);
ASSERT(debug_lookup.IsPropertyCallbacks() && !debug_lookup.IsReadOnly());
#endif
Object* result;
- { MaybeObject* maybe_result = receiver->SetElementsLength(len);
- if (!maybe_result->ToObject(&result)) return maybe_result;
- }
+ MaybeObject* maybe_result = receiver->SetElementsLength(len);
+ if (!maybe_result->To(&result)) return maybe_result;
+
return len;
}
@@ -2164,7 +2023,7 @@ RUNTIME_FUNCTION(MaybeObject*, StoreIC_ArrayLength) {
// it is necessary to extend the properties array of a
// JSObject.
RUNTIME_FUNCTION(MaybeObject*, SharedStoreIC_ExtendStorage) {
- NoHandleAllocation na;
+ NoHandleAllocation na(isolate);
ASSERT(args.length() == 3);
// Convert the parameters
@@ -2208,12 +2067,12 @@ RUNTIME_FUNCTION(MaybeObject*, KeyedStoreIC_Miss) {
args.at<Object>(0),
args.at<Object>(1),
args.at<Object>(2),
- false);
+ MISS);
}
RUNTIME_FUNCTION(MaybeObject*, KeyedStoreIC_Slow) {
- NoHandleAllocation na;
+ NoHandleAllocation na(isolate);
ASSERT(args.length() == 3);
KeyedStoreIC ic(isolate);
Code::ExtraICState extra_ic_state = ic.target()->extra_ic_state();
@@ -2241,7 +2100,7 @@ RUNTIME_FUNCTION(MaybeObject*, KeyedStoreIC_MissForceGeneric) {
args.at<Object>(0),
args.at<Object>(1),
args.at<Object>(2),
- true);
+ MISS_FORCE_GENERIC);
}
@@ -2254,7 +2113,7 @@ const char* UnaryOpIC::GetName(TypeInfo type_info) {
switch (type_info) {
case UNINITIALIZED: return "Uninitialized";
case SMI: return "Smi";
- case HEAP_NUMBER: return "HeapNumbers";
+ case NUMBER: return "Number";
case GENERIC: return "Generic";
default: return "Invalid";
}
@@ -2266,10 +2125,10 @@ UnaryOpIC::State UnaryOpIC::ToState(TypeInfo type_info) {
case UNINITIALIZED:
return ::v8::internal::UNINITIALIZED;
case SMI:
- case HEAP_NUMBER:
+ case NUMBER:
return MONOMORPHIC;
case GENERIC:
- return MEGAMORPHIC;
+ return ::v8::internal::GENERIC;
}
UNREACHABLE();
return ::v8::internal::UNINITIALIZED;
@@ -2281,7 +2140,7 @@ UnaryOpIC::TypeInfo UnaryOpIC::GetTypeInfo(Handle<Object> operand) {
if (operand_type.IsSmi()) {
return SMI;
} else if (operand_type.IsNumber()) {
- return HEAP_NUMBER;
+ return NUMBER;
} else {
return GENERIC;
}
@@ -2289,24 +2148,22 @@ UnaryOpIC::TypeInfo UnaryOpIC::GetTypeInfo(Handle<Object> operand) {
UnaryOpIC::TypeInfo UnaryOpIC::ComputeNewType(
- UnaryOpIC::TypeInfo current_type,
- UnaryOpIC::TypeInfo previous_type) {
+ TypeInfo current_type,
+ TypeInfo previous_type) {
switch (previous_type) {
- case UnaryOpIC::UNINITIALIZED:
+ case UNINITIALIZED:
return current_type;
- case UnaryOpIC::SMI:
- return (current_type == UnaryOpIC::GENERIC)
- ? UnaryOpIC::GENERIC
- : UnaryOpIC::HEAP_NUMBER;
- case UnaryOpIC::HEAP_NUMBER:
- return UnaryOpIC::GENERIC;
- case UnaryOpIC::GENERIC:
+ case SMI:
+ return (current_type == GENERIC) ? GENERIC : NUMBER;
+ case NUMBER:
+ return GENERIC;
+ case GENERIC:
// We should never do patching if we are in GENERIC state.
UNREACHABLE();
- return UnaryOpIC::GENERIC;
+ return GENERIC;
}
UNREACHABLE();
- return UnaryOpIC::GENERIC;
+ return GENERIC;
}
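
The unary-op feedback lattice is small enough to model directly: UNINITIALIZED < SMI < NUMBER < GENERIC, widening one step per miss. A compilable sketch mirroring ComputeNewType above:

    #include <cassert>

    enum TypeInfo { UNINITIALIZED, SMI, NUMBER, GENERIC };

    static TypeInfo ComputeNewType(TypeInfo current, TypeInfo previous) {
      switch (previous) {
        case UNINITIALIZED:
          return current;                                 // first feedback wins
        case SMI:
          return current == GENERIC ? GENERIC : NUMBER;   // widen one step
        case NUMBER:
          return GENERIC;                                 // second miss: give up
        case GENERIC:
          return GENERIC;                                 // never re-patched
      }
      return GENERIC;
    }

    int main() {
      // A smi site that sees a heap number widens to NUMBER, not GENERIC...
      assert(ComputeNewType(NUMBER, SMI) == NUMBER);
      // ...but a NUMBER site that misses again goes straight to GENERIC.
      assert(ComputeNewType(SMI, NUMBER) == GENERIC);
      return 0;
    }
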
@@ -2318,12 +2175,11 @@ void BinaryOpIC::patch(Code* code) {
const char* BinaryOpIC::GetName(TypeInfo type_info) {
switch (type_info) {
case UNINITIALIZED: return "Uninitialized";
- case SMI: return "SMI";
- case INT32: return "Int32s";
- case HEAP_NUMBER: return "HeapNumbers";
+ case SMI: return "Smi";
+ case INT32: return "Int32";
+ case NUMBER: return "Number";
case ODDBALL: return "Oddball";
- case BOTH_STRING: return "BothStrings";
- case STRING: return "Strings";
+ case STRING: return "String";
case GENERIC: return "Generic";
default: return "Invalid";
}
@@ -2336,71 +2192,18 @@ BinaryOpIC::State BinaryOpIC::ToState(TypeInfo type_info) {
return ::v8::internal::UNINITIALIZED;
case SMI:
case INT32:
- case HEAP_NUMBER:
+ case NUMBER:
case ODDBALL:
- case BOTH_STRING:
case STRING:
return MONOMORPHIC;
case GENERIC:
- return MEGAMORPHIC;
+ return ::v8::internal::GENERIC;
}
UNREACHABLE();
return ::v8::internal::UNINITIALIZED;
}
-BinaryOpIC::TypeInfo BinaryOpIC::JoinTypes(BinaryOpIC::TypeInfo x,
- BinaryOpIC::TypeInfo y) {
- if (x == UNINITIALIZED) return y;
- if (y == UNINITIALIZED) return x;
- if (x == y) return x;
- if (x == BOTH_STRING && y == STRING) return STRING;
- if (x == STRING && y == BOTH_STRING) return STRING;
- if (x == STRING || x == BOTH_STRING || y == STRING || y == BOTH_STRING) {
- return GENERIC;
- }
- if (x > y) return x;
- return y;
-}
-
-
-BinaryOpIC::TypeInfo BinaryOpIC::GetTypeInfo(Handle<Object> left,
- Handle<Object> right) {
- ::v8::internal::TypeInfo left_type =
- ::v8::internal::TypeInfo::TypeFromValue(left);
- ::v8::internal::TypeInfo right_type =
- ::v8::internal::TypeInfo::TypeFromValue(right);
-
- if (left_type.IsSmi() && right_type.IsSmi()) {
- return SMI;
- }
-
- if (left_type.IsInteger32() && right_type.IsInteger32()) {
- // Platforms with 32-bit Smis have no distinct INT32 type.
- if (kSmiValueSize == 32) return SMI;
- return INT32;
- }
-
- if (left_type.IsNumber() && right_type.IsNumber()) {
- return HEAP_NUMBER;
- }
-
- // Patching for fast string ADD makes sense even if only one of the
- // arguments is a string.
- if (left_type.IsString()) {
- return right_type.IsString() ? BOTH_STRING : STRING;
- } else if (right_type.IsString()) {
- return STRING;
- }
-
- // Check for oddball objects.
- if (left->IsUndefined() && right->IsNumber()) return ODDBALL;
- if (left->IsNumber() && right->IsUndefined()) return ODDBALL;
-
- return GENERIC;
-}
-
-
RUNTIME_FUNCTION(MaybeObject*, UnaryOp_Patch) {
ASSERT(args.length() == 4);
@@ -2415,13 +2218,16 @@ RUNTIME_FUNCTION(MaybeObject*, UnaryOp_Patch) {
type = UnaryOpIC::ComputeNewType(type, previous_type);
UnaryOpStub stub(op, mode, type);
- Handle<Code> code = stub.GetCode();
+ Handle<Code> code = stub.GetCode(isolate);
if (!code.is_null()) {
if (FLAG_trace_ic) {
- PrintF("[UnaryOpIC (%s->%s)#%s]\n",
+ PrintF("[UnaryOpIC in ");
+ JavaScriptFrame::PrintTop(isolate, stdout, false, true);
+ PrintF(" (%s->%s)#%s @ %p]\n",
UnaryOpIC::GetName(previous_type),
UnaryOpIC::GetName(type),
- Token::Name(op));
+ Token::Name(op),
+ static_cast<void*>(*code));
}
UnaryOpIC ic(isolate);
ic.patch(*code);
@@ -2452,25 +2258,72 @@ RUNTIME_FUNCTION(MaybeObject*, UnaryOp_Patch) {
return *result;
}
+
+static BinaryOpIC::TypeInfo TypeInfoFromValue(Handle<Object> value,
+ Token::Value op) {
+ ::v8::internal::TypeInfo type =
+ ::v8::internal::TypeInfo::TypeFromValue(value);
+ if (type.IsSmi()) return BinaryOpIC::SMI;
+ if (type.IsInteger32()) {
+ if (kSmiValueSize == 32) return BinaryOpIC::SMI;
+ return BinaryOpIC::INT32;
+ }
+ if (type.IsNumber()) return BinaryOpIC::NUMBER;
+ if (type.IsString()) return BinaryOpIC::STRING;
+ if (value->IsUndefined()) {
+ if (op == Token::BIT_AND ||
+ op == Token::BIT_OR ||
+ op == Token::BIT_XOR ||
+ op == Token::SAR ||
+ op == Token::SHL ||
+ op == Token::SHR) {
+ if (kSmiValueSize == 32) return BinaryOpIC::SMI;
+ return BinaryOpIC::INT32;
+ }
+ return BinaryOpIC::ODDBALL;
+ }
+ return BinaryOpIC::GENERIC;
+}
+
+
+static BinaryOpIC::TypeInfo InputState(BinaryOpIC::TypeInfo old_type,
+ Handle<Object> value,
+ Token::Value op) {
+ BinaryOpIC::TypeInfo new_type = TypeInfoFromValue(value, op);
+ if (old_type == BinaryOpIC::STRING) {
+ if (new_type == BinaryOpIC::STRING) return new_type;
+ return BinaryOpIC::GENERIC;
+ }
+ return Max(old_type, new_type);
+}
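
Because the new BinaryOpIC states are declared in widening order along the numeric chain, InputState can use a plain max as the lattice join, with STRING handled off-chain. A sketch of that join (enum values here are illustrative but follow the order declared in ic.h below):

    #include <algorithm>
    #include <cassert>

    enum TypeInfo { UNINITIALIZED, SMI, INT32, NUMBER, ODDBALL, STRING, GENERIC };

    static TypeInfo JoinInputState(TypeInfo old_type, TypeInfo new_type) {
      // STRING does not sit on the numeric chain: once an operand has been
      // a string, anything else forces GENERIC rather than numeric widening.
      if (old_type == STRING) {
        return new_type == STRING ? STRING : GENERIC;
      }
      return std::max(old_type, new_type);  // lattice join on the chain
    }

    int main() {
      assert(JoinInputState(SMI, NUMBER) == NUMBER);         // numeric widening
      assert(JoinInputState(STRING, SMI) == GENERIC);        // string + non-string
      assert(JoinInputState(UNINITIALIZED, INT32) == INT32); // first feedback
      return 0;
    }
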
+
+
RUNTIME_FUNCTION(MaybeObject*, BinaryOp_Patch) {
- ASSERT(args.length() == 5);
+ ASSERT(args.length() == 3);
HandleScope scope(isolate);
Handle<Object> left = args.at<Object>(0);
Handle<Object> right = args.at<Object>(1);
int key = args.smi_at(2);
- Token::Value op = static_cast<Token::Value>(args.smi_at(3));
- BinaryOpIC::TypeInfo previous_type =
- static_cast<BinaryOpIC::TypeInfo>(args.smi_at(4));
+ Token::Value op = BinaryOpStub::decode_op_from_minor_key(key);
+ BinaryOpIC::TypeInfo previous_left, previous_right, unused_previous_result;
+ BinaryOpStub::decode_types_from_minor_key(
+ key, &previous_left, &previous_right, &unused_previous_result);
- BinaryOpIC::TypeInfo type = BinaryOpIC::GetTypeInfo(left, right);
- type = BinaryOpIC::JoinTypes(type, previous_type);
+ BinaryOpIC::TypeInfo new_left = InputState(previous_left, left, op);
+ BinaryOpIC::TypeInfo new_right = InputState(previous_right, right, op);
BinaryOpIC::TypeInfo result_type = BinaryOpIC::UNINITIALIZED;
- if ((type == BinaryOpIC::STRING || type == BinaryOpIC::BOTH_STRING) &&
+
+ // STRING is only used for ADD operations.
+ if ((new_left == BinaryOpIC::STRING || new_right == BinaryOpIC::STRING) &&
op != Token::ADD) {
- type = BinaryOpIC::GENERIC;
+ new_left = new_right = BinaryOpIC::GENERIC;
}
- if (type == BinaryOpIC::SMI && previous_type == BinaryOpIC::SMI) {
+
+ BinaryOpIC::TypeInfo new_overall = Max(new_left, new_right);
+ BinaryOpIC::TypeInfo previous_overall = Max(previous_left, previous_right);
+
+ if (new_overall == BinaryOpIC::SMI && previous_overall == BinaryOpIC::SMI) {
if (op == Token::DIV ||
op == Token::MUL ||
op == Token::SHR ||
@@ -2479,32 +2332,41 @@ RUNTIME_FUNCTION(MaybeObject*, BinaryOp_Patch) {
// That is the only way to get here from the Smi stub.
// With 32-bit Smis, all overflows give heap numbers, but with
// 31-bit Smis, most operations overflow to int32 results.
- result_type = BinaryOpIC::HEAP_NUMBER;
+ result_type = BinaryOpIC::NUMBER;
} else {
// Other operations on SMIs that overflow yield int32s.
result_type = BinaryOpIC::INT32;
}
}
- if (type == BinaryOpIC::INT32 && previous_type == BinaryOpIC::INT32) {
- // We must be here because an operation on two INT32 types overflowed.
- result_type = BinaryOpIC::HEAP_NUMBER;
+ if (new_overall == BinaryOpIC::INT32 &&
+ previous_overall == BinaryOpIC::INT32) {
+ if (new_left == previous_left && new_right == previous_right) {
+ result_type = BinaryOpIC::NUMBER;
+ }
}
- BinaryOpStub stub(key, type, result_type);
- Handle<Code> code = stub.GetCode();
+ BinaryOpStub stub(key, new_left, new_right, result_type);
+ Handle<Code> code = stub.GetCode(isolate);
if (!code.is_null()) {
+#ifdef DEBUG
if (FLAG_trace_ic) {
- PrintF("[BinaryOpIC (%s->(%s->%s))#%s]\n",
- BinaryOpIC::GetName(previous_type),
- BinaryOpIC::GetName(type),
+ PrintF("[BinaryOpIC in ");
+ JavaScriptFrame::PrintTop(isolate, stdout, false, true);
+ PrintF(" ((%s+%s)->((%s+%s)->%s))#%s @ %p]\n",
+ BinaryOpIC::GetName(previous_left),
+ BinaryOpIC::GetName(previous_right),
+ BinaryOpIC::GetName(new_left),
+ BinaryOpIC::GetName(new_right),
BinaryOpIC::GetName(result_type),
- Token::Name(op));
+ Token::Name(op),
+ static_cast<void*>(*code));
}
+#endif
BinaryOpIC ic(isolate);
ic.patch(*code);
// Activate inlined smi code.
- if (previous_type == BinaryOpIC::UNINITIALIZED) {
+ if (previous_overall == BinaryOpIC::UNINITIALIZED) {
PatchInlinedSmiCode(ic.address(), ENABLE_INLINED_SMI_CHECK);
}
}
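
When both operands stayed SMI and the stub still missed, the operation must have overflowed, and the patched stub's expected result type depends only on the operator. A sketch of that inference, matching the branch above:

    #include <cassert>

    enum TypeInfo { SMI, INT32, NUMBER };
    enum Op { ADD, SUB, MUL, DIV, MOD, SHR };

    static TypeInfo SmiOverflowResult(Op op) {
      // Division, multiplication, MOD and SHR can produce fractions or
      // full uint32 values, so the result is a heap number.
      if (op == DIV || op == MUL || op == SHR || op == MOD) return NUMBER;
      // Remaining smi operations overflow at most into int32 range
      // (with 31-bit smis; 32-bit-smi platforms box these as numbers).
      return INT32;
    }

    int main() {
      assert(SmiOverflowResult(DIV) == NUMBER);
      assert(SmiOverflowResult(ADD) == INT32);
      return 0;
    }
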
@@ -2567,43 +2429,29 @@ RUNTIME_FUNCTION(MaybeObject*, BinaryOp_Patch) {
Code* CompareIC::GetRawUninitialized(Token::Value op) {
- ICCompareStub stub(op, UNINITIALIZED);
+ ICCompareStub stub(op, UNINITIALIZED, UNINITIALIZED, UNINITIALIZED);
Code* code = NULL;
- CHECK(stub.FindCodeInCache(&code));
+ CHECK(stub.FindCodeInCache(&code, Isolate::Current()));
return code;
}
-Handle<Code> CompareIC::GetUninitialized(Token::Value op) {
- ICCompareStub stub(op, UNINITIALIZED);
- return stub.GetCode();
-}
-
-
-CompareIC::State CompareIC::ComputeState(Code* target) {
- int key = target->major_key();
- if (key == CodeStub::Compare) return GENERIC;
- ASSERT(key == CodeStub::CompareIC);
- return static_cast<State>(target->compare_state());
-}
-
-
-Token::Value CompareIC::ComputeOperation(Code* target) {
- ASSERT(target->major_key() == CodeStub::CompareIC);
- return static_cast<Token::Value>(
- target->compare_operation() + Token::EQ);
+Handle<Code> CompareIC::GetUninitialized(Isolate* isolate, Token::Value op) {
+ ICCompareStub stub(op, UNINITIALIZED, UNINITIALIZED, UNINITIALIZED);
+ return stub.GetCode(isolate);
}
const char* CompareIC::GetStateName(State state) {
switch (state) {
case UNINITIALIZED: return "UNINITIALIZED";
- case SMIS: return "SMIS";
- case HEAP_NUMBERS: return "HEAP_NUMBERS";
- case OBJECTS: return "OBJECTS";
- case KNOWN_OBJECTS: return "KNOWN_OBJECTS";
- case SYMBOLS: return "SYMBOLS";
- case STRINGS: return "STRINGS";
+ case SMI: return "SMI";
+ case NUMBER: return "NUMBER";
+ case INTERNALIZED_STRING: return "INTERNALIZED_STRING";
+ case STRING: return "STRING";
+ case UNIQUE_NAME: return "UNIQUE_NAME";
+ case OBJECT: return "OBJECT";
+ case KNOWN_OBJECT: return "KNOWN_OBJECT";
case GENERIC: return "GENERIC";
default:
UNREACHABLE();
@@ -2612,61 +2460,153 @@ const char* CompareIC::GetStateName(State state) {
}
-CompareIC::State CompareIC::TargetState(State state,
+static CompareIC::State InputState(CompareIC::State old_state,
+ Handle<Object> value) {
+ switch (old_state) {
+ case CompareIC::UNINITIALIZED:
+ if (value->IsSmi()) return CompareIC::SMI;
+ if (value->IsHeapNumber()) return CompareIC::NUMBER;
+ if (value->IsInternalizedString()) return CompareIC::INTERNALIZED_STRING;
+ if (value->IsString()) return CompareIC::STRING;
+ if (value->IsSymbol()) return CompareIC::UNIQUE_NAME;
+ if (value->IsJSObject()) return CompareIC::OBJECT;
+ break;
+ case CompareIC::SMI:
+ if (value->IsSmi()) return CompareIC::SMI;
+ if (value->IsHeapNumber()) return CompareIC::NUMBER;
+ break;
+ case CompareIC::NUMBER:
+ if (value->IsNumber()) return CompareIC::NUMBER;
+ break;
+ case CompareIC::INTERNALIZED_STRING:
+ if (value->IsInternalizedString()) return CompareIC::INTERNALIZED_STRING;
+ if (value->IsString()) return CompareIC::STRING;
+ if (value->IsSymbol()) return CompareIC::UNIQUE_NAME;
+ break;
+ case CompareIC::STRING:
+ if (value->IsString()) return CompareIC::STRING;
+ break;
+ case CompareIC::UNIQUE_NAME:
+ if (value->IsUniqueName()) return CompareIC::UNIQUE_NAME;
+ break;
+ case CompareIC::OBJECT:
+ if (value->IsJSObject()) return CompareIC::OBJECT;
+ break;
+ case CompareIC::GENERIC:
+ break;
+ case CompareIC::KNOWN_OBJECT:
+ UNREACHABLE();
+ break;
+ }
+ return CompareIC::GENERIC;
+}
+
+
+CompareIC::State CompareIC::TargetState(State old_state,
+ State old_left,
+ State old_right,
bool has_inlined_smi_code,
Handle<Object> x,
Handle<Object> y) {
- switch (state) {
+ switch (old_state) {
case UNINITIALIZED:
- if (x->IsSmi() && y->IsSmi()) return SMIS;
- if (x->IsNumber() && y->IsNumber()) return HEAP_NUMBERS;
+ if (x->IsSmi() && y->IsSmi()) return SMI;
+ if (x->IsNumber() && y->IsNumber()) return NUMBER;
if (Token::IsOrderedRelationalCompareOp(op_)) {
// Ordered comparisons treat undefined as NaN, so the
- // HEAP_NUMBER stub will do the right thing.
+ // NUMBER stub will do the right thing.
if ((x->IsNumber() && y->IsUndefined()) ||
(y->IsNumber() && x->IsUndefined())) {
- return HEAP_NUMBERS;
+ return NUMBER;
}
}
- if (x->IsSymbol() && y->IsSymbol()) {
- // We compare symbols as strings if we need to determine
+ if (x->IsInternalizedString() && y->IsInternalizedString()) {
+ // We compare internalized strings as plain ones if we need to determine
// the order in a non-equality compare.
- return Token::IsEqualityOp(op_) ? SYMBOLS : STRINGS;
+ return Token::IsEqualityOp(op_) ? INTERNALIZED_STRING : STRING;
}
- if (x->IsString() && y->IsString()) return STRINGS;
+ if (x->IsString() && y->IsString()) return STRING;
if (!Token::IsEqualityOp(op_)) return GENERIC;
+ if (x->IsUniqueName() && y->IsUniqueName()) return UNIQUE_NAME;
if (x->IsJSObject() && y->IsJSObject()) {
if (Handle<JSObject>::cast(x)->map() ==
- Handle<JSObject>::cast(y)->map() &&
- Token::IsEqualityOp(op_)) {
- return KNOWN_OBJECTS;
+ Handle<JSObject>::cast(y)->map()) {
+ return KNOWN_OBJECT;
} else {
- return OBJECTS;
+ return OBJECT;
}
}
return GENERIC;
- case SMIS:
- return has_inlined_smi_code && x->IsNumber() && y->IsNumber()
- ? HEAP_NUMBERS
- : GENERIC;
- case SYMBOLS:
+ case SMI:
+ return x->IsNumber() && y->IsNumber() ? NUMBER : GENERIC;
+ case INTERNALIZED_STRING:
ASSERT(Token::IsEqualityOp(op_));
- return x->IsString() && y->IsString() ? STRINGS : GENERIC;
- case HEAP_NUMBERS:
- case STRINGS:
- case OBJECTS:
- case KNOWN_OBJECTS:
+ if (x->IsString() && y->IsString()) return STRING;
+ if (x->IsUniqueName() && y->IsUniqueName()) return UNIQUE_NAME;
+ return GENERIC;
+ case NUMBER:
+      // If the failure was due to one side changing from smi to heap number,
+      // then keep the state (if the other side changed at the same time, we
+      // will get a second miss and then go to generic).
+ if (old_left == SMI && x->IsHeapNumber()) return NUMBER;
+ if (old_right == SMI && y->IsHeapNumber()) return NUMBER;
+ return GENERIC;
+ case KNOWN_OBJECT:
+ ASSERT(Token::IsEqualityOp(op_));
+ if (x->IsJSObject() && y->IsJSObject()) return OBJECT;
+ return GENERIC;
+ case STRING:
+ case UNIQUE_NAME:
+ case OBJECT:
case GENERIC:
return GENERIC;
}
UNREACHABLE();
- return GENERIC;
+ return GENERIC; // Make the compiler happy.
+}
+
+
+void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
+ HandleScope scope(isolate());
+ State previous_left, previous_right, previous_state;
+ ICCompareStub::DecodeMinorKey(target()->stub_info(), &previous_left,
+ &previous_right, &previous_state, NULL);
+ State new_left = InputState(previous_left, x);
+ State new_right = InputState(previous_right, y);
+ State state = TargetState(previous_state, previous_left, previous_right,
+ HasInlinedSmiCode(address()), x, y);
+ ICCompareStub stub(op_, new_left, new_right, state);
+ if (state == KNOWN_OBJECT) {
+ stub.set_known_map(Handle<Map>(Handle<JSObject>::cast(x)->map()));
+ }
+ set_target(*stub.GetCode(isolate()));
+
+#ifdef DEBUG
+ if (FLAG_trace_ic) {
+ PrintF("[CompareIC in ");
+ JavaScriptFrame::PrintTop(isolate(), stdout, false, true);
+ PrintF(" ((%s+%s=%s)->(%s+%s=%s))#%s @ %p]\n",
+ GetStateName(previous_left),
+ GetStateName(previous_right),
+ GetStateName(previous_state),
+ GetStateName(new_left),
+ GetStateName(new_right),
+ GetStateName(state),
+ Token::Name(op_),
+ static_cast<void*>(*stub.GetCode(isolate())));
+ }
+#endif
+
+ // Activate inlined smi code.
+ if (previous_state == UNINITIALIZED) {
+ PatchInlinedSmiCode(address(), ENABLE_INLINED_SMI_CHECK);
+ }
}
-// Used from ic_<arch>.cc.
+// Used from ICCompareStub::GenerateMiss in code-stubs-<arch>.cc.
RUNTIME_FUNCTION(Code*, CompareIC_Miss) {
- NoHandleAllocation na;
+ NoHandleAllocation na(isolate);
ASSERT(args.length() == 3);
CompareIC ic(isolate, static_cast<Token::Value>(args.smi_at(2)));
ic.UpdateCaches(args.at<Object>(0), args.at<Object>(1));
@@ -2687,7 +2627,7 @@ RUNTIME_FUNCTION(MaybeObject*, ToBoolean_Patch) {
old_types.TraceTransition(new_types);
ToBooleanStub stub(tos, new_types);
- Handle<Code> code = stub.GetCode();
+ Handle<Code> code = stub.GetCode(isolate);
ToBooleanIC ic(isolate);
ic.patch(*code);
return Smi::FromInt(to_boolean_value ? 1 : 0);
diff --git a/src/3rdparty/v8/src/ic.h b/src/3rdparty/v8/src/ic.h
index 389c845..cdbdbbd 100644
--- a/src/3rdparty/v8/src/ic.h
+++ b/src/3rdparty/v8/src/ic.h
@@ -50,7 +50,6 @@ namespace internal {
ICU(KeyedStoreIC_MissForceGeneric) \
ICU(KeyedStoreIC_Slow) \
/* Utilities for IC stubs. */ \
- ICU(LoadCallbackProperty) \
ICU(StoreCallbackProperty) \
ICU(LoadPropertyWithInterceptorOnly) \
ICU(LoadPropertyWithInterceptorForLoad) \
@@ -97,8 +96,6 @@ class IC {
Code* target() const { return GetTargetAtAddress(address()); }
inline Address address() const;
- virtual bool IsGeneric() const { return false; }
-
// Compute the current IC state based on the target stub, receiver and name.
static State StateFrom(Code* target, Object* receiver, Object* name);
@@ -117,17 +114,17 @@ class IC {
// Returns if this IC is for contextual (no explicit receiver)
// access to properties.
- bool IsContextual(Handle<Object> receiver) {
+ bool IsUndeclaredGlobal(Handle<Object> receiver) {
if (receiver->IsGlobalObject() ||
IsQmlGlobal(receiver)) {
- return SlowIsContextual();
+ return SlowIsUndeclaredGlobal();
} else {
- ASSERT(!SlowIsContextual());
+ ASSERT(!SlowIsUndeclaredGlobal());
return false;
}
}
- bool SlowIsContextual() {
+ bool SlowIsUndeclaredGlobal() {
return ComputeMode() == RelocInfo::CODE_TARGET_CONTEXT;
}
@@ -137,7 +134,8 @@ class IC {
JSObject* holder);
static inline InlineCacheHolderFlag GetCodeCacheForObject(JSObject* object,
JSObject* holder);
- static inline JSObject* GetCodeCacheHolder(Object* object,
+ static inline JSObject* GetCodeCacheHolder(Isolate* isolate,
+ Object* object,
InlineCacheHolderFlag holder);
protected:
@@ -173,6 +171,39 @@ class IC {
static inline void SetTargetAtAddress(Address address, Code* target);
static void PostPatching(Address address, Code* target, Code* old_target);
+ virtual void UpdateMonomorphicIC(Handle<JSObject> receiver,
+ Handle<Code> handler,
+ Handle<String> name) {
+ set_target(*handler);
+ }
+ bool UpdatePolymorphicIC(State state,
+ StrictModeFlag strict_mode,
+ Handle<JSObject> receiver,
+ Handle<String> name,
+ Handle<Code> code);
+ void PatchCache(State state,
+ StrictModeFlag strict_mode,
+ Handle<JSObject> receiver,
+ Handle<String> name,
+ Handle<Code> code);
+ virtual void UpdateMegamorphicCache(Map* map, String* name, Code* code);
+ virtual Handle<Code> megamorphic_stub() {
+ UNREACHABLE();
+ return Handle<Code>::null();
+ }
+ virtual Handle<Code> megamorphic_stub_strict() {
+ UNREACHABLE();
+ return Handle<Code>::null();
+ }
+ virtual Handle<Code> generic_stub() const {
+ UNREACHABLE();
+ return Handle<Code>::null();
+ }
+ virtual Handle<Code> generic_stub_strict() const {
+ UNREACHABLE();
+ return Handle<Code>::null();
+ }
+
private:
// Frame pointer for the frame that uses (calls) the IC.
Address fp_;
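
The refactoring above moves the stub accessors into virtual methods on the base IC so that shared patching logic (UpdatePolymorphicIC, PatchCache) can be written once. A minimal sketch of that template-method shape, with stubs reduced to strings and the base-class bodies standing in for the UNREACHABLE() defaults:

    #include <cstdio>

    struct IC {
      virtual ~IC() {}
      virtual const char* generic_stub() const { return "UNREACHABLE"; }
      virtual const char* megamorphic_stub() { return "UNREACHABLE"; }
      // A PatchCache-style helper: policy in the base class, concrete stub
      // supplied by the subclass override.
      const char* GiveUp() { return generic_stub(); }
    };

    struct KeyedStoreIC : IC {
      const char* generic_stub() const override {
        return "KeyedStoreIC_Generic";
      }
    };

    int main() {
      KeyedStoreIC ic;
      std::printf("%s\n", ic.GiveUp());  // prints KeyedStoreIC_Generic
      return 0;
    }
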
@@ -327,14 +358,10 @@ class KeyedCallIC: public CallICBase {
class LoadIC: public IC {
public:
- explicit LoadIC(Isolate* isolate) : IC(NO_EXTRA_FRAME, isolate) {
- ASSERT(target()->is_load_stub());
+ explicit LoadIC(FrameDepth depth, Isolate* isolate) : IC(depth, isolate) {
+ ASSERT(target()->is_load_stub() || target()->is_keyed_load_stub());
}
- MUST_USE_RESULT MaybeObject* Load(State state,
- Handle<Object> object,
- Handle<String> name);
-
// Code generator routines.
static void GenerateInitialize(MacroAssembler* masm) { GenerateMiss(masm); }
static void GeneratePreMonomorphic(MacroAssembler* masm) {
@@ -344,29 +371,41 @@ class LoadIC: public IC {
static void GenerateMegamorphic(MacroAssembler* masm);
static void GenerateNormal(MacroAssembler* masm);
- // Specialized code generator routines.
- static void GenerateArrayLength(MacroAssembler* masm);
- static void GenerateStringLength(MacroAssembler* masm,
- bool support_wrappers);
- static void GenerateFunctionPrototype(MacroAssembler* masm);
+ MUST_USE_RESULT MaybeObject* Load(State state,
+ Handle<Object> object,
+ Handle<String> name);
+
+ protected:
+ virtual Code::Kind kind() const { return Code::LOAD_IC; }
+
+ virtual Handle<Code> generic_stub() const {
+ UNREACHABLE();
+ return Handle<Code>::null();
+ }
+
+ virtual Handle<Code> megamorphic_stub() {
+ return isolate()->builtins()->LoadIC_Megamorphic();
+ }
- private:
// Update the inline cache and the global stub cache based on the
// lookup result.
void UpdateCaches(LookupResult* lookup,
State state,
Handle<Object> object,
Handle<String> name);
+ virtual void UpdateMonomorphicIC(Handle<JSObject> receiver,
+ Handle<Code> handler,
+ Handle<String> name);
+ virtual Handle<Code> ComputeLoadHandler(LookupResult* lookup,
+ Handle<JSObject> receiver,
+ Handle<String> name);
+ private:
// Stub accessors.
- Handle<Code> megamorphic_stub() {
- return isolate()->builtins()->LoadIC_Megamorphic();
+ static Handle<Code> initialize_stub() {
+ return Isolate::Current()->builtins()->LoadIC_Initialize();
}
- static Code* initialize_stub() {
- return Isolate::Current()->builtins()->builtin(
- Builtins::kLoadIC_Initialize);
- }
- Handle<Code> pre_monomorphic_stub() {
+ virtual Handle<Code> pre_monomorphic_stub() {
return isolate()->builtins()->LoadIC_PreMonomorphic();
}
@@ -376,131 +415,32 @@ class LoadIC: public IC {
};
-class KeyedIC: public IC {
- public:
- enum StubKind {
- LOAD,
- STORE_NO_TRANSITION,
- STORE_TRANSITION_SMI_TO_OBJECT,
- STORE_TRANSITION_SMI_TO_DOUBLE,
- STORE_TRANSITION_DOUBLE_TO_OBJECT,
- STORE_TRANSITION_HOLEY_SMI_TO_OBJECT,
- STORE_TRANSITION_HOLEY_SMI_TO_DOUBLE,
- STORE_TRANSITION_HOLEY_DOUBLE_TO_OBJECT,
- STORE_AND_GROW_NO_TRANSITION,
- STORE_AND_GROW_TRANSITION_SMI_TO_OBJECT,
- STORE_AND_GROW_TRANSITION_SMI_TO_DOUBLE,
- STORE_AND_GROW_TRANSITION_DOUBLE_TO_OBJECT,
- STORE_AND_GROW_TRANSITION_HOLEY_SMI_TO_OBJECT,
- STORE_AND_GROW_TRANSITION_HOLEY_SMI_TO_DOUBLE,
- STORE_AND_GROW_TRANSITION_HOLEY_DOUBLE_TO_OBJECT
- };
-
- static const int kGrowICDelta = STORE_AND_GROW_NO_TRANSITION -
- STORE_NO_TRANSITION;
- STATIC_ASSERT(kGrowICDelta ==
- STORE_AND_GROW_TRANSITION_SMI_TO_OBJECT -
- STORE_TRANSITION_SMI_TO_OBJECT);
- STATIC_ASSERT(kGrowICDelta ==
- STORE_AND_GROW_TRANSITION_SMI_TO_DOUBLE -
- STORE_TRANSITION_SMI_TO_DOUBLE);
- STATIC_ASSERT(kGrowICDelta ==
- STORE_AND_GROW_TRANSITION_DOUBLE_TO_OBJECT -
- STORE_TRANSITION_DOUBLE_TO_OBJECT);
-
- explicit KeyedIC(Isolate* isolate) : IC(NO_EXTRA_FRAME, isolate) {}
- virtual ~KeyedIC() {}
-
- static inline KeyedAccessGrowMode GetGrowModeFromStubKind(
- StubKind stub_kind) {
- return (stub_kind >= STORE_AND_GROW_NO_TRANSITION)
- ? ALLOW_JSARRAY_GROWTH
- : DO_NOT_ALLOW_JSARRAY_GROWTH;
- }
-
- static inline StubKind GetGrowStubKind(StubKind stub_kind) {
- ASSERT(stub_kind != LOAD);
- if (stub_kind < STORE_AND_GROW_NO_TRANSITION) {
- stub_kind = static_cast<StubKind>(static_cast<int>(stub_kind) +
- kGrowICDelta);
- }
- return stub_kind;
- }
-
- virtual Handle<Code> GetElementStubWithoutMapCheck(
- bool is_js_array,
- ElementsKind elements_kind,
- KeyedAccessGrowMode grow_mode) = 0;
-
- protected:
- virtual Handle<Code> string_stub() {
- return Handle<Code>::null();
- }
-
- virtual Code::Kind kind() const = 0;
-
- Handle<Code> ComputeStub(Handle<JSObject> receiver,
- StubKind stub_kind,
- StrictModeFlag strict_mode,
- Handle<Code> default_stub);
-
- virtual Handle<Code> ComputePolymorphicStub(
- MapHandleList* receiver_maps,
- StrictModeFlag strict_mode,
- KeyedAccessGrowMode grow_mode) = 0;
-
- Handle<Code> ComputeMonomorphicStubWithoutMapCheck(
- Handle<Map> receiver_map,
- StrictModeFlag strict_mode,
- KeyedAccessGrowMode grow_mode);
-
- private:
- void GetReceiverMapsForStub(Handle<Code> stub, MapHandleList* result);
-
- Handle<Code> ComputeMonomorphicStub(Handle<Map> receiver_map,
- StubKind stub_kind,
- StrictModeFlag strict_mode,
- Handle<Code> default_stub);
-
- Handle<Map> ComputeTransitionedMap(Handle<JSObject> receiver,
- StubKind stub_kind);
-
- static bool IsTransitionStubKind(StubKind stub_kind) {
- return stub_kind > STORE_NO_TRANSITION &&
- stub_kind != STORE_AND_GROW_NO_TRANSITION;
- }
-
- static bool IsGrowStubKind(StubKind stub_kind) {
- return stub_kind >= STORE_AND_GROW_NO_TRANSITION;
- }
-
- static StubKind GetNoTransitionStubKind(StubKind stub_kind) {
- if (!IsTransitionStubKind(stub_kind)) return stub_kind;
- if (IsGrowStubKind(stub_kind)) return STORE_AND_GROW_NO_TRANSITION;
- return STORE_NO_TRANSITION;
- }
+enum ICMissMode {
+ MISS_FORCE_GENERIC,
+ MISS
};
-class KeyedLoadIC: public KeyedIC {
+class KeyedLoadIC: public LoadIC {
public:
- explicit KeyedLoadIC(Isolate* isolate) : KeyedIC(isolate) {
+ explicit KeyedLoadIC(FrameDepth depth, Isolate* isolate)
+ : LoadIC(depth, isolate) {
ASSERT(target()->is_keyed_load_stub());
}
MUST_USE_RESULT MaybeObject* Load(State state,
Handle<Object> object,
Handle<Object> key,
- bool force_generic_stub);
+ ICMissMode force_generic);
// Code generator routines.
- static void GenerateMiss(MacroAssembler* masm, bool force_generic);
+ static void GenerateMiss(MacroAssembler* masm, ICMissMode force_generic);
static void GenerateRuntimeGetProperty(MacroAssembler* masm);
static void GenerateInitialize(MacroAssembler* masm) {
- GenerateMiss(masm, false);
+ GenerateMiss(masm, MISS);
}
static void GeneratePreMonomorphic(MacroAssembler* masm) {
- GenerateMiss(masm, false);
+ GenerateMiss(masm, MISS);
}
static void GenerateGeneric(MacroAssembler* masm);
static void GenerateString(MacroAssembler* masm);
@@ -514,45 +454,33 @@ class KeyedLoadIC: public KeyedIC {
static const int kSlowCaseBitFieldMask =
(1 << Map::kIsAccessCheckNeeded) | (1 << Map::kHasIndexedInterceptor);
- virtual Handle<Code> GetElementStubWithoutMapCheck(
- bool is_js_array,
- ElementsKind elements_kind,
- KeyedAccessGrowMode grow_mode);
-
- virtual bool IsGeneric() const {
- return target() == *generic_stub();
- }
-
protected:
virtual Code::Kind kind() const { return Code::KEYED_LOAD_IC; }
- virtual Handle<Code> ComputePolymorphicStub(MapHandleList* receiver_maps,
- StrictModeFlag strict_mode,
- KeyedAccessGrowMode grow_mode);
+ Handle<Code> LoadElementStub(Handle<JSObject> receiver);
- virtual Handle<Code> string_stub() {
- return isolate()->builtins()->KeyedLoadIC_String();
+ virtual Handle<Code> megamorphic_stub() {
+ return isolate()->builtins()->KeyedLoadIC_Generic();
+ }
+ virtual Handle<Code> generic_stub() const {
+ return isolate()->builtins()->KeyedLoadIC_Generic();
}
- private:
// Update the inline cache.
- void UpdateCaches(LookupResult* lookup,
- State state,
- Handle<Object> object,
- Handle<String> name);
+ virtual void UpdateMonomorphicIC(Handle<JSObject> receiver,
+ Handle<Code> handler,
+ Handle<String> name);
+ virtual Handle<Code> ComputeLoadHandler(LookupResult* lookup,
+ Handle<JSObject> receiver,
+ Handle<String> name);
+ virtual void UpdateMegamorphicCache(Map* map, String* name, Code* code) { }
+ private:
// Stub accessors.
- static Code* initialize_stub() {
- return Isolate::Current()->builtins()->builtin(
- Builtins::kKeyedLoadIC_Initialize);
+ static Handle<Code> initialize_stub() {
+ return Isolate::Current()->builtins()->KeyedLoadIC_Initialize();
}
- Handle<Code> megamorphic_stub() {
- return isolate()->builtins()->KeyedLoadIC_Generic();
- }
- Handle<Code> generic_stub() const {
- return isolate()->builtins()->KeyedLoadIC_Generic();
- }
- Handle<Code> pre_monomorphic_stub() {
+ virtual Handle<Code> pre_monomorphic_stub() {
return isolate()->builtins()->KeyedLoadIC_PreMonomorphic();
}
Handle<Code> indexed_interceptor_stub() {
@@ -561,6 +489,9 @@ class KeyedLoadIC: public KeyedIC {
Handle<Code> non_strict_arguments_stub() {
return isolate()->builtins()->KeyedLoadIC_NonStrictArguments();
}
+ Handle<Code> string_stub() {
+ return isolate()->builtins()->KeyedLoadIC_String();
+ }
static void Clear(Address address, Code* target);
@@ -571,26 +502,44 @@ class KeyedLoadIC: public KeyedIC {
class StoreIC: public IC {
public:
explicit StoreIC(Isolate* isolate) : IC(NO_EXTRA_FRAME, isolate) {
- ASSERT(target()->is_store_stub());
+ ASSERT(target()->is_store_stub() || target()->is_keyed_store_stub());
}
- MUST_USE_RESULT MaybeObject* Store(State state,
- StrictModeFlag strict_mode,
- Handle<Object> object,
- Handle<String> name,
- Handle<Object> value);
-
// Code generators for stub routines. Only called once at startup.
static void GenerateInitialize(MacroAssembler* masm) { GenerateMiss(masm); }
static void GenerateMiss(MacroAssembler* masm);
static void GenerateMegamorphic(MacroAssembler* masm,
StrictModeFlag strict_mode);
- static void GenerateArrayLength(MacroAssembler* masm);
static void GenerateNormal(MacroAssembler* masm);
static void GenerateGlobalProxy(MacroAssembler* masm,
StrictModeFlag strict_mode);
- private:
+ MUST_USE_RESULT MaybeObject* Store(
+ State state,
+ StrictModeFlag strict_mode,
+ Handle<Object> object,
+ Handle<String> name,
+ Handle<Object> value,
+ JSReceiver::StoreFromKeyed store_mode =
+ JSReceiver::CERTAINLY_NOT_STORE_FROM_KEYED);
+
+ protected:
+ virtual Code::Kind kind() const { return Code::STORE_IC; }
+ virtual Handle<Code> megamorphic_stub() {
+ return isolate()->builtins()->StoreIC_Megamorphic();
+ }
+ // Stub accessors.
+ virtual Handle<Code> megamorphic_stub_strict() {
+ return isolate()->builtins()->StoreIC_Megamorphic_Strict();
+ }
+ virtual Handle<Code> global_proxy_stub() {
+ return isolate()->builtins()->StoreIC_GlobalProxy();
+ }
+ virtual Handle<Code> global_proxy_stub_strict() {
+ return isolate()->builtins()->StoreIC_GlobalProxy_Strict();
+ }
+
+
// Update the inline cache and the global stub cache based on the
// lookup result.
void UpdateCaches(LookupResult* lookup,
@@ -599,7 +548,15 @@ class StoreIC: public IC {
Handle<JSObject> receiver,
Handle<String> name,
Handle<Object> value);
+ // Compute the code stub for this store; used for rewriting to
+ // monomorphic state and making sure that the code stub is in the
+ // stub cache.
+ virtual Handle<Code> ComputeStoreMonomorphic(LookupResult* lookup,
+ StrictModeFlag strict_mode,
+ Handle<JSObject> receiver,
+ Handle<String> name);
+ private:
void set_target(Code* code) {
// Strict mode must be preserved across IC patching.
ASSERT(Code::GetStrictMode(code->extra_ic_state()) ==
@@ -607,30 +564,12 @@ class StoreIC: public IC {
IC::set_target(code);
}
- // Stub accessors.
- Code* megamorphic_stub() {
- return isolate()->builtins()->builtin(
- Builtins::kStoreIC_Megamorphic);
- }
- Code* megamorphic_stub_strict() {
- return isolate()->builtins()->builtin(
- Builtins::kStoreIC_Megamorphic_Strict);
- }
- static Code* initialize_stub() {
- return Isolate::Current()->builtins()->builtin(
- Builtins::kStoreIC_Initialize);
- }
- static Code* initialize_stub_strict() {
- return Isolate::Current()->builtins()->builtin(
- Builtins::kStoreIC_Initialize_Strict);
+ static Handle<Code> initialize_stub() {
+ return Isolate::Current()->builtins()->StoreIC_Initialize();
}
- Handle<Code> global_proxy_stub() {
- return isolate()->builtins()->StoreIC_GlobalProxy();
- }
- Handle<Code> global_proxy_stub_strict() {
- return isolate()->builtins()->StoreIC_GlobalProxy_Strict();
+ static Handle<Code> initialize_stub_strict() {
+ return Isolate::Current()->builtins()->StoreIC_Initialize_Strict();
}
-
static void Clear(Address address, Code* target);
friend class IC;
@@ -649,9 +588,46 @@ enum KeyedStoreIncrementLength {
};
-class KeyedStoreIC: public KeyedIC {
+class KeyedStoreIC: public StoreIC {
public:
- explicit KeyedStoreIC(Isolate* isolate) : KeyedIC(isolate) {
+ enum StubKind {
+ STORE_NO_TRANSITION,
+ STORE_TRANSITION_SMI_TO_OBJECT,
+ STORE_TRANSITION_SMI_TO_DOUBLE,
+ STORE_TRANSITION_DOUBLE_TO_OBJECT,
+ STORE_TRANSITION_HOLEY_SMI_TO_OBJECT,
+ STORE_TRANSITION_HOLEY_SMI_TO_DOUBLE,
+ STORE_TRANSITION_HOLEY_DOUBLE_TO_OBJECT,
+ STORE_AND_GROW_NO_TRANSITION,
+ STORE_AND_GROW_TRANSITION_SMI_TO_OBJECT,
+ STORE_AND_GROW_TRANSITION_SMI_TO_DOUBLE,
+ STORE_AND_GROW_TRANSITION_DOUBLE_TO_OBJECT,
+ STORE_AND_GROW_TRANSITION_HOLEY_SMI_TO_OBJECT,
+ STORE_AND_GROW_TRANSITION_HOLEY_SMI_TO_DOUBLE,
+ STORE_AND_GROW_TRANSITION_HOLEY_DOUBLE_TO_OBJECT
+ };
+
+ static const int kGrowICDelta = STORE_AND_GROW_NO_TRANSITION -
+ STORE_NO_TRANSITION;
+ STATIC_ASSERT(kGrowICDelta ==
+ STORE_AND_GROW_TRANSITION_SMI_TO_OBJECT -
+ STORE_TRANSITION_SMI_TO_OBJECT);
+ STATIC_ASSERT(kGrowICDelta ==
+ STORE_AND_GROW_TRANSITION_SMI_TO_DOUBLE -
+ STORE_TRANSITION_SMI_TO_DOUBLE);
+ STATIC_ASSERT(kGrowICDelta ==
+ STORE_AND_GROW_TRANSITION_DOUBLE_TO_OBJECT -
+ STORE_TRANSITION_DOUBLE_TO_OBJECT);
+
+ static inline StubKind GetGrowStubKind(StubKind stub_kind) {
+ if (stub_kind < STORE_AND_GROW_NO_TRANSITION) {
+ stub_kind = static_cast<StubKind>(static_cast<int>(stub_kind) +
+ kGrowICDelta);
+ }
+ return stub_kind;
+ }
+
+ explicit KeyedStoreIC(Isolate* isolate) : StoreIC(isolate) {
ASSERT(target()->is_keyed_store_stub());
}
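
The StubKind enum is laid out so that every grow variant sits at the fixed offset kGrowICDelta from its non-grow counterpart, which makes "add grow" a single integer addition; the STATIC_ASSERTs pin that layout. A condensed sketch of the trick with a four-entry enum:

    #include <cassert>

    enum StubKind {
      STORE_NO_TRANSITION,
      STORE_TRANSITION_SMI_TO_OBJECT,
      STORE_AND_GROW_NO_TRANSITION,
      STORE_AND_GROW_TRANSITION_SMI_TO_OBJECT
    };

    static const int kGrowICDelta =
        STORE_AND_GROW_NO_TRANSITION - STORE_NO_TRANSITION;
    static_assert(kGrowICDelta == STORE_AND_GROW_TRANSITION_SMI_TO_OBJECT -
                                  STORE_TRANSITION_SMI_TO_OBJECT,
                  "grow variants must mirror the non-grow layout");

    static StubKind GetGrowStubKind(StubKind kind) {
      if (kind < STORE_AND_GROW_NO_TRANSITION) {
        kind = static_cast<StubKind>(static_cast<int>(kind) + kGrowICDelta);
      }
      return kind;
    }

    int main() {
      assert(GetGrowStubKind(STORE_TRANSITION_SMI_TO_OBJECT) ==
             STORE_AND_GROW_TRANSITION_SMI_TO_OBJECT);
      assert(GetGrowStubKind(STORE_AND_GROW_NO_TRANSITION) ==
             STORE_AND_GROW_NO_TRANSITION);  // already a grow kind
      return 0;
    }
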
@@ -660,13 +636,13 @@ class KeyedStoreIC: public KeyedIC {
Handle<Object> object,
Handle<Object> name,
Handle<Object> value,
- bool force_generic);
+ ICMissMode force_generic);
// Code generators for stub routines. Only called once at startup.
static void GenerateInitialize(MacroAssembler* masm) {
- GenerateMiss(masm, false);
+ GenerateMiss(masm, MISS);
}
- static void GenerateMiss(MacroAssembler* masm, bool force_generic);
+ static void GenerateMiss(MacroAssembler* masm, ICMissMode force_generic);
static void GenerateSlow(MacroAssembler* masm);
static void GenerateRuntimeSetProperty(MacroAssembler* masm,
StrictModeFlag strict_mode);
@@ -675,32 +651,27 @@ class KeyedStoreIC: public KeyedIC {
static void GenerateTransitionElementsSmiToDouble(MacroAssembler* masm);
static void GenerateTransitionElementsDoubleToObject(MacroAssembler* masm);
- virtual Handle<Code> GetElementStubWithoutMapCheck(
- bool is_js_array,
- ElementsKind elements_kind,
- KeyedAccessGrowMode grow_mode);
-
- virtual bool IsGeneric() const {
- return target() == *generic_stub() ||
- target() == *generic_stub_strict();
- }
-
protected:
virtual Code::Kind kind() const { return Code::KEYED_STORE_IC; }
- virtual Handle<Code> ComputePolymorphicStub(MapHandleList* receiver_maps,
- StrictModeFlag strict_mode,
- KeyedAccessGrowMode grow_mode);
+ virtual Handle<Code> ComputeStoreMonomorphic(LookupResult* lookup,
+ StrictModeFlag strict_mode,
+ Handle<JSObject> receiver,
+ Handle<String> name);
+ virtual void UpdateMegamorphicCache(Map* map, String* name, Code* code) { }
- private:
- // Update the inline cache.
- void UpdateCaches(LookupResult* lookup,
- State state,
- StrictModeFlag strict_mode,
- Handle<JSObject> receiver,
- Handle<String> name,
- Handle<Object> value);
+ virtual Handle<Code> megamorphic_stub() {
+ return isolate()->builtins()->KeyedStoreIC_Generic();
+ }
+ virtual Handle<Code> megamorphic_stub_strict() {
+ return isolate()->builtins()->KeyedStoreIC_Generic_Strict();
+ }
+ Handle<Code> StoreElementStub(Handle<JSObject> receiver,
+ StubKind stub_kind,
+ StrictModeFlag strict_mode);
+
+ private:
void set_target(Code* code) {
// Strict mode must be preserved across IC patching.
ASSERT(Code::GetStrictMode(code->extra_ic_state()) ==
@@ -709,19 +680,11 @@ class KeyedStoreIC: public KeyedIC {
}
// Stub accessors.
- static Code* initialize_stub() {
- return Isolate::Current()->builtins()->builtin(
- Builtins::kKeyedStoreIC_Initialize);
- }
- static Code* initialize_stub_strict() {
- return Isolate::Current()->builtins()->builtin(
- Builtins::kKeyedStoreIC_Initialize_Strict);
+ static Handle<Code> initialize_stub() {
+ return Isolate::Current()->builtins()->KeyedStoreIC_Initialize();
}
- Handle<Code> megamorphic_stub() {
- return isolate()->builtins()->KeyedStoreIC_Generic();
- }
- Handle<Code> megamorphic_stub_strict() {
- return isolate()->builtins()->KeyedStoreIC_Generic_Strict();
+ static Handle<Code> initialize_stub_strict() {
+ return Isolate::Current()->builtins()->KeyedStoreIC_Initialize_Strict();
}
Handle<Code> generic_stub() const {
return isolate()->builtins()->KeyedStoreIC_Generic();
@@ -739,6 +702,24 @@ class KeyedStoreIC: public KeyedIC {
Handle<Object> key,
Handle<Object> value);
+ static bool IsTransitionStubKind(StubKind stub_kind) {
+ return stub_kind > STORE_NO_TRANSITION &&
+ stub_kind != STORE_AND_GROW_NO_TRANSITION;
+ }
+
+ static bool IsGrowStubKind(StubKind stub_kind) {
+ return stub_kind >= STORE_AND_GROW_NO_TRANSITION;
+ }
+
+ static StubKind GetNoTransitionStubKind(StubKind stub_kind) {
+ if (!IsTransitionStubKind(stub_kind)) return stub_kind;
+ if (IsGrowStubKind(stub_kind)) return STORE_AND_GROW_NO_TRANSITION;
+ return STORE_NO_TRANSITION;
+ }
+
+ Handle<Map> ComputeTransitionedMap(Handle<JSObject> receiver,
+ StubKind stub_kind);
+
friend class IC;
};
@@ -750,7 +731,7 @@ class UnaryOpIC: public IC {
enum TypeInfo {
UNINITIALIZED,
SMI,
- HEAP_NUMBER,
+ NUMBER,
GENERIC
};
@@ -775,10 +756,9 @@ class BinaryOpIC: public IC {
UNINITIALIZED,
SMI,
INT32,
- HEAP_NUMBER,
+ NUMBER,
ODDBALL,
- BOTH_STRING, // Only used for addition operation.
- STRING, // Only used for addition operation. At least one string operand.
+ STRING, // Only used for addition operation.
GENERIC
};
@@ -789,23 +769,26 @@ class BinaryOpIC: public IC {
static const char* GetName(TypeInfo type_info);
static State ToState(TypeInfo type_info);
-
- static TypeInfo GetTypeInfo(Handle<Object> left, Handle<Object> right);
-
- static TypeInfo JoinTypes(TypeInfo x, TypeInfo y);
};
class CompareIC: public IC {
public:
+ // The type/state lattice is defined by the following inequations:
+ // UNINITIALIZED < ...
+ // ... < GENERIC
+ // SMI < NUMBER
+ // INTERNALIZED_STRING < STRING
+ // KNOWN_OBJECT < OBJECT
enum State {
UNINITIALIZED,
- SMIS,
- HEAP_NUMBERS,
- SYMBOLS,
- STRINGS,
- OBJECTS,
- KNOWN_OBJECTS,
+ SMI,
+ NUMBER,
+ STRING,
+ INTERNALIZED_STRING,
+ UNIQUE_NAME, // Symbol or InternalizedString
+ OBJECT, // JSObject
+ KNOWN_OBJECT, // JSObject with specific map (faster check)
GENERIC
};
@@ -815,27 +798,27 @@ class CompareIC: public IC {
// Update the inline cache for the given operands.
void UpdateCaches(Handle<Object> x, Handle<Object> y);
+
// Factory method for getting an uninitialized compare stub.
- static Handle<Code> GetUninitialized(Token::Value op);
+ static Handle<Code> GetUninitialized(Isolate* isolate, Token::Value op);
// Helper function for computing the condition for a compare operation.
static Condition ComputeCondition(Token::Value op);
- // Helper function for determining the state of a compare IC.
- static State ComputeState(Code* target);
-
- // Helper function for determining the operation a compare IC is for.
- static Token::Value ComputeOperation(Code* target);
-
static const char* GetStateName(State state);
private:
- State TargetState(State state, bool has_inlined_smi_code,
- Handle<Object> x, Handle<Object> y);
+ static bool HasInlinedSmiCode(Address address);
+
+ State TargetState(State old_state,
+ State old_left,
+ State old_right,
+ bool has_inlined_smi_code,
+ Handle<Object> x,
+ Handle<Object> y);
bool strict() const { return op_ == Token::EQ_STRICT; }
Condition GetCondition() const { return ComputeCondition(op_); }
- State GetState() { return ComputeState(target()); }
static Code* GetRawUninitialized(Token::Value op);
@@ -859,6 +842,9 @@ class ToBooleanIC: public IC {
enum InlinedSmiCheck { ENABLE_INLINED_SMI_CHECK, DISABLE_INLINED_SMI_CHECK };
void PatchInlinedSmiCode(Address address, InlinedSmiCheck check);
+DECLARE_RUNTIME_FUNCTION(MaybeObject*, KeyedLoadIC_Miss);
+DECLARE_RUNTIME_FUNCTION(MaybeObject*, KeyedLoadIC_MissFromStubFailure);
+
} } // namespace v8::internal
#endif // V8_IC_H_
diff --git a/src/3rdparty/v8/src/incremental-marking-inl.h b/src/3rdparty/v8/src/incremental-marking-inl.h
index bbe9a9d..1c30383 100644
--- a/src/3rdparty/v8/src/incremental-marking-inl.h
+++ b/src/3rdparty/v8/src/incremental-marking-inl.h
@@ -37,16 +37,27 @@ namespace internal {
bool IncrementalMarking::BaseRecordWrite(HeapObject* obj,
Object** slot,
Object* value) {
- MarkBit value_bit = Marking::MarkBitFrom(HeapObject::cast(value));
+ HeapObject* value_heap_obj = HeapObject::cast(value);
+ MarkBit value_bit = Marking::MarkBitFrom(value_heap_obj);
if (Marking::IsWhite(value_bit)) {
MarkBit obj_bit = Marking::MarkBitFrom(obj);
if (Marking::IsBlack(obj_bit)) {
- BlackToGreyAndUnshift(obj, obj_bit);
- RestartIfNotMarking();
+ MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
+ if (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
+ if (chunk->IsLeftOfProgressBar(slot)) {
+ WhiteToGreyAndPush(value_heap_obj, value_bit);
+ RestartIfNotMarking();
+ } else {
+ return false;
+ }
+ } else {
+ BlackToGreyAndUnshift(obj, obj_bit);
+ RestartIfNotMarking();
+ return false;
+ }
+ } else {
+ return false;
}
-
- // Object is either grey or white. It will be scanned if survives.
- return false;
}
if (!is_compacting_) return false;
MarkBit obj_bit = Marking::MarkBitFrom(obj);
@@ -83,6 +94,10 @@ void IncrementalMarking::RecordWrites(HeapObject* obj) {
if (IsMarking()) {
MarkBit obj_bit = Marking::MarkBitFrom(obj);
if (Marking::IsBlack(obj_bit)) {
+ MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
+ if (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
+ chunk->set_progress_bar(0);
+ }
BlackToGreyAndUnshift(obj, obj_bit);
RestartIfNotMarking();
}
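For readers following the rewritten BaseRecordWrite above, here is a condensed model of its control flow, with plain booleans standing in for mark bits, MemoryChunk flags, and IsLeftOfProgressBar; it is a sketch of the decision logic, not the V8 code itself:

// Returns true when the written slot must be recorded for compaction.
struct Chunk { bool has_progress_bar; };

bool BaseRecordWriteModel(bool value_is_white, bool obj_is_black,
                          const Chunk& chunk, bool slot_left_of_bar,
                          bool is_compacting) {
  if (value_is_white) {
    if (!obj_is_black) return false;        // Grey/white holder: scanned later.
    if (chunk.has_progress_bar) {
      if (!slot_left_of_bar) return false;  // Right of the bar: the chunked
                                            // scan will still visit this slot.
      // WhiteToGreyAndPush(value); RestartIfNotMarking();
      // The slot may still need recording, so fall through.
    } else {
      // BlackToGreyAndUnshift(obj); RestartIfNotMarking();
      return false;                         // Whole object is rescanned.
    }
  }
  return is_compacting && obj_is_black;     // Record slots only if compacting.
}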
diff --git a/src/3rdparty/v8/src/incremental-marking.cc b/src/3rdparty/v8/src/incremental-marking.cc
index b34d6d9..e2fca5b 100644
--- a/src/3rdparty/v8/src/incremental-marking.cc
+++ b/src/3rdparty/v8/src/incremental-marking.cc
@@ -78,7 +78,7 @@ void IncrementalMarking::RecordWriteSlow(HeapObject* obj,
void IncrementalMarking::RecordWriteFromCode(HeapObject* obj,
- Object* value,
+ Object** slot,
Isolate* isolate) {
ASSERT(obj->IsHeapObject());
IncrementalMarking* marking = isolate->heap()->incremental_marking();
@@ -94,7 +94,7 @@ void IncrementalMarking::RecordWriteFromCode(HeapObject* obj,
MemoryChunk::kWriteBarrierCounterGranularity);
}
- marking->RecordWrite(obj, NULL, value);
+ marking->RecordWrite(obj, slot, *slot);
}
@@ -188,16 +188,78 @@ static void MarkObjectGreyDoNotEnqueue(Object* obj) {
}
+static inline void MarkBlackOrKeepGrey(HeapObject* heap_object,
+ MarkBit mark_bit,
+ int size) {
+ ASSERT(!Marking::IsImpossible(mark_bit));
+ if (mark_bit.Get()) return;
+ mark_bit.Set();
+ MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(), size);
+ ASSERT(Marking::IsBlack(mark_bit));
+}
+
+
+static inline void MarkBlackOrKeepBlack(HeapObject* heap_object,
+ MarkBit mark_bit,
+ int size) {
+ ASSERT(!Marking::IsImpossible(mark_bit));
+ if (Marking::IsBlack(mark_bit)) return;
+ Marking::MarkBlack(mark_bit);
+ MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(), size);
+ ASSERT(Marking::IsBlack(mark_bit));
+}
+
+
class IncrementalMarkingMarkingVisitor
: public StaticMarkingVisitor<IncrementalMarkingMarkingVisitor> {
public:
static void Initialize() {
StaticMarkingVisitor<IncrementalMarkingMarkingVisitor>::Initialize();
-
+ table_.Register(kVisitFixedArray, &VisitFixedArrayIncremental);
table_.Register(kVisitNativeContext, &VisitNativeContextIncremental);
table_.Register(kVisitJSRegExp, &VisitJSRegExp);
}
+ static const int kProgressBarScanningChunk = 32 * 1024;
+
+ static void VisitFixedArrayIncremental(Map* map, HeapObject* object) {
+ MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
+ // TODO(mstarzinger): Move setting of the flag to the allocation site of
+ // the array. The visitor should just check the flag.
+ if (FLAG_use_marking_progress_bar &&
+ chunk->owner()->identity() == LO_SPACE) {
+ chunk->SetFlag(MemoryChunk::HAS_PROGRESS_BAR);
+ }
+ if (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
+ Heap* heap = map->GetHeap();
+ // When using a progress bar for large fixed arrays, scan only a chunk of
+ // the array and try to push it onto the marking deque again until it is
+ // fully scanned. Fall back to scanning it through to the end in case this
+ // fails because of a full deque.
+ int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
+ int start_offset = Max(FixedArray::BodyDescriptor::kStartOffset,
+ chunk->progress_bar());
+ int end_offset = Min(object_size,
+ start_offset + kProgressBarScanningChunk);
+ bool scan_until_end = false;
+ do {
+ VisitPointersWithAnchor(heap,
+ HeapObject::RawField(object, 0),
+ HeapObject::RawField(object, start_offset),
+ HeapObject::RawField(object, end_offset));
+ start_offset = end_offset;
+ end_offset = Min(object_size, end_offset + kProgressBarScanningChunk);
+ scan_until_end = heap->incremental_marking()->marking_deque()->IsFull();
+ } while (scan_until_end && start_offset < object_size);
+ chunk->set_progress_bar(start_offset);
+ if (start_offset < object_size) {
+ heap->incremental_marking()->marking_deque()->UnshiftGrey(object);
+ }
+ } else {
+ FixedArrayVisitor::Visit(map, object);
+ }
+ }
+
static void VisitNativeContextIncremental(Map* map, HeapObject* object) {
Context* context = Context::cast(object);
@@ -234,15 +296,25 @@ class IncrementalMarkingMarkingVisitor
}
}
+ INLINE(static void VisitPointersWithAnchor(Heap* heap,
+ Object** anchor,
+ Object** start,
+ Object** end)) {
+ for (Object** p = start; p < end; p++) {
+ Object* obj = *p;
+ if (obj->NonFailureIsHeapObject()) {
+ heap->mark_compact_collector()->RecordSlot(anchor, p, obj);
+ MarkObject(heap, obj);
+ }
+ }
+ }
+
// Marks the object grey and pushes it on the marking stack.
INLINE(static void MarkObject(Heap* heap, Object* obj)) {
HeapObject* heap_object = HeapObject::cast(obj);
MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
if (mark_bit.data_only()) {
- if (heap->incremental_marking()->MarkBlackOrKeepGrey(mark_bit)) {
- MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(),
- heap_object->Size());
- }
+ MarkBlackOrKeepGrey(heap_object, mark_bit, heap_object->Size());
} else if (Marking::IsWhite(mark_bit)) {
heap->incremental_marking()->WhiteToGreyAndPush(heap_object, mark_bit);
}
@@ -266,10 +338,9 @@ class IncrementalMarkingMarkingVisitor
class IncrementalMarkingRootMarkingVisitor : public ObjectVisitor {
public:
- IncrementalMarkingRootMarkingVisitor(Heap* heap,
- IncrementalMarking* incremental_marking)
- : heap_(heap),
- incremental_marking_(incremental_marking) {
+ explicit IncrementalMarkingRootMarkingVisitor(
+ IncrementalMarking* incremental_marking)
+ : incremental_marking_(incremental_marking) {
}
void VisitPointer(Object** p) {
@@ -288,10 +359,7 @@ class IncrementalMarkingRootMarkingVisitor : public ObjectVisitor {
HeapObject* heap_object = HeapObject::cast(obj);
MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
if (mark_bit.data_only()) {
- if (incremental_marking_->MarkBlackOrKeepGrey(mark_bit)) {
- MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(),
- heap_object->Size());
- }
+ MarkBlackOrKeepGrey(heap_object, mark_bit, heap_object->Size());
} else {
if (Marking::IsWhite(mark_bit)) {
incremental_marking_->WhiteToGreyAndPush(heap_object, mark_bit);
@@ -299,7 +367,6 @@ class IncrementalMarkingRootMarkingVisitor : public ObjectVisitor {
}
}
- Heap* heap_;
IncrementalMarking* incremental_marking_;
};
@@ -495,6 +562,7 @@ void IncrementalMarking::UncommitMarkingDeque() {
void IncrementalMarking::Start() {
+ ASSERT(!heap_->mark_compact_collector()->IsConcurrentSweepingInProgress());
if (FLAG_trace_incremental_marking) {
PrintF("[IncrementalMarking] Start\n");
}
@@ -503,8 +571,7 @@ void IncrementalMarking::Start() {
ResetStepCounters();
- if (heap_->old_pointer_space()->IsSweepingComplete() &&
- heap_->old_data_space()->IsSweepingComplete()) {
+ if (heap_->IsSweepingComplete()) {
StartMarking(ALLOW_COMPACTION);
} else {
if (FLAG_trace_incremental_marking) {
@@ -560,7 +627,7 @@ void IncrementalMarking::StartMarking(CompactionFlag flag) {
}
// Mark strong roots grey.
- IncrementalMarkingRootMarkingVisitor visitor(heap_, this);
+ IncrementalMarkingRootMarkingVisitor visitor(this);
heap_->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);
// Ready to start incremental marking.
@@ -616,8 +683,11 @@ void IncrementalMarking::UpdateMarkingDequeAfterScavenge() {
ASSERT(new_top != marking_deque_.bottom());
#ifdef DEBUG
MarkBit mark_bit = Marking::MarkBitFrom(obj);
+ MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
ASSERT(Marking::IsGrey(mark_bit) ||
- (obj->IsFiller() && Marking::IsWhite(mark_bit)));
+ (obj->IsFiller() && Marking::IsWhite(mark_bit)) ||
+ (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR) &&
+ Marking::IsBlack(mark_bit)));
#endif
}
}
@@ -637,11 +707,15 @@ void IncrementalMarking::VisitObject(Map* map, HeapObject* obj, int size) {
IncrementalMarkingMarkingVisitor::IterateBody(map, obj);
- MarkBit obj_mark_bit = Marking::MarkBitFrom(obj);
- SLOW_ASSERT(Marking::IsGrey(obj_mark_bit) ||
- (obj->IsFiller() && Marking::IsWhite(obj_mark_bit)));
- Marking::MarkBlack(obj_mark_bit);
- MemoryChunk::IncrementLiveBytesFromGC(obj->address(), size);
+ MarkBit mark_bit = Marking::MarkBitFrom(obj);
+#ifdef DEBUG
+ MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
+ SLOW_ASSERT(Marking::IsGrey(mark_bit) ||
+ (obj->IsFiller() && Marking::IsWhite(mark_bit)) ||
+ (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR) &&
+ Marking::IsBlack(mark_bit)));
+#endif
+ MarkBlackOrKeepBlack(obj, mark_bit, size);
}
@@ -680,18 +754,24 @@ void IncrementalMarking::ProcessMarkingDeque() {
void IncrementalMarking::Hurry() {
if (state() == MARKING) {
double start = 0.0;
- if (FLAG_trace_incremental_marking) {
- PrintF("[IncrementalMarking] Hurry\n");
+ if (FLAG_trace_incremental_marking || FLAG_print_cumulative_gc_stat) {
start = OS::TimeCurrentMillis();
+ if (FLAG_trace_incremental_marking) {
+ PrintF("[IncrementalMarking] Hurry\n");
+ }
}
// TODO(gc) hurry can mark objects it encounters black as mutator
// was stopped.
ProcessMarkingDeque();
state_ = COMPLETE;
- if (FLAG_trace_incremental_marking) {
+ if (FLAG_trace_incremental_marking || FLAG_print_cumulative_gc_stat) {
double end = OS::TimeCurrentMillis();
- PrintF("[IncrementalMarking] Complete (hurry), spent %d ms.\n",
- static_cast<int>(end - start));
+ double delta = end - start;
+ heap_->AddMarkingTime(delta);
+ if (FLAG_trace_incremental_marking) {
+ PrintF("[IncrementalMarking] Complete (hurry), spent %d ms.\n",
+ static_cast<int>(delta));
+ }
}
}
@@ -807,7 +887,7 @@ void IncrementalMarking::Step(intptr_t allocated_bytes,
// allocation), so to reduce the lumpiness we don't use the write barriers
// invoked since last step directly to determine the amount of work to do.
intptr_t bytes_to_process =
- marking_speed_ * Max(allocated_, kWriteBarriersInvokedThreshold);
+ marking_speed_ * Max(allocated_, write_barriers_invoked_since_last_step_);
allocated_ = 0;
write_barriers_invoked_since_last_step_ = 0;
@@ -815,7 +895,8 @@ void IncrementalMarking::Step(intptr_t allocated_bytes,
double start = 0;
- if (FLAG_trace_incremental_marking || FLAG_trace_gc) {
+ if (FLAG_trace_incremental_marking || FLAG_trace_gc ||
+ FLAG_print_cumulative_gc_stat) {
start = OS::TimeCurrentMillis();
}
@@ -895,12 +976,14 @@ void IncrementalMarking::Step(intptr_t allocated_bytes,
}
}
- if (FLAG_trace_incremental_marking || FLAG_trace_gc) {
+ if (FLAG_trace_incremental_marking || FLAG_trace_gc ||
+ FLAG_print_cumulative_gc_stat) {
double end = OS::TimeCurrentMillis();
double delta = (end - start);
longest_step_ = Max(longest_step_, delta);
steps_took_ += delta;
steps_took_since_last_gc_ += delta;
+ heap_->AddMarkingTime(delta);
}
}
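The chunked scan in VisitFixedArrayIncremental is the heart of this change. A self-contained sketch of the same loop, where ScanRange and DequeIsFull are hypothetical stand-ins for the visitor body and the marking deque:

#include <algorithm>
#include <cstddef>

// Scan 32KB at a time from the saved progress bar; keep scanning within
// this step only while the deque is full (otherwise the object is
// re-pushed grey and the remainder is handled in a later step).
const size_t kChunk = 32 * 1024;
const size_t kHeaderSize = 8;  // Plays the role of kStartOffset.

size_t ScanIncrementally(size_t object_size, size_t progress_bar,
                         bool (*DequeIsFull)(),
                         void (*ScanRange)(size_t start, size_t end)) {
  size_t start = std::max(kHeaderSize, progress_bar);
  size_t end = std::min(object_size, start + kChunk);
  bool scan_until_end = false;
  do {
    ScanRange(start, end);
    start = end;
    end = std::min(object_size, end + kChunk);
    scan_until_end = DequeIsFull();
  } while (scan_until_end && start < object_size);
  return start;  // New progress bar; start < object_size => re-push grey.
}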
diff --git a/src/3rdparty/v8/src/incremental-marking.h b/src/3rdparty/v8/src/incremental-marking.h
index 6ae0f59..fc5a978 100644
--- a/src/3rdparty/v8/src/incremental-marking.h
+++ b/src/3rdparty/v8/src/incremental-marking.h
@@ -127,7 +127,7 @@ class IncrementalMarking {
}
static void RecordWriteFromCode(HeapObject* obj,
- Object* value,
+ Object** slot,
Isolate* isolate);
static void RecordWriteForEvacuationFromCode(HeapObject* obj,
@@ -164,19 +164,6 @@ class IncrementalMarking {
inline void WhiteToGreyAndPush(HeapObject* obj, MarkBit mark_bit);
- // Does white->black or keeps gray or black color. Returns true if converting
- // white to black.
- inline bool MarkBlackOrKeepGrey(MarkBit mark_bit) {
- ASSERT(!Marking::IsImpossible(mark_bit));
- if (mark_bit.Get()) {
- // Grey or black: Keep the color.
- return false;
- }
- mark_bit.Set();
- ASSERT(Marking::IsBlack(mark_bit));
- return true;
- }
-
inline int steps_count() {
return steps_count_;
}
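The member helper deleted here reappears in incremental-marking.cc as a free function that folds in the live-byte accounting its callers used to do by hand. A condensed sketch, with MarkBit and the accounting hook as stand-ins:

struct MarkBit {
  bool set = false;
  bool Get() const { return set; }
  void Set() { set = true; }
};

void IncrementLiveBytes(int size);  // Hypothetical accounting hook.

inline void MarkBlackOrKeepGrey(MarkBit& mark_bit, int size) {
  if (mark_bit.Get()) return;  // Grey or black: keep the color.
  mark_bit.Set();              // White -> black.
  IncrementLiveBytes(size);    // Accounting now happens in one place.
}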
diff --git a/src/3rdparty/v8/src/interface.cc b/src/3rdparty/v8/src/interface.cc
index 336be82..603dfe9 100644
--- a/src/3rdparty/v8/src/interface.cc
+++ b/src/3rdparty/v8/src/interface.cc
@@ -35,8 +35,8 @@ namespace internal {
static bool Match(void* key1, void* key2) {
String* name1 = *static_cast<String**>(key1);
String* name2 = *static_cast<String**>(key2);
- ASSERT(name1->IsSymbol());
- ASSERT(name2->IsSymbol());
+ ASSERT(name1->IsInternalizedString());
+ ASSERT(name2->IsInternalizedString());
return name1 == name2;
}
@@ -170,6 +170,8 @@ void Interface::DoUnify(Interface* that, bool* ok, Zone* zone) {
ASSERT(that->forward_ == NULL);
ASSERT(!this->IsValue());
ASSERT(!that->IsValue());
+ ASSERT(this->index_ == -1);
+ ASSERT(that->index_ == -1);
ASSERT(*ok);
#ifdef DEBUG
@@ -194,15 +196,6 @@ void Interface::DoUnify(Interface* that, bool* ok, Zone* zone) {
return;
}
- // Merge instance.
- if (!that->instance_.is_null()) {
- if (!this->instance_.is_null() && *this->instance_ != *that->instance_) {
- *ok = false;
- return;
- }
- this->instance_ = that->instance_;
- }
-
// Merge interfaces.
this->flags_ |= that->flags_;
that->forward_ = this;
@@ -227,7 +220,7 @@ void Interface::Print(int n) {
} else if (IsValue()) {
PrintF("value\n");
} else if (IsModule()) {
- PrintF("module %s{", IsFrozen() ? "" : "(unresolved) ");
+ PrintF("module %d %s{", Index(), IsFrozen() ? "" : "(unresolved) ");
ZoneHashMap* map = Chase()->exports_;
if (map == NULL || map->occupancy() == 0) {
PrintF("}\n");
diff --git a/src/3rdparty/v8/src/interface.h b/src/3rdparty/v8/src/interface.h
index 94ef11b..f824a9a 100644
--- a/src/3rdparty/v8/src/interface.h
+++ b/src/3rdparty/v8/src/interface.h
@@ -108,18 +108,18 @@ class Interface : public ZoneObject {
if (*ok) Chase()->flags_ |= MODULE;
}
- // Set associated instance object.
- void MakeSingleton(Handle<JSModule> instance, bool* ok) {
- *ok = IsModule() && Chase()->instance_.is_null();
- if (*ok) Chase()->instance_ = instance;
- }
-
// Do not allow any further refinements, directly or through unification.
void Freeze(bool* ok) {
*ok = IsValue() || IsModule();
if (*ok) Chase()->flags_ |= FROZEN;
}
+ // Assign an index.
+ void Allocate(int index) {
+ ASSERT(IsModule() && IsFrozen() && Chase()->index_ == -1);
+ Chase()->index_ = index;
+ }
+
// ---------------------------------------------------------------------------
// Accessors.
@@ -138,7 +138,23 @@ class Interface : public ZoneObject {
// Check whether this is closed (i.e. fully determined).
bool IsFrozen() { return Chase()->flags_ & FROZEN; }
- Handle<JSModule> Instance() { return Chase()->instance_; }
+ bool IsUnified(Interface* that) {
+ return Chase() == that->Chase()
+ || (this->IsValue() == that->IsValue() &&
+ this->IsConst() == that->IsConst());
+ }
+
+ int Length() {
+ ASSERT(IsModule() && IsFrozen());
+ ZoneHashMap* exports = Chase()->exports_;
+ return exports ? exports->occupancy() : 0;
+ }
+
+ // The context slot in the hosting global context pointing to this module.
+ int Index() {
+ ASSERT(IsModule() && IsFrozen());
+ return Chase()->index_;
+ }
// Look up an exported name. Returns NULL if not (yet) defined.
Interface* Lookup(Handle<String> name, Zone* zone);
@@ -194,12 +210,13 @@ class Interface : public ZoneObject {
int flags_;
Interface* forward_; // Unification link
ZoneHashMap* exports_; // Module exports and their types (allocated lazily)
- Handle<JSModule> instance_;
+ int index_;
explicit Interface(int flags)
: flags_(flags),
forward_(NULL),
- exports_(NULL) {
+ exports_(NULL),
+ index_(-1) {
#ifdef DEBUG
if (FLAG_print_interface_details)
PrintF("# Creating %p\n", static_cast<void*>(this));
diff --git a/src/3rdparty/v8/src/interpreter-irregexp.cc b/src/3rdparty/v8/src/interpreter-irregexp.cc
index 3a92b84..5abeb5a 100644
--- a/src/3rdparty/v8/src/interpreter-irregexp.cc
+++ b/src/3rdparty/v8/src/interpreter-irregexp.cc
@@ -68,7 +68,7 @@ static bool BackRefMatchesNoCase(Canonicalize* interp_canonicalize,
int from,
int current,
int len,
- Vector<const char> subject) {
+ Vector<const uint8_t> subject) {
for (int i = 0; i < len; i++) {
unsigned int old_char = subject[from++];
unsigned int new_char = subject[current++];
@@ -617,7 +617,7 @@ RegExpImpl::IrregexpResult IrregexpInterpreter::Match(
uc16 previous_char = '\n';
String::FlatContent subject_content = subject->GetFlatContent();
if (subject_content.IsAscii()) {
- Vector<const char> subject_vector = subject_content.ToAsciiVector();
+ Vector<const uint8_t> subject_vector = subject_content.ToOneByteVector();
if (start_position != 0) previous_char = subject_vector[start_position - 1];
return RawMatch(isolate,
code_base,
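The retyping from Vector<const char> to Vector<const uint8_t> also touches the case-insensitive backreference matcher. A simplified stand-in over one-byte data, with std::tolower replacing V8's canonicalization tables (so this is a sketch of the shape, not the exact semantics):

#include <cctype>
#include <cstdint>

static bool BackRefMatchesNoCase(const uint8_t* subject,
                                 int from, int current, int len) {
  for (int i = 0; i < len; i++) {
    unsigned int old_char = subject[from++];
    unsigned int new_char = subject[current++];
    if (old_char == new_char) continue;
    // V8 canonicalizes via interp_canonicalize tables; tolower is a
    // rough ASCII approximation for illustration only.
    if (std::tolower(old_char) != std::tolower(new_char)) return false;
  }
  return true;
}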
diff --git a/src/3rdparty/v8/src/isolate.cc b/src/3rdparty/v8/src/isolate.cc
index 3a6099b..eba1982 100644
--- a/src/3rdparty/v8/src/isolate.cc
+++ b/src/3rdparty/v8/src/isolate.cc
@@ -40,6 +40,7 @@
#include "isolate.h"
#include "lithium-allocator.h"
#include "log.h"
+#include "marking-thread.h"
#include "messages.h"
#include "platform.h"
#include "regexp-stack.h"
@@ -49,6 +50,7 @@
#include "simulator.h"
#include "spaces.h"
#include "stub-cache.h"
+#include "sweeper-thread.h"
#include "version.h"
#include "vm-state-inl.h"
@@ -130,6 +132,24 @@ v8::TryCatch* ThreadLocalTop::TryCatchHandler() {
}
+int SystemThreadManager::NumberOfParallelSystemThreads(
+ ParallelSystemComponent type) {
+ int number_of_threads = Min(OS::NumberOfCores(), kMaxThreads);
+ ASSERT(number_of_threads > 0);
+ if (number_of_threads == 1) {
+ return 0;
+ }
+ if (type == PARALLEL_SWEEPING) {
+ return number_of_threads;
+ } else if (type == CONCURRENT_SWEEPING) {
+ return number_of_threads - 1;
+ } else if (type == PARALLEL_MARKING) {
+ return number_of_threads;
+ }
+ return 1;
+}
+
+
// Create a dummy thread that will wait forever on a semaphore. The only
// purpose for this thread is to have some stack area to save essential data
// into for use by a stacks only core dump (aka minidump).
@@ -409,9 +429,9 @@ void Isolate::EnterDefaultIsolate() {
}
-Isolate* Isolate::GetDefaultIsolateForLocking() {
+v8::Isolate* Isolate::GetDefaultIsolateForLocking() {
EnsureDefaultIsolate();
- return default_isolate_;
+ return reinterpret_cast<v8::Isolate*>(default_isolate_);
}
@@ -427,11 +447,6 @@ char* Isolate::Iterate(ObjectVisitor* v, char* thread_storage) {
}
-void Isolate::IterateThread(ThreadVisitor* v) {
- v->VisitThread(this, thread_local_top());
-}
-
-
void Isolate::IterateThread(ThreadVisitor* v, char* t) {
ThreadLocalTop* thread = reinterpret_cast<ThreadLocalTop*>(t);
v->VisitThread(this, thread);
@@ -527,11 +542,11 @@ Handle<String> Isolate::StackTraceString() {
OS::PrintError(
"If you are lucky you may find a partial stack dump on stdout.\n\n");
incomplete_message_->OutputToStdOut();
- return factory()->empty_symbol();
+ return factory()->empty_string();
} else {
OS::Abort();
// Unreachable
- return factory()->empty_symbol();
+ return factory()->empty_string();
}
}
@@ -542,22 +557,113 @@ void Isolate::PushStackTraceAndDie(unsigned int magic,
unsigned int magic2) {
const int kMaxStackTraceSize = 8192;
Handle<String> trace = StackTraceString();
- char buffer[kMaxStackTraceSize];
+ uint8_t buffer[kMaxStackTraceSize];
int length = Min(kMaxStackTraceSize - 1, trace->length());
String::WriteToFlat(*trace, buffer, 0, length);
buffer[length] = '\0';
+ // TODO(dcarney): convert buffer to utf8?
OS::PrintError("Stacktrace (%x-%x) %p %p: %s\n",
magic, magic2,
static_cast<void*>(object), static_cast<void*>(map),
- buffer);
+ reinterpret_cast<char*>(buffer));
OS::Abort();
}
-void Isolate::CaptureAndSetCurrentStackTraceFor(Handle<JSObject> error_object) {
+// Determines whether the given stack frame should be displayed in
+// a stack trace. The caller is the error constructor that asked
+// for the stack trace to be collected. The first time a construct
+// call to this function is encountered it is skipped. The seen_caller
+// in/out parameter is used to remember if the caller has been seen
+// yet.
+static bool IsVisibleInStackTrace(StackFrame* raw_frame,
+ Object* caller,
+ bool* seen_caller) {
+ // Only display JS frames.
+ if (!raw_frame->is_java_script()) return false;
+ JavaScriptFrame* frame = JavaScriptFrame::cast(raw_frame);
+ Object* raw_fun = frame->function();
+ // Not sure when this can happen but skip it just in case.
+ if (!raw_fun->IsJSFunction()) return false;
+ if ((raw_fun == caller) && !(*seen_caller)) {
+ *seen_caller = true;
+ return false;
+ }
+ // Skip all frames until we've seen the caller.
+ if (!(*seen_caller)) return false;
+ // Also, skip non-visible built-in functions and any call with the builtins
+ // object as receiver, so as to not reveal either the builtins object or
+ // an internal function.
+ // The --builtins-in-stack-traces command line flag allows including
+ // internal call sites in the stack trace for debugging purposes.
+ if (!FLAG_builtins_in_stack_traces) {
+ JSFunction* fun = JSFunction::cast(raw_fun);
+ if (frame->receiver()->IsJSBuiltinsObject() ||
+ (fun->IsBuiltin() && !fun->shared()->native())) {
+ return false;
+ }
+ }
+ return true;
+}
+
+
+Handle<JSArray> Isolate::CaptureSimpleStackTrace(Handle<JSObject> error_object,
+ Handle<Object> caller,
+ int limit) {
+ limit = Max(limit, 0); // Ensure that limit is not negative.
+ int initial_size = Min(limit, 10);
+ Handle<FixedArray> elements =
+ factory()->NewFixedArrayWithHoles(initial_size * 4);
+
+ // If the caller parameter is a function we skip frames until we're
+ // under it before starting to collect.
+ bool seen_caller = !caller->IsJSFunction();
+ int cursor = 0;
+ int frames_seen = 0;
+ for (StackFrameIterator iter(this);
+ !iter.done() && frames_seen < limit;
+ iter.Advance()) {
+ StackFrame* raw_frame = iter.frame();
+ if (IsVisibleInStackTrace(raw_frame, *caller, &seen_caller)) {
+ frames_seen++;
+ JavaScriptFrame* frame = JavaScriptFrame::cast(raw_frame);
+ // Set initial size to the maximum inlining level + 1 for the outermost
+ // function.
+ List<FrameSummary> frames(Compiler::kMaxInliningLevels + 1);
+ frame->Summarize(&frames);
+ for (int i = frames.length() - 1; i >= 0; i--) {
+ if (cursor + 4 > elements->length()) {
+ int new_capacity = JSObject::NewElementsCapacity(elements->length());
+ Handle<FixedArray> new_elements =
+ factory()->NewFixedArrayWithHoles(new_capacity);
+ for (int i = 0; i < cursor; i++) {
+ new_elements->set(i, elements->get(i));
+ }
+ elements = new_elements;
+ }
+ ASSERT(cursor + 4 <= elements->length());
+
+ Handle<Object> recv = frames[i].receiver();
+ Handle<JSFunction> fun = frames[i].function();
+ Handle<Code> code = frames[i].code();
+ Handle<Smi> offset(Smi::FromInt(frames[i].offset()), this);
+ elements->set(cursor++, *recv);
+ elements->set(cursor++, *fun);
+ elements->set(cursor++, *code);
+ elements->set(cursor++, *offset);
+ }
+ }
+ }
+ Handle<JSArray> result = factory()->NewJSArrayWithElements(elements);
+ result->set_length(Smi::FromInt(cursor));
+ return result;
+}
+
+
+void Isolate::CaptureAndSetDetailedStackTrace(Handle<JSObject> error_object) {
if (capture_stack_trace_for_uncaught_exceptions_) {
// Capture stack trace for a detailed exception message.
- Handle<String> key = factory()->hidden_stack_trace_symbol();
+ Handle<String> key = factory()->hidden_stack_trace_string();
Handle<JSArray> stack_trace = CaptureCurrentStackTrace(
stack_trace_for_uncaught_exceptions_frame_limit_,
stack_trace_for_uncaught_exceptions_options_);
@@ -572,15 +678,21 @@ Handle<JSArray> Isolate::CaptureCurrentStackTrace(
int limit = Max(frame_limit, 0);
Handle<JSArray> stack_trace = factory()->NewJSArray(frame_limit);
- Handle<String> column_key = factory()->LookupAsciiSymbol("column");
- Handle<String> line_key = factory()->LookupAsciiSymbol("lineNumber");
- Handle<String> script_key = factory()->LookupAsciiSymbol("scriptName");
+ Handle<String> column_key =
+ factory()->InternalizeOneByteString(STATIC_ASCII_VECTOR("column"));
+ Handle<String> line_key =
+ factory()->InternalizeOneByteString(STATIC_ASCII_VECTOR("lineNumber"));
+ Handle<String> script_key =
+ factory()->InternalizeOneByteString(STATIC_ASCII_VECTOR("scriptName"));
Handle<String> script_name_or_source_url_key =
- factory()->LookupAsciiSymbol("scriptNameOrSourceURL");
- Handle<String> function_key = factory()->LookupAsciiSymbol("functionName");
- Handle<String> eval_key = factory()->LookupAsciiSymbol("isEval");
+ factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("scriptNameOrSourceURL"));
+ Handle<String> function_key =
+ factory()->InternalizeOneByteString(STATIC_ASCII_VECTOR("functionName"));
+ Handle<String> eval_key =
+ factory()->InternalizeOneByteString(STATIC_ASCII_VECTOR("isEval"));
Handle<String> constructor_key =
- factory()->LookupAsciiSymbol("isConstructor");
+ factory()->InternalizeOneByteString(STATIC_ASCII_VECTOR("isConstructor"));
StackTraceFrameIterator it(this);
int frames_seen = 0;
@@ -617,13 +729,13 @@ Handle<JSArray> Isolate::CaptureCurrentStackTrace(
this,
JSObject::SetLocalPropertyIgnoreAttributes(
stack_frame, column_key,
- Handle<Smi>(Smi::FromInt(column_offset + 1)), NONE));
+ Handle<Smi>(Smi::FromInt(column_offset + 1), this), NONE));
}
CHECK_NOT_EMPTY_HANDLE(
this,
JSObject::SetLocalPropertyIgnoreAttributes(
stack_frame, line_key,
- Handle<Smi>(Smi::FromInt(line_number + 1)), NONE));
+ Handle<Smi>(Smi::FromInt(line_number + 1), this), NONE));
}
if (options & StackTrace::kScriptName) {
@@ -715,9 +827,10 @@ void Isolate::PrintStack() {
}
-static void PrintFrames(StringStream* accumulator,
+static void PrintFrames(Isolate* isolate,
+ StringStream* accumulator,
StackFrame::PrintMode mode) {
- StackFrameIterator it;
+ StackFrameIterator it(isolate);
for (int i = 0; !it.done(); it.Advance()) {
it.frame()->Print(accumulator, mode, i++);
}
@@ -727,7 +840,7 @@ static void PrintFrames(StringStream* accumulator,
void Isolate::PrintStack(StringStream* accumulator) {
if (!IsInitialized()) {
accumulator->Add(
- "\n==== Stack trace is not available ==========================\n\n");
+ "\n==== JS stack trace is not available =======================\n\n");
accumulator->Add(
"\n==== Isolate for the thread is not initialized =============\n\n");
return;
@@ -740,12 +853,12 @@ void Isolate::PrintStack(StringStream* accumulator) {
if (c_entry_fp(thread_local_top()) == 0) return;
accumulator->Add(
- "\n==== Stack trace ============================================\n\n");
- PrintFrames(accumulator, StackFrame::OVERVIEW);
+ "\n==== JS stack trace =========================================\n\n");
+ PrintFrames(this, accumulator, StackFrame::OVERVIEW);
accumulator->Add(
"\n==== Details ================================================\n\n");
- PrintFrames(accumulator, StackFrame::DETAILS);
+ PrintFrames(this, accumulator, StackFrame::DETAILS);
accumulator->PrintMentionedObjectCache();
accumulator->Add("=====================\n\n");
@@ -777,9 +890,9 @@ void Isolate::ReportFailedAccessCheck(JSObject* receiver, v8::AccessType type) {
constructor->shared()->get_api_func_data()->access_check_info();
if (data_obj == heap_.undefined_value()) return;
- HandleScope scope;
+ HandleScope scope(this);
Handle<JSObject> receiver_handle(receiver);
- Handle<Object> data(AccessCheckInfo::cast(data_obj)->data());
+ Handle<Object> data(AccessCheckInfo::cast(data_obj)->data(), this);
{ VMState state(this, EXTERNAL);
thread_local_top()->failed_access_check_callback_(
v8::Utils::ToLocal(receiver_handle),
@@ -828,7 +941,7 @@ bool Isolate::MayNamedAccess(JSObject* receiver, Object* key,
// Skip checks for hidden properties access. Note, we do not
// require existence of a context in this case.
- if (key == heap_.hidden_symbol()) return true;
+ if (key == heap_.hidden_string()) return true;
// Check for compatibility between the security tokens in the
// current lexical context and the accessed object.
@@ -838,7 +951,11 @@ bool Isolate::MayNamedAccess(JSObject* receiver, Object* key,
if (decision != UNKNOWN) return decision == YES;
// Get named access check callback
- JSFunction* constructor = JSFunction::cast(receiver->map()->constructor());
+ // TODO(dcarney): revert
+ Map* map = receiver->map();
+ CHECK(map->IsMap());
+ CHECK(map->constructor()->IsJSFunction());
+ JSFunction* constructor = JSFunction::cast(map->constructor());
if (!constructor->shared()->IsApiFunction()) return false;
Object* data_obj =
@@ -916,16 +1033,29 @@ const char* const Isolate::kStackOverflowMessage =
Failure* Isolate::StackOverflow() {
- HandleScope scope;
- Handle<String> key = factory()->stack_overflow_symbol();
+ HandleScope scope(this);
+ // At this point we cannot create an Error object using its javascript
+ // constructor. Instead, we copy the pre-constructed boilerplate and
+ // attach the stack trace as a hidden property.
+ Handle<String> key = factory()->stack_overflow_string();
Handle<JSObject> boilerplate =
- Handle<JSObject>::cast(GetProperty(js_builtins_object(), key));
- Handle<Object> exception = Copy(boilerplate);
- // TODO(1240995): To avoid having to call JavaScript code to compute
- // the message for stack overflow exceptions which is very likely to
- // double fault with another stack overflow exception, we use a
- // precomputed message.
+ Handle<JSObject>::cast(GetProperty(this, js_builtins_object(), key));
+ Handle<JSObject> exception = Copy(boilerplate);
DoThrow(*exception, NULL);
+
+ // Get stack trace limit.
+ Handle<Object> error = GetProperty(js_builtins_object(), "$Error");
+ if (!error->IsJSObject()) return Failure::Exception();
+ Handle<Object> stack_trace_limit =
+ GetProperty(Handle<JSObject>::cast(error), "stackTraceLimit");
+ if (!stack_trace_limit->IsNumber()) return Failure::Exception();
+ int limit = static_cast<int>(stack_trace_limit->Number());
+
+ Handle<JSArray> stack_trace = CaptureSimpleStackTrace(
+ exception, factory()->undefined_value(), limit);
+ JSObject::SetHiddenProperty(exception,
+ factory()->hidden_stack_trace_string(),
+ stack_trace);
return Failure::Exception();
}
@@ -958,7 +1088,7 @@ Failure* Isolate::ReThrow(MaybeObject* exception) {
Failure* Isolate::ThrowIllegalOperation() {
- return Throw(heap_.illegal_access_symbol());
+ return Throw(heap_.illegal_access_string());
}
@@ -966,9 +1096,12 @@ void Isolate::ScheduleThrow(Object* exception) {
// When scheduling a throw we first throw the exception to get the
// error reporting if it is uncaught before rescheduling it.
Throw(exception);
- thread_local_top()->scheduled_exception_ = pending_exception();
- thread_local_top()->external_caught_exception_ = false;
- clear_pending_exception();
+ PropagatePendingExceptionToExternalTryCatch();
+ if (has_pending_exception()) {
+ thread_local_top()->scheduled_exception_ = pending_exception();
+ thread_local_top()->external_caught_exception_ = false;
+ clear_pending_exception();
+ }
}
@@ -983,14 +1116,14 @@ Failure* Isolate::PromoteScheduledException() {
void Isolate::PrintCurrentStackTrace(FILE* out) {
StackTraceFrameIterator it(this);
while (!it.done()) {
- HandleScope scope;
+ HandleScope scope(this);
// Find code position if recorded in relocation info.
JavaScriptFrame* frame = it.frame();
int pos = frame->LookupCode()->SourcePosition(frame->pc());
- Handle<Object> pos_obj(Smi::FromInt(pos));
+ Handle<Object> pos_obj(Smi::FromInt(pos), this);
// Fetch function and receiver.
Handle<JSFunction> fun(JSFunction::cast(frame->function()));
- Handle<Object> recv(frame->receiver());
+ Handle<Object> recv(frame->receiver(), this);
// Advance to the next JavaScript frame and determine if the
// current frame is the top-level frame.
it.Advance();
@@ -1060,12 +1193,13 @@ bool Isolate::ShouldReportException(bool* can_be_caught_externally,
bool Isolate::IsErrorObject(Handle<Object> obj) {
if (!obj->IsJSObject()) return false;
- String* error_key = *(factory()->LookupAsciiSymbol("$Error"));
+ String* error_key =
+ *(factory()->InternalizeOneByteString(STATIC_ASCII_VECTOR("$Error")));
Object* error_constructor =
js_builtins_object()->GetPropertyNoExceptionThrown(error_key);
for (Object* prototype = *obj; !prototype->IsNull();
- prototype = prototype->GetPrototype()) {
+ prototype = prototype->GetPrototype(this)) {
if (!prototype->IsJSObject()) return false;
if (JSObject::cast(prototype)->map()->constructor() == error_constructor) {
return true;
@@ -1078,8 +1212,8 @@ bool Isolate::IsErrorObject(Handle<Object> obj) {
void Isolate::DoThrow(Object* exception, MessageLocation* location) {
ASSERT(!has_pending_exception());
- HandleScope scope;
- Handle<Object> exception_handle(exception);
+ HandleScope scope(this);
+ Handle<Object> exception_handle(exception, this);
// Determine reporting and whether the exception is caught externally.
bool catchable_by_javascript = is_catchable_by_javascript(exception);
@@ -1116,7 +1250,7 @@ void Isolate::DoThrow(Object* exception, MessageLocation* location) {
if (capture_stack_trace_for_uncaught_exceptions_) {
if (IsErrorObject(exception_handle)) {
// We fetch the stack trace that corresponds to this error object.
- String* key = heap()->hidden_stack_trace_symbol();
+ String* key = heap()->hidden_stack_trace_string();
Object* stack_property =
JSObject::cast(*exception_handle)->GetHiddenProperty(key);
// Property lookup may have failed. In this case it's probably not
@@ -1132,18 +1266,23 @@ void Isolate::DoThrow(Object* exception, MessageLocation* location) {
stack_trace_for_uncaught_exceptions_options_);
}
}
- // Stringify custom error objects for the message object.
- if (exception_handle->IsJSObject() && !IsErrorObject(exception_handle)) {
+
+ Handle<Object> exception_arg = exception_handle;
+ // If the exception argument is a custom object, turn it into a string
+ // before throwing as uncaught exception. Note that the pending
+ // exception object to be set later must not be turned into a string.
+ if (exception_arg->IsJSObject() && !IsErrorObject(exception_arg)) {
bool failed = false;
- exception_handle = Execution::ToString(exception_handle, &failed);
+ exception_arg = Execution::ToDetailString(exception_arg, &failed);
if (failed) {
- exception_handle = factory()->LookupAsciiSymbol("exception");
+ exception_arg = factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("exception"));
}
}
Handle<Object> message_obj = MessageHandler::MakeMessageObject(
"uncaught_exception",
location,
- HandleVector<Object>(&exception_handle, 1),
+ HandleVector<Object>(&exception_arg, 1),
stack_trace,
stack_trace_object);
thread_local_top()->pending_message_obj_ = *message_obj;
@@ -1237,8 +1376,8 @@ void Isolate::ReportPendingMessages() {
// the native context. Note: We have to mark the native context here
// since the GenerateThrowOutOfMemory stub cannot make a RuntimeCall to
// set it.
- HandleScope scope;
- if (thread_local_top_.pending_exception_ == Failure::OutOfMemoryException()) {
+ HandleScope scope(this);
+ if (thread_local_top_.pending_exception_->IsOutOfMemory()) {
context()->mark_out_of_memory();
} else if (thread_local_top_.pending_exception_ ==
heap()->termination_exception()) {
@@ -1248,8 +1387,9 @@ void Isolate::ReportPendingMessages() {
if (thread_local_top_.has_pending_message_) {
thread_local_top_.has_pending_message_ = false;
if (!thread_local_top_.pending_message_obj_->IsTheHole()) {
- HandleScope scope;
- Handle<Object> message_obj(thread_local_top_.pending_message_obj_);
+ HandleScope scope(this);
+ Handle<Object> message_obj(thread_local_top_.pending_message_obj_,
+ this);
if (thread_local_top_.pending_message_script_ != NULL) {
Handle<Script> script(thread_local_top_.pending_message_script_);
int start_pos = thread_local_top_.pending_message_start_pos_;
@@ -1266,6 +1406,24 @@ void Isolate::ReportPendingMessages() {
}
+MessageLocation Isolate::GetMessageLocation() {
+ ASSERT(has_pending_exception());
+
+ if (!thread_local_top_.pending_exception_->IsOutOfMemory() &&
+ thread_local_top_.pending_exception_ != heap()->termination_exception() &&
+ thread_local_top_.has_pending_message_ &&
+ !thread_local_top_.pending_message_obj_->IsTheHole() &&
+ thread_local_top_.pending_message_script_ != NULL) {
+ Handle<Script> script(thread_local_top_.pending_message_script_);
+ int start_pos = thread_local_top_.pending_message_start_pos_;
+ int end_pos = thread_local_top_.pending_message_end_pos_;
+ return MessageLocation(script, start_pos, end_pos);
+ }
+
+ return MessageLocation();
+}
+
+
void Isolate::TraceException(bool flag) {
FLAG_trace_exception = flag; // TODO(isolates): This is an unfortunate use.
}
@@ -1296,7 +1454,7 @@ bool Isolate::OptionalRescheduleException(bool is_bottom_call) {
ASSERT(thread_local_top()->try_catch_handler_address() != NULL);
Address external_handler_address =
thread_local_top()->try_catch_handler_address();
- JavaScriptFrameIterator it;
+ JavaScriptFrameIterator it(this);
if (it.done() || (it.frame()->sp() > external_handler_address)) {
clear_exception = true;
}
@@ -1357,7 +1515,7 @@ Handle<Context> Isolate::global_context() {
Handle<Context> Isolate::GetCallingNativeContext() {
- JavaScriptFrameIterator it;
+ JavaScriptFrameIterator it(this);
#ifdef ENABLE_DEBUGGER_SUPPORT
if (debug_->InDebugger()) {
while (!it.done()) {
@@ -1417,6 +1575,14 @@ Isolate::ThreadDataTable::ThreadDataTable()
}
+Isolate::ThreadDataTable::~ThreadDataTable() {
+ // TODO(svenpanne) The assertion below would fire if an embedder does not
+ // cleanly dispose all Isolates before disposing v8, so we are conservative
+ // and leave it out for now.
+ // ASSERT_EQ(NULL, list_);
+}
+
+
Isolate::PerIsolateThreadData*
Isolate::ThreadDataTable::Lookup(Isolate* isolate,
ThreadId thread_id) {
@@ -1510,7 +1676,7 @@ Isolate::Isolate()
free_list_(0),
preallocated_storage_preallocated_(false),
inner_pointer_to_code_cache_(NULL),
- write_input_buffer_(NULL),
+ write_iterator_(NULL),
global_handles_(NULL),
context_switcher_(NULL),
thread_manager_(NULL),
@@ -1519,9 +1685,12 @@ Isolate::Isolate()
string_tracker_(NULL),
regexp_stack_(NULL),
date_cache_(NULL),
+ code_stub_interface_descriptors_(NULL),
context_exit_happened_(false),
deferred_handles_head_(NULL),
- optimizing_compiler_thread_(this) {
+ optimizing_compiler_thread_(this),
+ marking_thread_(NULL),
+ sweeper_thread_(NULL) {
TRACE_ISOLATE(constructor);
memset(isolate_addresses_, 0,
@@ -1547,6 +1716,8 @@ Isolate::Isolate()
memset(&js_spill_information_, 0, sizeof(js_spill_information_));
memset(code_kind_statistics_, 0,
sizeof(code_kind_statistics_[0]) * Code::NUMBER_OF_KINDS);
+
+ allow_handle_deref_ = true;
#endif
#ifdef ENABLE_DEBUGGER_SUPPORT
@@ -1567,6 +1738,7 @@ Isolate::Isolate()
#undef ISOLATE_INIT_ARRAY_EXECUTE
}
+
void Isolate::TearDown() {
TRACE_ISOLATE(tear_down);
@@ -1598,10 +1770,31 @@ void Isolate::TearDown() {
}
+void Isolate::GlobalTearDown() {
+ delete thread_data_table_;
+}
+
+
void Isolate::Deinit() {
if (state_ == INITIALIZED) {
TRACE_ISOLATE(deinit);
+ if (FLAG_sweeper_threads > 0) {
+ for (int i = 0; i < FLAG_sweeper_threads; i++) {
+ sweeper_thread_[i]->Stop();
+ delete sweeper_thread_[i];
+ }
+ delete[] sweeper_thread_;
+ }
+
+ if (FLAG_marking_threads > 0) {
+ for (int i = 0; i < FLAG_marking_threads; i++) {
+ marking_thread_[i]->Stop();
+ delete marking_thread_[i];
+ }
+ delete[] marking_thread_;
+ }
+
if (FLAG_parallel_recompilation) optimizing_compiler_thread_.Stop();
if (FLAG_hydrogen_stats) HStatistics::Instance()->Print();
@@ -1612,7 +1805,7 @@ void Isolate::Deinit() {
delete deoptimizer_data_;
deoptimizer_data_ = NULL;
if (FLAG_preemption) {
- v8::Locker locker;
+ v8::Locker locker(reinterpret_cast<v8::Isolate*>(this));
v8::Locker::StopPreemption();
}
builtins_.TearDown();
@@ -1681,6 +1874,9 @@ Isolate::~Isolate() {
delete date_cache_;
date_cache_ = NULL;
+ delete[] code_stub_interface_descriptors_;
+ code_stub_interface_descriptors_ = NULL;
+
delete regexp_stack_;
regexp_stack_ = NULL;
@@ -1717,8 +1913,8 @@ Isolate::~Isolate() {
bootstrapper_ = NULL;
delete inner_pointer_to_code_cache_;
inner_pointer_to_code_cache_ = NULL;
- delete write_input_buffer_;
- write_input_buffer_ = NULL;
+ delete write_iterator_;
+ write_iterator_ = NULL;
delete context_switcher_;
context_switcher_ = NULL;
@@ -1761,7 +1957,7 @@ void Isolate::PropagatePendingExceptionToExternalTryCatch() {
if (!external_caught) return;
- if (thread_local_top_.pending_exception_ == Failure::OutOfMemoryException()) {
+ if (thread_local_top_.pending_exception_->IsOutOfMemory()) {
// Do not propagate OOM exception: we should kill VM asap.
} else if (thread_local_top_.pending_exception_ ==
heap()->termination_exception()) {
@@ -1782,7 +1978,7 @@ void Isolate::PropagatePendingExceptionToExternalTryCatch() {
void Isolate::InitializeLoggingAndCounters() {
if (logger_ == NULL) {
- logger_ = new Logger;
+ logger_ = new Logger(this);
}
if (counters_ == NULL) {
counters_ = new Counters;
@@ -1836,14 +2032,16 @@ bool Isolate::Init(Deserializer* des) {
descriptor_lookup_cache_ = new DescriptorLookupCache();
unicode_cache_ = new UnicodeCache();
inner_pointer_to_code_cache_ = new InnerPointerToCodeCache(this);
- write_input_buffer_ = new StringInputBuffer();
+ write_iterator_ = new ConsStringIteratorOp();
global_handles_ = new GlobalHandles(this);
- bootstrapper_ = new Bootstrapper();
+ bootstrapper_ = new Bootstrapper(this);
handle_scope_implementer_ = new HandleScopeImplementer(this);
stub_cache_ = new StubCache(this, runtime_zone());
regexp_stack_ = new RegExpStack();
regexp_stack_->isolate_ = this;
date_cache_ = new DateCache();
+ code_stub_interface_descriptors_ =
+ new CodeStubInterfaceDescriptor[CodeStub::NUMBER_OF_IDS];
// Enable logging before setting up the heap
logger_->SetUp();
@@ -1867,10 +2065,17 @@ bool Isolate::Init(Deserializer* des) {
}
// SetUp the object heap.
- const bool create_heap_objects = (des == NULL);
ASSERT(!heap_.HasBeenSetUp());
- if (!heap_.SetUp(create_heap_objects)) {
- V8::SetFatalError();
+ if (!heap_.SetUp()) {
+ V8::FatalProcessOutOfMemory("heap setup");
+ return false;
+ }
+
+ deoptimizer_data_ = new DeoptimizerData;
+
+ const bool create_heap_objects = (des == NULL);
+ if (create_heap_objects && !heap_.CreateHeapObjects()) {
+ V8::FatalProcessOutOfMemory("heap object creation");
return false;
}
@@ -1896,7 +2101,7 @@ bool Isolate::Init(Deserializer* des) {
}
if (FLAG_preemption) {
- v8::Locker locker;
+ v8::Locker locker(reinterpret_cast<v8::Isolate*>(this));
v8::Locker::StartPreemption(100);
}
@@ -1922,15 +2127,14 @@ bool Isolate::Init(Deserializer* des) {
// Quiet the heap NaN if needed on target platform.
if (!create_heap_objects) Assembler::QuietNaN(heap_.nan_value());
- deoptimizer_data_ = new DeoptimizerData;
runtime_profiler_ = new RuntimeProfiler(this);
runtime_profiler_->SetUp();
// If we are deserializing, log non-function code objects and compiled
// functions found in the snapshot.
- if (create_heap_objects &&
+ if (!create_heap_objects &&
(FLAG_log_code || FLAG_ll_prof || logger_->is_logging_code_events())) {
- HandleScope scope;
+ HandleScope scope(this);
LOG(this, LogCodeObjects());
LOG(this, LogCompiledFunctions());
}
@@ -1944,7 +2148,63 @@ bool Isolate::Init(Deserializer* des) {
state_ = INITIALIZED;
time_millis_at_init_ = OS::TimeCurrentMillis();
+
+ if (!create_heap_objects) {
+ // Now that the heap is consistent, it's OK to generate the code for the
+ // deopt entry table that might have been referred to by optimized code in
+ // the snapshot.
+ HandleScope scope(this);
+ Deoptimizer::EnsureCodeForDeoptimizationEntry(
+ this,
+ Deoptimizer::LAZY,
+ kDeoptTableSerializeEntryCount - 1);
+ }
+
+ if (!Serializer::enabled()) {
+ // Ensure that the stub failure trampoline has been generated.
+ HandleScope scope(this);
+ CodeStub::GenerateFPStubs(this);
+ StubFailureTrampolineStub::GenerateAheadOfTime(this);
+ }
+
if (FLAG_parallel_recompilation) optimizing_compiler_thread_.Start();
+
+ if (FLAG_parallel_marking && FLAG_marking_threads == 0) {
+ FLAG_marking_threads = SystemThreadManager::
+ NumberOfParallelSystemThreads(
+ SystemThreadManager::PARALLEL_MARKING);
+ }
+ if (FLAG_marking_threads > 0) {
+ marking_thread_ = new MarkingThread*[FLAG_marking_threads];
+ for (int i = 0; i < FLAG_marking_threads; i++) {
+ marking_thread_[i] = new MarkingThread(this);
+ marking_thread_[i]->Start();
+ }
+ } else {
+ FLAG_parallel_marking = false;
+ }
+
+ if (FLAG_sweeper_threads == 0) {
+ if (FLAG_concurrent_sweeping) {
+ FLAG_sweeper_threads = SystemThreadManager::
+ NumberOfParallelSystemThreads(
+ SystemThreadManager::CONCURRENT_SWEEPING);
+ } else if (FLAG_parallel_sweeping) {
+ FLAG_sweeper_threads = SystemThreadManager::
+ NumberOfParallelSystemThreads(
+ SystemThreadManager::PARALLEL_SWEEPING);
+ }
+ }
+ if (FLAG_sweeper_threads > 0) {
+ sweeper_thread_ = new SweeperThread*[FLAG_sweeper_threads];
+ for (int i = 0; i < FLAG_sweeper_threads; i++) {
+ sweeper_thread_[i] = new SweeperThread(this);
+ sweeper_thread_[i]->Start();
+ }
+ } else {
+ FLAG_concurrent_sweeping = false;
+ FLAG_parallel_sweeping = false;
+ }
return true;
}
@@ -2058,6 +2318,12 @@ void Isolate::UnlinkDeferredHandles(DeferredHandles* deferred) {
}
+CodeStubInterfaceDescriptor*
+ Isolate::code_stub_interface_descriptor(int index) {
+ return code_stub_interface_descriptors_ + index;
+}
+
+
#ifdef DEBUG
#define ISOLATE_FIELD_OFFSET(type, name, ignored) \
const intptr_t Isolate::name##_debug_offset_ = OFFSET_OF(Isolate, name##_);
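The thread-count policy in the new SystemThreadManager::NumberOfParallelSystemThreads is small enough to restate as a pure function. A sketch mirroring the logic above, with the core count passed in instead of read from OS::NumberOfCores():

#include <algorithm>

enum Component { PARALLEL_SWEEPING, CONCURRENT_SWEEPING, PARALLEL_MARKING };
const int kMaxThreads = 4;  // Matches SystemThreadManager::kMaxThreads.

int NumberOfParallelSystemThreads(Component type, int cores) {
  int n = std::min(cores, kMaxThreads);
  if (n == 1) return 0;  // Single core: spin up no helper threads.
  if (type == CONCURRENT_SWEEPING) return n - 1;  // Keep one core for the
                                                  // mutator thread.
  return n;  // PARALLEL_SWEEPING and PARALLEL_MARKING use every thread.
}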
diff --git a/src/3rdparty/v8/src/isolate.h b/src/3rdparty/v8/src/isolate.h
index 889a3e1..53fece7 100644
--- a/src/3rdparty/v8/src/isolate.h
+++ b/src/3rdparty/v8/src/isolate.h
@@ -53,6 +53,7 @@ namespace internal {
class Bootstrapper;
class CodeGenerator;
class CodeRange;
+struct CodeStubInterfaceDescriptor;
class CompilationCache;
class ContextSlotCache;
class ContextSwitcher;
@@ -70,13 +71,15 @@ class HeapProfiler;
class InlineRuntimeFunctionsTable;
class NoAllocationStringAllocator;
class InnerPointerToCodeCache;
+class MarkingThread;
class PreallocatedMemoryThread;
class RegExpStack;
class SaveContext;
class UnicodeCache;
-class StringInputBuffer;
+class ConsStringIteratorOp;
class StringTracker;
class StubCache;
+class SweeperThread;
class ThreadManager;
class ThreadState;
class ThreadVisitor; // Defined in v8threads.h
@@ -287,6 +290,20 @@ class ThreadLocalTop BASE_EMBEDDED {
};
+class SystemThreadManager {
+ public:
+ enum ParallelSystemComponent {
+ PARALLEL_SWEEPING,
+ CONCURRENT_SWEEPING,
+ PARALLEL_MARKING
+ };
+
+ static int NumberOfParallelSystemThreads(ParallelSystemComponent type);
+
+ static const int kMaxThreads = 4;
+};
+
+
#ifdef ENABLE_DEBUGGER_SUPPORT
#define ISOLATE_DEBUGGER_INIT_LIST(V) \
@@ -470,6 +487,8 @@ class Isolate {
// for legacy API reasons.
void TearDown();
+ static void GlobalTearDown();
+
bool IsDefaultIsolate() const { return this == default_isolate_; }
// Ensures that process-wide resources and the default isolate have been
@@ -533,11 +552,6 @@ class Isolate {
thread_local_top_.save_context_ = save;
}
- // Access to the map of "new Object()".
- Map* empty_object_map() {
- return context()->native_context()->object_function()->map();
- }
-
// Access to current thread id.
ThreadId thread_id() { return thread_local_top_.thread_id_; }
void set_thread_id(ThreadId id) { thread_local_top_.thread_id_ = id; }
@@ -617,7 +631,7 @@ class Isolate {
bool IsExternallyCaught();
bool is_catchable_by_javascript(MaybeObject* exception) {
- return (exception != Failure::OutOfMemoryException()) &&
+ return (!exception->IsOutOfMemory()) &&
(exception != heap()->termination_exception());
}
@@ -676,7 +690,8 @@ class Isolate {
// Scope currently can only be used for regular exceptions, not
// failures like OOM or termination exception.
isolate_(isolate),
- pending_exception_(isolate_->pending_exception()->ToObjectUnchecked()),
+ pending_exception_(isolate_->pending_exception()->ToObjectUnchecked(),
+ isolate_),
catcher_(isolate_->catcher())
{ }
@@ -719,7 +734,10 @@ class Isolate {
int frame_limit,
StackTrace::StackTraceOptions options);
- void CaptureAndSetCurrentStackTraceFor(Handle<JSObject> error_object);
+ Handle<JSArray> CaptureSimpleStackTrace(Handle<JSObject> error_object,
+ Handle<Object> caller,
+ int limit);
+ void CaptureAndSetDetailedStackTrace(Handle<JSObject> error_object);
// Returns if the top context may access the given global object. If
// the result is false, the pending exception is guaranteed to be
@@ -734,8 +752,9 @@ class Isolate {
void SetFailedAccessCheckCallback(v8::FailedAccessCheckCallback callback);
void ReportFailedAccessCheck(JSObject* receiver, v8::AccessType type);
- void SetUserObjectComparisonCallback(v8::UserObjectComparisonCallback callback);
- inline v8::UserObjectComparisonCallback UserObjectComparisonCallback() {
+ void SetUserObjectComparisonCallback(
+ v8::UserObjectComparisonCallback callback);
+ inline v8::UserObjectComparisonCallback UserObjectComparisonCallback() {
return thread_local_top()->user_object_comparison_callback_;
}
@@ -748,6 +767,8 @@ class Isolate {
Failure* ReThrow(MaybeObject* exception);
void ScheduleThrow(Object* exception);
void ReportPendingMessages();
+ // Return pending location if any or unfilled structure.
+ MessageLocation GetMessageLocation();
Failure* ThrowIllegalOperation();
// Promote a scheduled exception to pending. Asserts has_scheduled_exception.
@@ -773,7 +794,6 @@ class Isolate {
void Iterate(ObjectVisitor* v);
void Iterate(ObjectVisitor* v, ThreadLocalTop* t);
char* Iterate(ObjectVisitor* v, char* t);
- void IterateThread(ThreadVisitor* v);
void IterateThread(ThreadVisitor* v, char* t);
@@ -817,9 +837,9 @@ class Isolate {
ISOLATE_INIT_ARRAY_LIST(GLOBAL_ARRAY_ACCESSOR)
#undef GLOBAL_ARRAY_ACCESSOR
-#define NATIVE_CONTEXT_FIELD_ACCESSOR(index, type, name) \
- Handle<type> name() { \
- return Handle<type>(context()->native_context()->name()); \
+#define NATIVE_CONTEXT_FIELD_ACCESSOR(index, type, name) \
+ Handle<type> name() { \
+ return Handle<type>(context()->native_context()->name(), this); \
}
NATIVE_CONTEXT_FIELDS(NATIVE_CONTEXT_FIELD_ACCESSOR)
#undef NATIVE_CONTEXT_FIELD_ACCESSOR
@@ -884,7 +904,7 @@ class Isolate {
return inner_pointer_to_code_cache_;
}
- StringInputBuffer* write_input_buffer() { return write_input_buffer_; }
+ ConsStringIteratorOp* write_iterator() { return write_iterator_; }
GlobalHandles* global_handles() { return global_handles_; }
@@ -906,16 +926,16 @@ class Isolate {
return &jsregexp_canonrange_;
}
- StringInputBuffer* objects_string_compare_buffer_a() {
- return &objects_string_compare_buffer_a_;
+ ConsStringIteratorOp* objects_string_compare_iterator_a() {
+ return &objects_string_compare_iterator_a_;
}
- StringInputBuffer* objects_string_compare_buffer_b() {
- return &objects_string_compare_buffer_b_;
+ ConsStringIteratorOp* objects_string_compare_iterator_b() {
+ return &objects_string_compare_iterator_b_;
}
- StaticResource<StringInputBuffer>* objects_string_input_buffer() {
- return &objects_string_input_buffer_;
+ StaticResource<ConsStringIteratorOp>* objects_string_iterator() {
+ return &objects_string_iterator_;
}
RuntimeState* runtime_state() { return &runtime_state_; }
@@ -926,10 +946,6 @@ class Isolate {
bool fp_stubs_generated() { return fp_stubs_generated_; }
- StaticResource<SafeStringInputBuffer>* compiler_safe_string_input_buffer() {
- return &compiler_safe_string_input_buffer_;
- }
-
Builtins* builtins() { return &builtins_; }
void NotifyExtensionInstalled() {
@@ -976,6 +992,9 @@ class Isolate {
}
int* code_kind_statistics() { return code_kind_statistics_; }
+
+ bool allow_handle_deref() { return allow_handle_deref_; }
+ void set_allow_handle_deref(bool allow) { allow_handle_deref_ = allow; }
#endif
#if defined(V8_TARGET_ARCH_ARM) && !defined(__arm__) || \
@@ -1023,7 +1042,6 @@ class Isolate {
RuntimeProfiler::IsolateEnteredJS(this);
} else if (current_state == JS && state != JS) {
// JS -> non-JS transition.
- ASSERT(RuntimeProfiler::IsSomeIsolateInJS());
RuntimeProfiler::IsolateExitedJS(this);
} else {
// Other types of state transitions are not interesting to the
@@ -1067,6 +1085,9 @@ class Isolate {
date_cache_ = date_cache;
}
+ CodeStubInterfaceDescriptor*
+ code_stub_interface_descriptor(int index);
+
void IterateDeferredHandles(ObjectVisitor* visitor);
void LinkDeferredHandles(DeferredHandles* deferred_handles);
void UnlinkDeferredHandles(DeferredHandles* deferred_handles);
@@ -1075,6 +1096,19 @@ class Isolate {
return &optimizing_compiler_thread_;
}
+ // PreInits and returns a default isolate. Needed when a new thread tries
+ // to create a Locker for the first time (the lock itself is in the isolate).
+ // TODO(svenpanne) This method is on death row...
+ static v8::Isolate* GetDefaultIsolateForLocking();
+
+ MarkingThread** marking_threads() {
+ return marking_thread_;
+ }
+
+ SweeperThread** sweeper_threads() {
+ return sweeper_thread_;
+ }
+
private:
Isolate();
@@ -1158,10 +1192,6 @@ class Isolate {
// If one does not yet exist, allocate a new one.
PerIsolateThreadData* FindOrAllocatePerThreadDataForThisThread();
- // PreInits and returns a default isolate. Needed when a new thread tries
- // to create a Locker for the first time (the lock itself is in the isolate).
- static Isolate* GetDefaultIsolateForLocking();
-
// Initializes the current thread to run this Isolate.
// Not thread-safe. Multiple threads should not Enter/Exit the same isolate
// at the same time, this should be prevented using external locking.
@@ -1230,26 +1260,26 @@ class Isolate {
PreallocatedStorage free_list_;
bool preallocated_storage_preallocated_;
InnerPointerToCodeCache* inner_pointer_to_code_cache_;
- StringInputBuffer* write_input_buffer_;
+ ConsStringIteratorOp* write_iterator_;
GlobalHandles* global_handles_;
ContextSwitcher* context_switcher_;
ThreadManager* thread_manager_;
RuntimeState runtime_state_;
bool fp_stubs_generated_;
- StaticResource<SafeStringInputBuffer> compiler_safe_string_input_buffer_;
Builtins builtins_;
bool has_installed_extensions_;
StringTracker* string_tracker_;
unibrow::Mapping<unibrow::Ecma262UnCanonicalize> jsregexp_uncanonicalize_;
unibrow::Mapping<unibrow::CanonicalizationRange> jsregexp_canonrange_;
- StringInputBuffer objects_string_compare_buffer_a_;
- StringInputBuffer objects_string_compare_buffer_b_;
- StaticResource<StringInputBuffer> objects_string_input_buffer_;
+ ConsStringIteratorOp objects_string_compare_iterator_a_;
+ ConsStringIteratorOp objects_string_compare_iterator_b_;
+ StaticResource<ConsStringIteratorOp> objects_string_iterator_;
unibrow::Mapping<unibrow::Ecma262Canonicalize>
regexp_macro_assembler_canonicalize_;
RegExpStack* regexp_stack_;
DateCache* date_cache_;
unibrow::Mapping<unibrow::Ecma262Canonicalize> interp_canonicalize_mapping_;
+ CodeStubInterfaceDescriptor* code_stub_interface_descriptors_;
// The garbage collector should be a little more aggressive when it knows
// that a context was recently exited.
@@ -1270,6 +1300,8 @@ class Isolate {
HistogramInfo heap_histograms_[LAST_TYPE + 1];
JSObject::SpillInformation js_spill_information_;
int code_kind_statistics_[Code::NUMBER_OF_KINDS];
+
+ bool allow_handle_deref_;
#endif
#ifdef ENABLE_DEBUGGER_SUPPORT
@@ -1300,16 +1332,21 @@ class Isolate {
DeferredHandles* deferred_handles_head_;
OptimizingCompilerThread optimizing_compiler_thread_;
+ MarkingThread** marking_thread_;
+ SweeperThread** sweeper_thread_;
friend class ExecutionAccess;
friend class HandleScopeImplementer;
friend class IsolateInitializer;
+ friend class MarkingThread;
friend class OptimizingCompilerThread;
+ friend class SweeperThread;
friend class ThreadManager;
friend class Simulator;
friend class StackGuard;
friend class ThreadId;
friend class TestMemoryAllocatorScope;
+ friend class TestCodeRangeScope;
friend class v8::Isolate;
friend class v8::Locker;
friend class v8::Unlocker;
@@ -1403,12 +1440,7 @@ class StackLimitCheck BASE_EMBEDDED {
bool HasOverflowed() const {
StackGuard* stack_guard = isolate_->stack_guard();
- // Stack has overflowed in C++ code only if stack pointer exceeds the C++
- // stack guard and the limits are not set to interrupt values.
- // TODO(214): Stack overflows are ignored if a interrupt is pending. This
- // code should probably always use the initial C++ limit.
- return (reinterpret_cast<uintptr_t>(this) < stack_guard->climit()) &&
- stack_guard->IsStackOverflow();
+ return (reinterpret_cast<uintptr_t>(this) < stack_guard->real_climit());
}
private:
Isolate* isolate_;
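The simplified HasOverflowed relies on the address of the stack-allocated check object as a conservative proxy for the stack pointer. A host-side sketch of the idea, with real_climit standing in for StackGuard::real_climit():

#include <cstdint>

struct StackLimitCheckModel {
  uintptr_t real_climit;  // Lowest legal C++ stack address (stack grows down).
  bool HasOverflowed() const {
    // This object lives on the stack, so its own address approximates
    // the current stack pointer; below the limit means overflow.
    return reinterpret_cast<uintptr_t>(this) < real_climit;
  }
};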
diff --git a/src/3rdparty/v8/src/json-parser.h b/src/3rdparty/v8/src/json-parser.h
index 6f8c715..28ef8b3 100644
--- a/src/3rdparty/v8/src/json-parser.h
+++ b/src/3rdparty/v8/src/json-parser.h
@@ -58,7 +58,7 @@ class JsonParser BASE_EMBEDDED {
if (position_ >= source_length_) {
c0_ = kEndOfString;
} else if (seq_ascii) {
- c0_ = seq_source_->SeqAsciiStringGet(position_);
+ c0_ = seq_source_->SeqOneByteStringGet(position_);
} else {
c0_ = source_->Get(position_);
}
@@ -102,10 +102,10 @@ class JsonParser BASE_EMBEDDED {
Handle<String> ParseJsonString() {
return ScanJsonString<false>();
}
- Handle<String> ParseJsonSymbol() {
+ Handle<String> ParseJsonInternalizedString() {
return ScanJsonString<true>();
}
- template <bool is_symbol>
+ template <bool is_internalized>
Handle<String> ScanJsonString();
// Creates a new string and copies prefix[start..end] into the beginning
// of it. Then scans the rest of the string, adding characters after the
@@ -160,7 +160,7 @@ class JsonParser BASE_EMBEDDED {
private:
Handle<String> source_;
int source_length_;
- Handle<SeqAsciiString> seq_source_;
+ Handle<SeqOneByteString> seq_source_;
PretenureFlag pretenure_;
Isolate* isolate_;
@@ -186,7 +186,7 @@ Handle<Object> JsonParser<seq_ascii>::ParseJson(Handle<String> source,
// Optimized fast case where we only have ASCII characters.
if (seq_ascii) {
- seq_source_ = Handle<SeqAsciiString>::cast(source_);
+ seq_source_ = Handle<SeqOneByteString>::cast(source_);
}
// Set initial position right before the string.
@@ -228,7 +228,8 @@ Handle<Object> JsonParser<seq_ascii>::ParseJson(Handle<String> source,
break;
default:
message = "unexpected_token";
- Handle<Object> name = LookupSingleCharacterStringFromCode(c0_);
+ Handle<Object> name =
+ LookupSingleCharacterStringFromCode(isolate_, c0_);
Handle<FixedArray> element = factory->NewFixedArray(1);
element->set(0, *name);
array = factory->NewJSArrayWithElements(element);
@@ -290,7 +291,6 @@ Handle<Object> JsonParser<seq_ascii>::ParseJsonValue() {
// Parse a JSON object. Position must be right at '{'.
template <bool seq_ascii>
Handle<Object> JsonParser<seq_ascii>::ParseJsonObject() {
- Handle<Object> prototype;
Handle<JSObject> json_object =
factory()->NewJSObject(object_constructor(), pretenure_);
ASSERT_EQ(c0_, '{');
@@ -338,29 +338,24 @@ Handle<Object> JsonParser<seq_ascii>::ParseJsonObject() {
c0_ = '"';
#endif
- Handle<String> key = ParseJsonSymbol();
+ Handle<String> key = ParseJsonInternalizedString();
if (key.is_null() || c0_ != ':') return ReportUnexpectedCharacter();
AdvanceSkipWhitespace();
Handle<Object> value = ParseJsonValue();
if (value.is_null()) return ReportUnexpectedCharacter();
- if (key->Equals(isolate()->heap()->Proto_symbol())) {
- prototype = value;
+ if (JSObject::TryTransitionToField(json_object, key)) {
+ int index = json_object->LastAddedFieldIndex();
+ json_object->FastPropertyAtPut(index, *value);
} else {
- if (JSObject::TryTransitionToField(json_object, key)) {
- int index = json_object->LastAddedFieldIndex();
- json_object->FastPropertyAtPut(index, *value);
- } else {
- JSObject::SetLocalPropertyIgnoreAttributes(
- json_object, key, value, NONE);
- }
+ JSObject::SetLocalPropertyIgnoreAttributes(
+ json_object, key, value, NONE);
}
} while (MatchSkipWhiteSpace(','));
if (c0_ != '}') {
return ReportUnexpectedCharacter();
}
- if (!prototype.is_null()) SetPrototype(json_object, prototype);
}
AdvanceSkipWhitespace();
return json_object;
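
The reworked loop keeps only the map-transition fast path: when the key matches a known transition, the value is stored straight into the field that transition added, and only otherwise does it fall back to the generic property write. A self-contained sketch of the underlying hidden-class idea (Shape, AddProperty and the double-only fields are illustrative stand-ins, not V8 types):

    #include <cstdio>
    #include <map>
    #include <memory>
    #include <string>
    #include <vector>

    // Simplified "hidden class" sketch: objects that see the same keys in
    // the same order share a Shape, so each property has a fixed slot index.
    struct Shape {
      std::map<std::string, std::shared_ptr<Shape>> transitions;
      int field_count = 0;
    };

    struct Obj {
      std::shared_ptr<Shape> shape;
      std::vector<double> fields;
    };

    // Analogue of TryTransitionToField + FastPropertyAtPut: follow (or
    // create) the transition for 'key' and store the value in the new slot.
    void AddProperty(Obj* obj, const std::string& key, double value) {
      auto& next = obj->shape->transitions[key];
      if (!next) {
        next = std::make_shared<Shape>();
        next->field_count = obj->shape->field_count + 1;
      }
      obj->shape = next;
      obj->fields.push_back(value);  // Slot index == field_count - 1.
    }

    int main() {
      auto root = std::make_shared<Shape>();
      Obj a{root}, b{root};
      AddProperty(&a, "x", 1);
      AddProperty(&b, "x", 2);
      printf("shared shape: %d\n", a.shape == b.shape);  // Prints 1.
    }
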
@@ -441,21 +436,21 @@ Handle<Object> JsonParser<seq_ascii>::ParseJsonNumber() {
int length = position_ - beg_pos;
double number;
if (seq_ascii) {
- Vector<const char> chars(seq_source_->GetChars() + beg_pos, length);
+ Vector<const uint8_t> chars(seq_source_->GetChars() + beg_pos, length);
number = StringToDouble(isolate()->unicode_cache(),
- chars,
+ Vector<const char>::cast(chars),
NO_FLAGS, // Hex, octal or trailing junk.
OS::nan_value());
} else {
- Vector<char> buffer = Vector<char>::New(length);
+ Vector<uint8_t> buffer = Vector<uint8_t>::New(length);
String::WriteToFlat(*source_, buffer.start(), beg_pos, position_);
- Vector<const char> result =
- Vector<const char>(reinterpret_cast<const char*>(buffer.start()),
- length);
+ Vector<const uint8_t> result =
+ Vector<const uint8_t>(buffer.start(), length);
number = StringToDouble(isolate()->unicode_cache(),
- result,
- NO_FLAGS, // Hex, octal or trailing junk.
- 0.0);
+ // TODO(dcarney): Convert StringToDouble to uint_t.
+ Vector<const char>::cast(result),
+ NO_FLAGS, // Hex, octal or trailing junk.
+ 0.0);
buffer.Dispose();
}
SkipWhitespace();
@@ -472,8 +467,8 @@ inline void SeqStringSet(Handle<SeqTwoByteString> seq_str, int i, uc32 c) {
}
template <>
-inline void SeqStringSet(Handle<SeqAsciiString> seq_str, int i, uc32 c) {
- seq_str->SeqAsciiStringSet(i, c);
+inline void SeqStringSet(Handle<SeqOneByteString> seq_str, int i, uc32 c) {
+ seq_str->SeqOneByteStringSet(i, c);
}
template <typename StringType>
@@ -489,10 +484,10 @@ inline Handle<SeqTwoByteString> NewRawString(Factory* factory,
}
template <>
-inline Handle<SeqAsciiString> NewRawString(Factory* factory,
+inline Handle<SeqOneByteString> NewRawString(Factory* factory,
int length,
PretenureFlag pretenure) {
- return factory->NewRawAsciiString(length, pretenure);
+ return factory->NewRawOneByteString(length, pretenure);
}
@@ -526,11 +521,11 @@ Handle<String> JsonParser<seq_ascii>::SlowScanJsonString(
// in the ASCII sink.
if (sizeof(SinkChar) == kUC16Size ||
seq_ascii ||
- c0_ <= kMaxAsciiCharCode) {
+ c0_ <= String::kMaxOneByteCharCode) {
SeqStringSet(seq_str, count++, c0_);
Advance();
} else {
- // StringType is SeqAsciiString and we just read a non-ASCII char.
+ // StringType is SeqOneByteString and we just read a non-ASCII char.
return SlowScanJsonString<SeqTwoByteString, uc16>(seq_str, 0, count);
}
} else {
@@ -566,11 +561,12 @@ Handle<String> JsonParser<seq_ascii>::SlowScanJsonString(
}
value = value * 16 + digit;
}
- if (sizeof(SinkChar) == kUC16Size || value <= kMaxAsciiCharCode) {
+ if (sizeof(SinkChar) == kUC16Size ||
+ value <= String::kMaxOneByteCharCode) {
SeqStringSet(seq_str, count++, value);
break;
} else {
- // StringType is SeqAsciiString and we just read a non-ASCII char.
+ // StringType is SeqOneByteString and we just read a non-ASCII char.
position_ -= 6; // Rewind position_ to \ in \uxxxx.
Advance();
return SlowScanJsonString<SeqTwoByteString, uc16>(seq_str,
@@ -605,7 +601,7 @@ Handle<String> JsonParser<seq_ascii>::SlowScanJsonString(
template <bool seq_ascii>
-template <bool is_symbol>
+template <bool is_internalized>
Handle<String> JsonParser<seq_ascii>::ScanJsonString() {
ASSERT_EQ('"', c0_);
Advance();
@@ -614,10 +610,10 @@ Handle<String> JsonParser<seq_ascii>::ScanJsonString() {
return factory()->empty_string();
}
- if (seq_ascii && is_symbol) {
- // Fast path for existing symbols. If the the string being parsed is not
- // a known symbol, contains backslashes or unexpectedly reaches the end of
- // string, return with an empty handle.
+ if (seq_ascii && is_internalized) {
+ // Fast path for existing internalized strings. If the string being
+ // parsed is not a known internalized string, contains backslashes or
+ // unexpectedly reaches the end of string, return with an empty handle.
uint32_t running_hash = isolate()->heap()->HashSeed();
int position = position_;
uc32 c0 = c0_;
@@ -626,40 +622,50 @@ Handle<String> JsonParser<seq_ascii>::ScanJsonString() {
c0_ = c0;
int beg_pos = position_;
position_ = position;
- return SlowScanJsonString<SeqAsciiString, char>(source_,
- beg_pos,
- position_);
+ return SlowScanJsonString<SeqOneByteString, uint8_t>(source_,
+ beg_pos,
+ position_);
}
if (c0 < 0x20) return Handle<String>::null();
- running_hash = StringHasher::AddCharacterCore(running_hash, c0);
+ if (static_cast<uint32_t>(c0) >
+ unibrow::Utf16::kMaxNonSurrogateCharCode) {
+ running_hash =
+ StringHasher::AddCharacterCore(running_hash,
+ unibrow::Utf16::LeadSurrogate(c0));
+ running_hash =
+ StringHasher::AddCharacterCore(running_hash,
+ unibrow::Utf16::TrailSurrogate(c0));
+ } else {
+ running_hash = StringHasher::AddCharacterCore(running_hash, c0);
+ }
position++;
if (position >= source_length_) return Handle<String>::null();
- c0 = seq_source_->SeqAsciiStringGet(position);
+ c0 = seq_source_->SeqOneByteStringGet(position);
} while (c0 != '"');
int length = position - position_;
uint32_t hash = (length <= String::kMaxHashCalcLength)
? StringHasher::GetHashCore(running_hash) : length;
- Vector<const char> string_vector(
+ Vector<const uint8_t> string_vector(
seq_source_->GetChars() + position_, length);
- SymbolTable* symbol_table = isolate()->heap()->symbol_table();
- uint32_t capacity = symbol_table->Capacity();
- uint32_t entry = SymbolTable::FirstProbe(hash, capacity);
+ StringTable* string_table = isolate()->heap()->string_table();
+ uint32_t capacity = string_table->Capacity();
+ uint32_t entry = StringTable::FirstProbe(hash, capacity);
uint32_t count = 1;
while (true) {
- Object* element = symbol_table->KeyAt(entry);
+ Object* element = string_table->KeyAt(entry);
if (element == isolate()->heap()->undefined_value()) {
// Lookup failure.
break;
}
if (element != isolate()->heap()->the_hole_value() &&
- String::cast(element)->IsAsciiEqualTo(string_vector)) {
+ String::cast(element)->IsOneByteEqualTo(string_vector)) {
// Lookup success, update the current position.
position_ = position;
// Advance past the last '"'.
AdvanceSkipWhitespace();
return Handle<String>(String::cast(element), isolate());
}
- entry = SymbolTable::NextProbe(entry, count++, capacity);
+ entry = StringTable::NextProbe(entry, count++, capacity);
}
}
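
The fast path above hashes the one-byte characters as it scans them and then probes the string table directly, bailing to the slow path on escapes or a missed lookup. A runnable sketch of that probe discipline (the hash mixing and table layout are simplified stand-ins for StringHasher and StringTable, which uses the same first/next-probe scheme over a power-of-two capacity):

    #include <cstdint>
    #include <cstdio>
    #include <string>
    #include <vector>

    static uint32_t AddCharacter(uint32_t running, uint32_t c) {
      running += c;
      running += running << 10;
      running ^= running >> 6;
      return running;
    }

    static const std::string* Lookup(const std::vector<std::string>& table,
                                     const std::string& key, uint32_t hash) {
      uint32_t capacity = static_cast<uint32_t>(table.size());  // Power of 2.
      uint32_t entry = hash & (capacity - 1);                   // FirstProbe.
      for (uint32_t count = 1;; entry = (entry + count++) & (capacity - 1)) {
        if (table[entry].empty()) return nullptr;  // Lookup failure.
        if (table[entry] == key) return &table[entry];
      }
    }

    int main() {
      std::vector<std::string> table(8);
      std::string key = "toJSON";
      uint32_t h = 0;
      for (char c : key) h = AddCharacter(h, static_cast<uint8_t>(c));
      table[h & 7] = key;  // Pre-seed the table at the key's home slot.
      printf("found: %d\n", Lookup(table, key, h) != nullptr);
    }
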
@@ -669,7 +675,7 @@ Handle<String> JsonParser<seq_ascii>::ScanJsonString() {
// Check for control character (0x00-0x1f) or unterminated string (<0).
if (c0_ < 0x20) return Handle<String>::null();
if (c0_ != '\\') {
- if (seq_ascii || c0_ <= kMaxAsciiCharCode) {
+ if (seq_ascii || c0_ <= String::kMaxOneByteCharCode) {
Advance();
} else {
return SlowScanJsonString<SeqTwoByteString, uc16>(source_,
@@ -677,20 +683,18 @@ Handle<String> JsonParser<seq_ascii>::ScanJsonString() {
position_);
}
} else {
- return SlowScanJsonString<SeqAsciiString, char>(source_,
- beg_pos,
- position_);
+ return SlowScanJsonString<SeqOneByteString, uint8_t>(source_,
+ beg_pos,
+ position_);
}
} while (c0_ != '"');
int length = position_ - beg_pos;
Handle<String> result;
- if (seq_ascii && is_symbol) {
- result = factory()->LookupAsciiSymbol(seq_source_,
- beg_pos,
- length);
+ if (seq_ascii && is_internalized) {
+ result = factory()->InternalizeOneByteString(seq_source_, beg_pos, length);
} else {
- result = factory()->NewRawAsciiString(length, pretenure_);
- char* dest = SeqAsciiString::cast(*result)->GetChars();
+ result = factory()->NewRawOneByteString(length, pretenure_);
+ uint8_t* dest = SeqOneByteString::cast(*result)->GetChars();
String::WriteToFlat(*source_, dest, beg_pos, position_);
}
ASSERT_EQ('"', c0_);
diff --git a/src/3rdparty/v8/src/json-stringifier.h b/src/3rdparty/v8/src/json-stringifier.h
index cdb724f..e9121d4 100644
--- a/src/3rdparty/v8/src/json-stringifier.h
+++ b/src/3rdparty/v8/src/json-stringifier.h
@@ -60,7 +60,7 @@ class BasicJsonStringifier BASE_EMBEDDED {
template <bool is_ascii, typename Char>
INLINE(void Append_(const Char* chars));
- INLINE(void Append(char c)) {
+ INLINE(void Append(uint8_t c)) {
if (is_ascii_) {
Append_<true>(c);
} else {
@@ -68,17 +68,14 @@ class BasicJsonStringifier BASE_EMBEDDED {
}
}
- INLINE(void Append(const char* chars)) {
+ INLINE(void AppendAscii(const char* chars)) {
if (is_ascii_) {
- Append_<true>(chars);
+ Append_<true>(reinterpret_cast<const uint8_t*>(chars));
} else {
- Append_<false>(chars);
+ Append_<false>(reinterpret_cast<const uint8_t*>(chars));
}
}
- Handle<Object> GetProperty(Handle<JSObject> object,
- Handle<String> key);
-
Handle<Object> ApplyToJsonFunction(Handle<Object> object,
Handle<Object> key);
@@ -94,8 +91,12 @@ class BasicJsonStringifier BASE_EMBEDDED {
// Serialize an array element.
// The index may serve as argument for the toJSON function.
- INLINE(Result SerializeElement(Handle<Object> object, int i)) {
- return Serialize_<false>(object, false, Handle<Object>(Smi::FromInt(i)));
+ INLINE(Result SerializeElement(Isolate* isolate,
+ Handle<Object> object,
+ int i)) {
+ return Serialize_<false>(object,
+ false,
+ Handle<Object>(Smi::FromInt(i), isolate));
}
// Serialize an object property.
@@ -139,8 +140,7 @@ class BasicJsonStringifier BASE_EMBEDDED {
int length));
template <bool is_ascii, typename Char>
- INLINE(void SerializeString_(Vector<const Char> vector,
- Handle<String> string));
+ INLINE(void SerializeString_(Handle<String> string));
template <typename Char>
INLINE(bool DoNotEscape(Char c));
@@ -165,7 +165,7 @@ class BasicJsonStringifier BASE_EMBEDDED {
// (indirect) handle to it in the outermost handle scope.
Handle<JSValue> accumulator_store_;
Handle<String> current_part_;
- Handle<String> tojson_symbol_;
+ Handle<String> tojson_string_;
Handle<JSArray> stack_;
int current_index_;
int part_length_;
@@ -210,8 +210,39 @@ const char* const BasicJsonStringifier::JsonEscapeTable =
"p\0 q\0 r\0 s\0 "
"t\0 u\0 v\0 w\0 "
"x\0 y\0 z\0 {\0 "
- "|\0 }\0 ~\0 \177\0 ";
-
+ "|\0 }\0 ~\0 \177\0 "
+ "\200\0 \201\0 \202\0 \203\0 "
+ "\204\0 \205\0 \206\0 \207\0 "
+ "\210\0 \211\0 \212\0 \213\0 "
+ "\214\0 \215\0 \216\0 \217\0 "
+ "\220\0 \221\0 \222\0 \223\0 "
+ "\224\0 \225\0 \226\0 \227\0 "
+ "\230\0 \231\0 \232\0 \233\0 "
+ "\234\0 \235\0 \236\0 \237\0 "
+ "\240\0 \241\0 \242\0 \243\0 "
+ "\244\0 \245\0 \246\0 \247\0 "
+ "\250\0 \251\0 \252\0 \253\0 "
+ "\254\0 \255\0 \256\0 \257\0 "
+ "\260\0 \261\0 \262\0 \263\0 "
+ "\264\0 \265\0 \266\0 \267\0 "
+ "\270\0 \271\0 \272\0 \273\0 "
+ "\274\0 \275\0 \276\0 \277\0 "
+ "\300\0 \301\0 \302\0 \303\0 "
+ "\304\0 \305\0 \306\0 \307\0 "
+ "\310\0 \311\0 \312\0 \313\0 "
+ "\314\0 \315\0 \316\0 \317\0 "
+ "\320\0 \321\0 \322\0 \323\0 "
+ "\324\0 \325\0 \326\0 \327\0 "
+ "\330\0 \331\0 \332\0 \333\0 "
+ "\334\0 \335\0 \336\0 \337\0 "
+ "\340\0 \341\0 \342\0 \343\0 "
+ "\344\0 \345\0 \346\0 \347\0 "
+ "\350\0 \351\0 \352\0 \353\0 "
+ "\354\0 \355\0 \356\0 \357\0 "
+ "\360\0 \361\0 \362\0 \363\0 "
+ "\364\0 \365\0 \366\0 \367\0 "
+ "\370\0 \371\0 \372\0 \373\0 "
+ "\374\0 \375\0 \376\0 \377\0 ";
BasicJsonStringifier::BasicJsonStringifier(Isolate* isolate)
: isolate_(isolate), current_index_(0), is_ascii_(true) {
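
With Latin-1 in play, the escape table now carries an entry for all 256 one-byte values: each entry is a fixed-width, NUL-terminated string indexed by c * kJsonEscapeTableEntrySize, and bytes above 0x7f simply map to themselves. A small sketch of that flat-table lookup (the entry width and the tiny table contents are illustrative):

    #include <cstdio>
    #include <cstring>

    static const int kEntrySize = 8;  // Stand-in for the real entry width.

    static void AppendEscaped(char* out, const char* table, unsigned char c) {
      strcat(out, &table[c * kEntrySize]);  // Entry is a ready-made C string.
    }

    int main() {
      // Tiny table: '"' -> \" , '\n' -> \n , everything else maps to itself.
      static char table[256 * kEntrySize];
      for (int c = 0; c < 256; c++) {
        char* entry = &table[c * kEntrySize];
        if (c == '"') strcpy(entry, "\\\"");
        else if (c == '\n') strcpy(entry, "\\n");
        else { entry[0] = static_cast<char>(c); entry[1] = '\0'; }
      }
      char out[64] = "";
      const unsigned char input[] = "hi\"\n";
      for (int i = 0; input[i] != '\0'; i++) AppendEscaped(out, table, input[i]);
      printf("%s\n", out);  // Prints: hi\"\n  (each escape as two characters)
    }
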
@@ -219,8 +250,9 @@ BasicJsonStringifier::BasicJsonStringifier(Isolate* isolate)
accumulator_store_ = Handle<JSValue>::cast(
factory_->ToObject(factory_->empty_string()));
part_length_ = kInitialPartLength;
- current_part_ = factory_->NewRawAsciiString(kInitialPartLength);
- tojson_symbol_ = factory_->LookupAsciiSymbol("toJSON");
+ current_part_ = factory_->NewRawOneByteString(kInitialPartLength);
+ tojson_string_ =
+ factory_->InternalizeOneByteString(STATIC_ASCII_VECTOR("toJSON"));
stack_ = factory_->NewJSArray(8);
}
@@ -246,7 +278,7 @@ MaybeObject* BasicJsonStringifier::Stringify(Handle<Object> object) {
template <bool is_ascii, typename Char>
void BasicJsonStringifier::Append_(Char c) {
if (is_ascii) {
- SeqAsciiString::cast(*current_part_)->SeqAsciiStringSet(
+ SeqOneByteString::cast(*current_part_)->SeqOneByteStringSet(
current_index_++, c);
} else {
SeqTwoByteString::cast(*current_part_)->SeqTwoByteStringSet(
@@ -262,41 +294,14 @@ void BasicJsonStringifier::Append_(const Char* chars) {
}
-Handle<Object> BasicJsonStringifier::GetProperty(Handle<JSObject> object,
- Handle<String> key) {
- LookupResult lookup(isolate_);
- object->LocalLookupRealNamedProperty(*key, &lookup);
- if (!lookup.IsProperty()) return factory_->undefined_value();
- switch (lookup.type()) {
- case NORMAL: {
- Object* value = lookup.holder()->GetNormalizedProperty(&lookup);
- ASSERT(!value->IsTheHole());
- return Handle<Object>(value, isolate_);
- }
- case FIELD: {
- Object* value = lookup.holder()->FastPropertyAt(lookup.GetFieldIndex());
- ASSERT(!value->IsTheHole());
- return Handle<Object>(value, isolate_);
- }
- case CONSTANT_FUNCTION:
- return Handle<Object>(lookup.GetConstantFunction(), isolate_);
- default: {
- PropertyAttributes attr;
- return Object::GetProperty(object, object, &lookup, key, &attr);
- }
- }
- return Handle<Object>::null();
-}
-
-
Handle<Object> BasicJsonStringifier::ApplyToJsonFunction(
Handle<Object> object, Handle<Object> key) {
LookupResult lookup(isolate_);
- JSObject::cast(*object)->LookupRealNamedProperty(*tojson_symbol_, &lookup);
+ JSObject::cast(*object)->LookupRealNamedProperty(*tojson_string_, &lookup);
if (!lookup.IsProperty()) return object;
PropertyAttributes attr;
Handle<Object> fun =
- Object::GetProperty(object, object, &lookup, tojson_symbol_, &attr);
+ Object::GetProperty(object, object, &lookup, tojson_string_, &attr);
if (!fun->IsJSFunction()) return object;
// Call toJSON function.
@@ -357,15 +362,15 @@ BasicJsonStringifier::Result BasicJsonStringifier::Serialize_(
switch (Oddball::cast(*object)->kind()) {
case Oddball::kFalse:
if (deferred_string_key) SerializeDeferredKey(comma, key);
- Append("false");
+ AppendAscii("false");
return SUCCESS;
case Oddball::kTrue:
if (deferred_string_key) SerializeDeferredKey(comma, key);
- Append("true");
+ AppendAscii("true");
return SUCCESS;
case Oddball::kNull:
if (deferred_string_key) SerializeDeferredKey(comma, key);
- Append("null");
+ AppendAscii("null");
return SUCCESS;
default:
return UNCHANGED;
@@ -399,8 +404,8 @@ BasicJsonStringifier::Result BasicJsonStringifier::SerializeGeneric(
bool deferred_comma,
bool deferred_key) {
Handle<JSObject> builtins(isolate_->native_context()->builtins());
- Handle<JSFunction> builtin = Handle<JSFunction>::cast(
- v8::internal::GetProperty(builtins, "JSONSerializeAdapter"));
+ Handle<JSFunction> builtin =
+ Handle<JSFunction>::cast(GetProperty(builtins, "JSONSerializeAdapter"));
Handle<Object> argv[] = { key, object };
bool has_exception = false;
@@ -429,20 +434,20 @@ BasicJsonStringifier::Result BasicJsonStringifier::SerializeJSValue(
Handle<JSValue> object) {
bool has_exception = false;
String* class_name = object->class_name();
- if (class_name == isolate_->heap()->String_symbol()) {
+ if (class_name == isolate_->heap()->String_string()) {
Handle<Object> value = Execution::ToString(object, &has_exception);
if (has_exception) return EXCEPTION;
SerializeString(Handle<String>::cast(value));
- } else if (class_name == isolate_->heap()->Number_symbol()) {
+ } else if (class_name == isolate_->heap()->Number_string()) {
Handle<Object> value = Execution::ToNumber(object, &has_exception);
if (has_exception) return EXCEPTION;
if (value->IsSmi()) return SerializeSmi(Smi::cast(*value));
SerializeHeapNumber(Handle<HeapNumber>::cast(value));
} else {
- ASSERT(class_name == isolate_->heap()->Boolean_symbol());
+ ASSERT(class_name == isolate_->heap()->Boolean_string());
Object* value = JSValue::cast(*object)->value();
ASSERT(value->IsBoolean());
- Append(value->IsTrue() ? "true" : "false");
+ AppendAscii(value->IsTrue() ? "true" : "false");
}
return SUCCESS;
}
@@ -452,7 +457,7 @@ BasicJsonStringifier::Result BasicJsonStringifier::SerializeSmi(Smi* object) {
static const int kBufferSize = 100;
char chars[kBufferSize];
Vector<char> buffer(chars, kBufferSize);
- Append(IntToCString(object->value(), buffer));
+ AppendAscii(IntToCString(object->value(), buffer));
return SUCCESS;
}
@@ -460,13 +465,13 @@ BasicJsonStringifier::Result BasicJsonStringifier::SerializeSmi(Smi* object) {
BasicJsonStringifier::Result BasicJsonStringifier::SerializeDouble(
double number) {
if (isinf(number) || isnan(number)) {
- Append("null");
+ AppendAscii("null");
return SUCCESS;
}
static const int kBufferSize = 100;
char chars[kBufferSize];
Vector<char> buffer(chars, kBufferSize);
- Append(DoubleToCString(number, buffer));
+ AppendAscii(DoubleToCString(number, buffer));
return SUCCESS;
}
@@ -503,10 +508,12 @@ BasicJsonStringifier::Result BasicJsonStringifier::SerializeJSArray(
for (int i = 0; i < length; i++) {
if (i > 0) Append(',');
Result result =
- SerializeElement(Handle<Object>(elements->get(i), isolate_), i);
+ SerializeElement(isolate_,
+ Handle<Object>(elements->get(i), isolate_),
+ i);
if (result == SUCCESS) continue;
if (result == UNCHANGED) {
- Append("null");
+ AppendAscii("null");
} else {
return result;
}
@@ -535,12 +542,12 @@ BasicJsonStringifier::Result BasicJsonStringifier::SerializeJSArraySlow(
if (i > 0) Append(',');
Handle<Object> element = Object::GetElement(object, i);
if (element->IsUndefined()) {
- Append("null");
+ AppendAscii("null");
} else {
- Result result = SerializeElement(element, i);
+ Result result = SerializeElement(object->GetIsolate(), element, i);
if (result == SUCCESS) continue;
if (result == UNCHANGED) {
- Append("null");
+ AppendAscii("null");
} else {
return result;
}
@@ -580,7 +587,7 @@ BasicJsonStringifier::Result BasicJsonStringifier::SerializeJSObject(
map->instance_descriptors()->GetFieldIndex(i)),
isolate_);
} else {
- property = GetProperty(object, key);
+ property = GetProperty(isolate_, object, key);
if (property.is_null()) return EXCEPTION;
}
Result result = SerializeProperty(property, comma, key);
@@ -599,7 +606,7 @@ BasicJsonStringifier::Result BasicJsonStringifier::SerializeJSObject(
Handle<Object> property;
if (key->IsString()) {
key_handle = Handle<String>(String::cast(key), isolate_);
- property = GetProperty(object, key_handle);
+ property = GetProperty(isolate_, object, key_handle);
} else {
ASSERT(key->IsNumber());
key_handle = factory_->NumberToString(Handle<Object>(key, isolate_));
@@ -609,7 +616,7 @@ BasicJsonStringifier::Result BasicJsonStringifier::SerializeJSObject(
} else if (key_handle->AsArrayIndex(&index)) {
property = Object::GetElement(object, index);
} else {
- property = GetProperty(object, key_handle);
+ property = GetProperty(isolate_, object, key_handle);
}
}
if (property.is_null()) return EXCEPTION;
@@ -628,31 +635,8 @@ BasicJsonStringifier::Result BasicJsonStringifier::SerializeJSObject(
void BasicJsonStringifier::ShrinkCurrentPart() {
ASSERT(current_index_ < part_length_);
- if (current_index_ == 0) {
- current_part_ = factory_->empty_string();
- return;
- }
-
- int string_size, allocated_string_size;
- if (is_ascii_) {
- allocated_string_size = SeqAsciiString::SizeFor(part_length_);
- string_size = SeqAsciiString::SizeFor(current_index_);
- } else {
- allocated_string_size = SeqTwoByteString::SizeFor(part_length_);
- string_size = SeqTwoByteString::SizeFor(current_index_);
- }
-
- int delta = allocated_string_size - string_size;
- current_part_->set_length(current_index_);
-
- // String sizes are pointer size aligned, so that we can use filler objects
- // that are a multiple of pointer size.
- Address end_of_string = current_part_->address() + string_size;
- isolate_->heap()->CreateFillerObjectAt(end_of_string, delta);
- if (Marking::IsBlack(Marking::MarkBitFrom(*current_part_))) {
- MemoryChunk::IncrementLiveBytesFromMutator(
- current_part_->address(), -delta);
- }
+ current_part_ = Handle<String>(
+ SeqString::cast(*current_part_)->Truncate(current_index_), isolate_);
}
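
ShrinkCurrentPart now delegates to SeqString::Truncate instead of open-coding the length update, filler object and live-byte accounting. A toy illustration of why in-place truncation is cheap for the most recently built object in a bump-allocated heap (V8 additionally writes a filler object so the heap stays iterable; this sketch just hands the tail back):

    #include <cstddef>
    #include <cstdio>

    struct BumpHeap {
      char buffer[1024];
      size_t top = 0;
      char* Allocate(size_t n) { char* p = buffer + top; top += n; return p; }
      // Shrinking the last allocation only moves the bump pointer back.
      void TruncateLast(char* obj, size_t old_size, size_t new_size) {
        if (buffer + top == obj + old_size) top = (obj - buffer) + new_size;
      }
    };

    int main() {
      BumpHeap heap;
      char* part = heap.Allocate(32);  // Over-allocated string part.
      size_t used = 11;                // Characters actually written.
      heap.TruncateLast(part, 32, used);
      printf("heap top after truncate: %zu\n", heap.top);  // 11
    }
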
@@ -662,7 +646,7 @@ void BasicJsonStringifier::Extend() {
part_length_ *= kPartLengthGrowthFactor;
}
if (is_ascii_) {
- current_part_ = factory_->NewRawAsciiString(part_length_);
+ current_part_ = factory_->NewRawOneByteString(part_length_);
} else {
current_part_ = factory_->NewRawTwoByteString(part_length_);
}
@@ -705,9 +689,8 @@ void BasicJsonStringifier::SerializeStringUnchecked_(const SrcChar* src,
template <bool is_ascii, typename Char>
-void BasicJsonStringifier::SerializeString_(Vector<const Char> vector,
- Handle<String> string) {
- int length = vector.length();
+void BasicJsonStringifier::SerializeString_(Handle<String> string) {
+ int length = string->length();
Append_<is_ascii, char>('"');
// We make a rough estimate to find out if the current string can be
// serialized without allocating a new string part. The worst case length of
@@ -715,10 +698,12 @@ void BasicJsonStringifier::SerializeString_(Vector<const Char> vector,
// is a more pessimistic estimate, but faster to calculate.
if (((part_length_ - current_index_) >> 3) > length) {
+ AssertNoAllocation no_allocation;
+ Vector<const Char> vector = GetCharVector<Char>(string);
if (is_ascii) {
SerializeStringUnchecked_(
vector.start(),
- SeqAsciiString::cast(*current_part_)->GetChars(),
+ SeqOneByteString::cast(*current_part_)->GetChars(),
length);
} else {
SerializeStringUnchecked_(
@@ -728,13 +713,15 @@ void BasicJsonStringifier::SerializeString_(Vector<const Char> vector,
}
} else {
String* string_location = *string;
+ Vector<const Char> vector = GetCharVector<Char>(string);
for (int i = 0; i < length; i++) {
Char c = vector[i];
if (DoNotEscape(c)) {
Append_<is_ascii, Char>(c);
} else {
- Append_<is_ascii, char>(
- &JsonEscapeTable[c * kJsonEscapeTableEntrySize]);
+ Append_<is_ascii, uint8_t>(
+ reinterpret_cast<const uint8_t*>(
+ &JsonEscapeTable[c * kJsonEscapeTableEntrySize]));
}
// If GC moved the string, we need to refresh the vector.
if (*string != string_location) {
@@ -744,27 +731,28 @@ void BasicJsonStringifier::SerializeString_(Vector<const Char> vector,
}
}
- Append_<is_ascii, char>('"');
+ Append_<is_ascii, uint8_t>('"');
}
template <>
-bool BasicJsonStringifier::DoNotEscape(char c) {
+bool BasicJsonStringifier::DoNotEscape(uint8_t c) {
return c >= '#' && c <= '~' && c != '\\';
}
template <>
-bool BasicJsonStringifier::DoNotEscape(uc16 c) {
- return (c >= 0x80) || (c >= '#' && c <= '~' && c != '\\');
+bool BasicJsonStringifier::DoNotEscape(uint16_t c) {
+ return c >= '#' && c != '\\' && c != 0x7f;
}
template <>
-Vector<const char> BasicJsonStringifier::GetCharVector(Handle<String> string) {
+Vector<const uint8_t> BasicJsonStringifier::GetCharVector(
+ Handle<String> string) {
String::FlatContent flat = string->GetFlatContent();
ASSERT(flat.IsAscii());
- return flat.ToAsciiVector();
+ return flat.ToOneByteVector();
}
@@ -781,16 +769,16 @@ void BasicJsonStringifier::SerializeString(Handle<String> object) {
String::FlatContent flat = object->GetFlatContent();
if (is_ascii_) {
if (flat.IsAscii()) {
- SerializeString_<true, char>(flat.ToAsciiVector(), object);
+ SerializeString_<true, uint8_t>(object);
} else {
ChangeEncoding();
SerializeString(object);
}
} else {
if (flat.IsAscii()) {
- SerializeString_<false, char>(flat.ToAsciiVector(), object);
+ SerializeString_<false, uint8_t>(object);
} else {
- SerializeString_<false, uc16>(flat.ToUC16Vector(), object);
+ SerializeString_<false, uc16>(object);
}
}
}
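
The new two-byte DoNotEscape drops the blanket c >= 0x80 whitelist in favour of an explicit rule that also escapes DEL (0x7f). The two specializations from this patch, wired into a quick comparison of exactly where they disagree:

    #include <cstdint>
    #include <cstdio>

    static bool DoNotEscapeOneByte(uint8_t c) {
      return c >= '#' && c <= '~' && c != '\\';
    }

    static bool DoNotEscapeTwoByte(uint16_t c) {
      return c >= '#' && c != '\\' && c != 0x7f;
    }

    int main() {
      int differing = 0;
      for (int c = 0; c < 0x100; c++) {
        if (DoNotEscapeOneByte(static_cast<uint8_t>(c)) !=
            DoNotEscapeTwoByte(static_cast<uint16_t>(c))) {
          differing++;  // Exactly the range 0x80..0xff.
        }
      }
      // 128: high bytes go through the escape table only in one-byte parts.
      printf("%d\n", differing);
    }
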
diff --git a/src/3rdparty/v8/src/json.js b/src/3rdparty/v8/src/json.js
index 9ab1a31..e94d3c8 100644
--- a/src/3rdparty/v8/src/json.js
+++ b/src/3rdparty/v8/src/json.js
@@ -193,10 +193,10 @@ function JSONStringify(value, replacer, space) {
var gap;
if (IS_NUMBER(space)) {
space = MathMax(0, MathMin(ToInteger(space), 10));
- gap = SubString(" ", 0, space);
+ gap = %_SubString(" ", 0, space);
} else if (IS_STRING(space)) {
if (space.length > 10) {
- gap = SubString(space, 0, 10);
+ gap = %_SubString(space, 0, 10);
} else {
gap = space;
}
diff --git a/src/3rdparty/v8/src/jsregexp-inl.h b/src/3rdparty/v8/src/jsregexp-inl.h
new file mode 100644
index 0000000..3ef07d8
--- /dev/null
+++ b/src/3rdparty/v8/src/jsregexp-inl.h
@@ -0,0 +1,106 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+#ifndef V8_JSREGEXP_INL_H_
+#define V8_JSREGEXP_INL_H_
+
+#include "allocation.h"
+#include "handles.h"
+#include "heap.h"
+#include "jsregexp.h"
+#include "objects.h"
+
+namespace v8 {
+namespace internal {
+
+
+RegExpImpl::GlobalCache::~GlobalCache() {
+ // Deallocate the register array if we allocated it in the constructor
+ // (as opposed to using the existing jsregexp_static_offsets_vector).
+ if (register_array_size_ > Isolate::kJSRegexpStaticOffsetsVectorSize) {
+ DeleteArray(register_array_);
+ }
+}
+
+
+int32_t* RegExpImpl::GlobalCache::FetchNext() {
+ current_match_index_++;
+ if (current_match_index_ >= num_matches_) {
+ // Current batch of results exhausted.
+ // Fail if last batch was not even fully filled.
+ if (num_matches_ < max_matches_) {
+ num_matches_ = 0; // Signal failed match.
+ return NULL;
+ }
+
+ int32_t* last_match =
+ &register_array_[(current_match_index_ - 1) * registers_per_match_];
+ int last_end_index = last_match[1];
+
+ if (regexp_->TypeTag() == JSRegExp::ATOM) {
+ num_matches_ = RegExpImpl::AtomExecRaw(regexp_,
+ subject_,
+ last_end_index,
+ register_array_,
+ register_array_size_);
+ } else {
+ int last_start_index = last_match[0];
+ if (last_start_index == last_end_index) last_end_index++;
+ if (last_end_index > subject_->length()) {
+ num_matches_ = 0; // Signal failed match.
+ return NULL;
+ }
+ num_matches_ = RegExpImpl::IrregexpExecRaw(regexp_,
+ subject_,
+ last_end_index,
+ register_array_,
+ register_array_size_);
+ }
+
+ if (num_matches_ <= 0) return NULL;
+ current_match_index_ = 0;
+ return register_array_;
+ } else {
+ return &register_array_[current_match_index_ * registers_per_match_];
+ }
+}
+
+
+int32_t* RegExpImpl::GlobalCache::LastSuccessfulMatch() {
+ int index = current_match_index_ * registers_per_match_;
+ if (num_matches_ == 0) {
+ // After a failed match we shift back by one result.
+ index -= registers_per_match_;
+ }
+ return &register_array_[index];
+}
+
+
+} } // namespace v8::internal
+
+#endif // V8_JSREGEXP_INL_H_
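
FetchNext hands out one match per call from the current register batch and re-executes the regexp to refill the batch; a batch shorter than max_matches_ signals exhaustion, and a negative num_matches_ signals an exception. A runnable simulation of that batching contract with canned match data (GlobalCacheSim is illustrative, not a V8 type):

    #include <cstdio>
    #include <utility>
    #include <vector>

    class GlobalCacheSim {
     public:
      explicit GlobalCacheSim(std::vector<std::vector<int>> batches)
          : batches_(std::move(batches)) {}

      const int* FetchNext() {
        current_++;
        if (current_ >= num_matches_) {
          // A short previous batch means the subject is exhausted.
          if (num_matches_ != -1 && num_matches_ < kMaxMatches) {
            num_matches_ = 0;
            return nullptr;
          }
          if (batch_ >= batches_.size()) { num_matches_ = 0; return nullptr; }
          registers_ = batches_[batch_++];  // "Re-execute" for a new batch.
          num_matches_ = static_cast<int>(registers_.size()) / 2;
          current_ = 0;
        }
        return &registers_[current_ * 2];  // {start, end} of this match.
      }

     private:
      static const int kMaxMatches = 2;
      std::vector<std::vector<int>> batches_;
      std::vector<int> registers_;
      int num_matches_ = -1, current_ = -1;
      size_t batch_ = 0;
    };

    int main() {
      GlobalCacheSim cache({{0, 2, 3, 5}, {6, 8}});  // Full batch, short batch.
      while (const int* m = cache.FetchNext())
        printf("match [%d, %d)\n", m[0], m[1]);
    }
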
diff --git a/src/3rdparty/v8/src/jsregexp.cc b/src/3rdparty/v8/src/jsregexp.cc
index e59170d..e73b1d4 100644
--- a/src/3rdparty/v8/src/jsregexp.cc
+++ b/src/3rdparty/v8/src/jsregexp.cc
@@ -32,6 +32,7 @@
#include "execution.h"
#include "factory.h"
#include "jsregexp.h"
+#include "jsregexp-inl.h"
#include "platform.h"
#include "string-search.h"
#include "runtime.h"
@@ -269,7 +270,7 @@ static void SetAtomLastCapture(FixedArray* array,
String* subject,
int from,
int to) {
- NoHandleAllocation no_handles;
+ NoHandleAllocation no_handles(array->GetIsolate());
RegExpImpl::SetLastCaptureCount(array, 2);
RegExpImpl::SetLastSubject(array, subject);
RegExpImpl::SetLastInput(array, subject);
@@ -309,16 +310,16 @@ int RegExpImpl::AtomExecRaw(Handle<JSRegExp> regexp,
index = (needle_content.IsAscii()
? (subject_content.IsAscii()
? SearchString(isolate,
- subject_content.ToAsciiVector(),
- needle_content.ToAsciiVector(),
+ subject_content.ToOneByteVector(),
+ needle_content.ToOneByteVector(),
index)
: SearchString(isolate,
subject_content.ToUC16Vector(),
- needle_content.ToAsciiVector(),
+ needle_content.ToOneByteVector(),
index))
: (subject_content.IsAscii()
? SearchString(isolate,
- subject_content.ToAsciiVector(),
+ subject_content.ToOneByteVector(),
needle_content.ToUC16Vector(),
index)
: SearchString(isolate,
@@ -352,7 +353,7 @@ Handle<Object> RegExpImpl::AtomExec(Handle<JSRegExp> re,
if (res == RegExpImpl::RE_FAILURE) return isolate->factory()->null_value();
ASSERT_EQ(res, RegExpImpl::RE_SUCCESS);
- NoHandleAllocation no_handles;
+ NoHandleAllocation no_handles(isolate);
FixedArray* array = FixedArray::cast(last_match_info->elements());
SetAtomLastCapture(array, *subject, output_registers[0], output_registers[1]);
return last_match_info;
@@ -529,7 +530,7 @@ int RegExpImpl::IrregexpPrepare(Handle<JSRegExp> regexp,
if (!subject->IsFlat()) FlattenString(subject);
// Check the asciiness of the underlying storage.
- bool is_ascii = subject->IsAsciiRepresentationUnderneath();
+ bool is_ascii = subject->IsOneByteRepresentationUnderneath();
if (!EnsureCompiledIrregexp(regexp, subject, is_ascii)) return -1;
#ifdef V8_INTERPRETED_REGEXP
@@ -560,7 +561,7 @@ int RegExpImpl::IrregexpExecRaw(Handle<JSRegExp> regexp,
ASSERT(index <= subject->length());
ASSERT(subject->IsFlat());
- bool is_ascii = subject->IsAsciiRepresentationUnderneath();
+ bool is_ascii = subject->IsOneByteRepresentationUnderneath();
#ifndef V8_INTERPRETED_REGEXP
ASSERT(output_size >= (IrregexpNumberOfCaptures(*irregexp) + 1) * 2);
@@ -596,7 +597,7 @@ int RegExpImpl::IrregexpExecRaw(Handle<JSRegExp> regexp,
// being internal and external, and even between being ASCII and UC16,
// but the characters are always the same).
IrregexpPrepare(regexp, subject);
- is_ascii = subject->IsAsciiRepresentationUnderneath();
+ is_ascii = subject->IsOneByteRepresentationUnderneath();
} while (true);
UNREACHABLE();
return RE_EXCEPTION;
@@ -686,6 +687,7 @@ Handle<JSArray> RegExpImpl::SetLastMatchInfo(Handle<JSArray> last_match_info,
Handle<String> subject,
int capture_count,
int32_t* match) {
+ ASSERT(last_match_info->HasFastObjectElements());
int capture_register_count = (capture_count + 1) * 2;
last_match_info->EnsureSize(capture_register_count + kLastMatchOverhead);
AssertNoAllocation no_gc;
@@ -760,68 +762,6 @@ RegExpImpl::GlobalCache::GlobalCache(Handle<JSRegExp> regexp,
}
-RegExpImpl::GlobalCache::~GlobalCache() {
- // Deallocate the register array if we allocated it in the constructor
- // (as opposed to using the existing jsregexp_static_offsets_vector).
- if (register_array_size_ > Isolate::kJSRegexpStaticOffsetsVectorSize) {
- DeleteArray(register_array_);
- }
-}
-
-
-int32_t* RegExpImpl::GlobalCache::FetchNext() {
- current_match_index_++;
- if (current_match_index_ >= num_matches_) {
- // Current batch of results exhausted.
- // Fail if last batch was not even fully filled.
- if (num_matches_ < max_matches_) {
- num_matches_ = 0; // Signal failed match.
- return NULL;
- }
-
- int32_t* last_match =
- &register_array_[(current_match_index_ - 1) * registers_per_match_];
- int last_end_index = last_match[1];
-
- if (regexp_->TypeTag() == JSRegExp::ATOM) {
- num_matches_ = RegExpImpl::AtomExecRaw(regexp_,
- subject_,
- last_end_index,
- register_array_,
- register_array_size_);
- } else {
- int last_start_index = last_match[0];
- if (last_start_index == last_end_index) last_end_index++;
- if (last_end_index > subject_->length()) {
- num_matches_ = 0; // Signal failed match.
- return NULL;
- }
- num_matches_ = RegExpImpl::IrregexpExecRaw(regexp_,
- subject_,
- last_end_index,
- register_array_,
- register_array_size_);
- }
-
- if (num_matches_ <= 0) return NULL;
- current_match_index_ = 0;
- return register_array_;
- } else {
- return &register_array_[current_match_index_ * registers_per_match_];
- }
-}
-
-
-int32_t* RegExpImpl::GlobalCache::LastSuccessfulMatch() {
- int index = current_match_index_ * registers_per_match_;
- if (num_matches_ == 0) {
- // After a failed match we shift back by one result.
- index -= registers_per_match_;
- }
- return &register_array_[index];
-}
-
-
// -------------------------------------------------------------------
// Implementation of the Irregexp regular expression engine.
//
@@ -1681,7 +1621,7 @@ static int GetCaseIndependentLetters(Isolate* isolate,
letters[0] = character;
length = 1;
}
- if (!ascii_subject || character <= String::kMaxAsciiCharCode) {
+ if (!ascii_subject || character <= String::kMaxOneByteCharCode) {
return length;
}
// The standard requires that non-ASCII characters cannot have ASCII
@@ -1732,7 +1672,7 @@ static inline bool EmitAtomNonLetter(Isolate* isolate,
bool checked = false;
// We handle the length > 1 case in a later pass.
if (length == 1) {
- if (ascii && c > String::kMaxAsciiCharCodeU) {
+ if (ascii && c > String::kMaxOneByteCharCodeU) {
// Can't match - see above.
return false; // Bounds not checked.
}
@@ -1753,7 +1693,7 @@ static bool ShortCutEmitCharacterPair(RegExpMacroAssembler* macro_assembler,
Label* on_failure) {
uc16 char_mask;
if (ascii) {
- char_mask = String::kMaxAsciiCharCode;
+ char_mask = String::kMaxOneByteCharCode;
} else {
char_mask = String::kMaxUtf16CodeUnit;
}
@@ -2007,7 +1947,7 @@ static void SplitSearchSpace(ZoneList<int>* ranges,
// range with a single not-taken branch, speeding up this important
// character range (even non-ASCII charset-based text has spaces and
// punctuation).
- if (*border - 1 > String::kMaxAsciiCharCode && // ASCII case.
+ if (*border - 1 > String::kMaxOneByteCharCode && // ASCII case.
end_index - start_index > (*new_start_index - start_index) * 2 &&
last - first > kSize * 2 &&
binary_chop_index > *new_start_index &&
@@ -2211,7 +2151,7 @@ static void EmitCharClass(RegExpMacroAssembler* macro_assembler,
int max_char;
if (ascii) {
- max_char = String::kMaxAsciiCharCode;
+ max_char = String::kMaxOneByteCharCode;
} else {
max_char = String::kMaxUtf16CodeUnit;
}
@@ -2359,35 +2299,33 @@ RegExpNode::LimitResult RegExpNode::LimitVersions(RegExpCompiler* compiler,
int ActionNode::EatsAtLeast(int still_to_find,
- int recursion_depth,
+ int budget,
bool not_at_start) {
- if (recursion_depth > RegExpCompiler::kMaxRecursion) return 0;
+ if (budget <= 0) return 0;
if (type_ == POSITIVE_SUBMATCH_SUCCESS) return 0; // Rewinds input!
return on_success()->EatsAtLeast(still_to_find,
- recursion_depth + 1,
+ budget - 1,
not_at_start);
}
void ActionNode::FillInBMInfo(int offset,
- int recursion_depth,
int budget,
BoyerMooreLookahead* bm,
bool not_at_start) {
if (type_ == BEGIN_SUBMATCH) {
bm->SetRest(offset);
} else if (type_ != POSITIVE_SUBMATCH_SUCCESS) {
- on_success()->FillInBMInfo(
- offset, recursion_depth + 1, budget - 1, bm, not_at_start);
+ on_success()->FillInBMInfo(offset, budget - 1, bm, not_at_start);
}
SaveBMInfo(bm, not_at_start, offset);
}
int AssertionNode::EatsAtLeast(int still_to_find,
- int recursion_depth,
+ int budget,
bool not_at_start) {
- if (recursion_depth > RegExpCompiler::kMaxRecursion) return 0;
+ if (budget <= 0) return 0;
// If we know we are not at the start and we are asked "how many characters
// will you match if you succeed?" then we can answer anything since false
// implies false. So let's just return the max answer (still_to_find) since
@@ -2395,55 +2333,53 @@ int AssertionNode::EatsAtLeast(int still_to_find,
// branches in the node graph.
if (type() == AT_START && not_at_start) return still_to_find;
return on_success()->EatsAtLeast(still_to_find,
- recursion_depth + 1,
+ budget - 1,
not_at_start);
}
void AssertionNode::FillInBMInfo(int offset,
- int recursion_depth,
int budget,
BoyerMooreLookahead* bm,
bool not_at_start) {
// Match the behaviour of EatsAtLeast on this node.
if (type() == AT_START && not_at_start) return;
- on_success()->FillInBMInfo(
- offset, recursion_depth + 1, budget - 1, bm, not_at_start);
+ on_success()->FillInBMInfo(offset, budget - 1, bm, not_at_start);
SaveBMInfo(bm, not_at_start, offset);
}
int BackReferenceNode::EatsAtLeast(int still_to_find,
- int recursion_depth,
+ int budget,
bool not_at_start) {
- if (recursion_depth > RegExpCompiler::kMaxRecursion) return 0;
+ if (budget <= 0) return 0;
return on_success()->EatsAtLeast(still_to_find,
- recursion_depth + 1,
+ budget - 1,
not_at_start);
}
int TextNode::EatsAtLeast(int still_to_find,
- int recursion_depth,
+ int budget,
bool not_at_start) {
int answer = Length();
if (answer >= still_to_find) return answer;
- if (recursion_depth > RegExpCompiler::kMaxRecursion) return answer;
+ if (budget <= 0) return answer;
// We are not at start after this node so we set the last argument to 'true'.
return answer + on_success()->EatsAtLeast(still_to_find - answer,
- recursion_depth + 1,
+ budget - 1,
true);
}
int NegativeLookaheadChoiceNode::EatsAtLeast(int still_to_find,
- int recursion_depth,
+ int budget,
bool not_at_start) {
- if (recursion_depth > RegExpCompiler::kMaxRecursion) return 0;
+ if (budget <= 0) return 0;
// Alternative 0 is the negative lookahead, alternative 1 is what comes
// afterwards.
RegExpNode* node = alternatives_->at(1).node();
- return node->EatsAtLeast(still_to_find, recursion_depth + 1, not_at_start);
+ return node->EatsAtLeast(still_to_find, budget - 1, not_at_start);
}
@@ -2460,39 +2396,40 @@ void NegativeLookaheadChoiceNode::GetQuickCheckDetails(
int ChoiceNode::EatsAtLeastHelper(int still_to_find,
- int recursion_depth,
+ int budget,
RegExpNode* ignore_this_node,
bool not_at_start) {
- if (recursion_depth > RegExpCompiler::kMaxRecursion) return 0;
+ if (budget <= 0) return 0;
int min = 100;
int choice_count = alternatives_->length();
+ budget = (budget - 1) / choice_count;
for (int i = 0; i < choice_count; i++) {
RegExpNode* node = alternatives_->at(i).node();
if (node == ignore_this_node) continue;
- int node_eats_at_least = node->EatsAtLeast(still_to_find,
- recursion_depth + 1,
- not_at_start);
+ int node_eats_at_least =
+ node->EatsAtLeast(still_to_find, budget, not_at_start);
if (node_eats_at_least < min) min = node_eats_at_least;
+ if (min == 0) return 0;
}
return min;
}
int LoopChoiceNode::EatsAtLeast(int still_to_find,
- int recursion_depth,
+ int budget,
bool not_at_start) {
return EatsAtLeastHelper(still_to_find,
- recursion_depth,
+ budget - 1,
loop_node_,
not_at_start);
}
int ChoiceNode::EatsAtLeast(int still_to_find,
- int recursion_depth,
+ int budget,
bool not_at_start) {
return EatsAtLeastHelper(still_to_find,
- recursion_depth,
+ budget,
NULL,
not_at_start);
}
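
The recursion_depth parameter threaded through EatsAtLeast becomes a work budget: every node spends one unit, and ChoiceNode::EatsAtLeastHelper divides the remainder across its alternatives, so total work stays bounded even for wide alternation trees where a pure depth cap could still visit exponentially many nodes. A sketch of the difference (Node and CountReachable are illustrative, not V8 types):

    #include <cstdio>
    #include <vector>

    struct Node {
      std::vector<Node*> alternatives;  // Empty for sequence nodes.
      Node* next = nullptr;
    };

    // Budget-limited traversal: a choice node splits its remaining budget
    // among its branches, so the visit count is bounded by the budget.
    static int CountReachable(Node* node, int budget) {
      if (node == nullptr || budget <= 0) return 0;
      int visited = 1;
      if (!node->alternatives.empty()) {
        int share = (budget - 1) / static_cast<int>(node->alternatives.size());
        for (Node* alt : node->alternatives)
          visited += CountReachable(alt, share);
      } else {
        visited += CountReachable(node->next, budget - 1);
      }
      return visited;
    }

    int main() {
      // A chain of 10 nodes under a 2-way choice: each branch gets half.
      Node chain[10];
      for (int i = 0; i < 9; i++) chain[i].next = &chain[i + 1];
      Node choice;
      choice.alternatives = {&chain[0], &chain[0]};
      printf("visited with budget 8: %d\n", CountReachable(&choice, 8));  // 7
    }
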
@@ -2513,7 +2450,7 @@ bool QuickCheckDetails::Rationalize(bool asc) {
bool found_useful_op = false;
uint32_t char_mask;
if (asc) {
- char_mask = String::kMaxAsciiCharCode;
+ char_mask = String::kMaxOneByteCharCode;
} else {
char_mask = String::kMaxUtf16CodeUnit;
}
@@ -2522,7 +2459,7 @@ bool QuickCheckDetails::Rationalize(bool asc) {
int char_shift = 0;
for (int i = 0; i < characters_; i++) {
Position* pos = &positions_[i];
- if ((pos->mask & String::kMaxAsciiCharCode) != 0) {
+ if ((pos->mask & String::kMaxOneByteCharCode) != 0) {
found_useful_op = true;
}
mask_ |= (pos->mask & char_mask) << char_shift;
@@ -2565,7 +2502,7 @@ bool RegExpNode::EmitQuickCheck(RegExpCompiler* compiler,
// load so the value is already masked down.
uint32_t char_mask;
if (compiler->ascii()) {
- char_mask = String::kMaxAsciiCharCode;
+ char_mask = String::kMaxOneByteCharCode;
} else {
char_mask = String::kMaxUtf16CodeUnit;
}
@@ -2575,7 +2512,11 @@ bool RegExpNode::EmitQuickCheck(RegExpCompiler* compiler,
// For 2-character preloads in ASCII mode or 1-character preloads in
// TWO_BYTE mode we also use a 16 bit load with zero extend.
if (details->characters() == 2 && compiler->ascii()) {
- if ((mask & 0x7f7f) == 0x7f7f) need_mask = false;
+#ifndef ENABLE_LATIN_1
+ if ((mask & 0x7f7f) == 0xffff) need_mask = false;
+#else
+ if ((mask & 0xffff) == 0xffff) need_mask = false;
+#endif
} else if (details->characters() == 1 && !compiler->ascii()) {
if ((mask & 0xffff) == 0xffff) need_mask = false;
} else {
@@ -2617,7 +2558,7 @@ void TextNode::GetQuickCheckDetails(QuickCheckDetails* details,
int characters = details->characters();
int char_mask;
if (compiler->ascii()) {
- char_mask = String::kMaxAsciiCharCode;
+ char_mask = String::kMaxOneByteCharCode;
} else {
char_mask = String::kMaxUtf16CodeUnit;
}
@@ -2834,24 +2775,47 @@ class VisitMarker {
};
-RegExpNode* SeqRegExpNode::FilterASCII(int depth) {
+RegExpNode* SeqRegExpNode::FilterASCII(int depth, bool ignore_case) {
if (info()->replacement_calculated) return replacement();
if (depth < 0) return this;
ASSERT(!info()->visited);
VisitMarker marker(info());
- return FilterSuccessor(depth - 1);
+ return FilterSuccessor(depth - 1, ignore_case);
}
-RegExpNode* SeqRegExpNode::FilterSuccessor(int depth) {
- RegExpNode* next = on_success_->FilterASCII(depth - 1);
+RegExpNode* SeqRegExpNode::FilterSuccessor(int depth, bool ignore_case) {
+ RegExpNode* next = on_success_->FilterASCII(depth - 1, ignore_case);
if (next == NULL) return set_replacement(NULL);
on_success_ = next;
return set_replacement(this);
}
-RegExpNode* TextNode::FilterASCII(int depth) {
+// We need to check for the following characters: 0x39c, 0x3bc and 0x178.
+static inline bool RangeContainsLatin1Equivalents(CharacterRange range) {
+#ifdef ENABLE_LATIN_1
+ // TODO(dcarney): this could be a lot more efficient.
+ return range.Contains(0x39c) ||
+ range.Contains(0x3bc) || range.Contains(0x178);
+#else
+ return false;
+#endif
+}
+
+
+#ifdef ENABLE_LATIN_1
+static bool RangesContainLatin1Equivalents(ZoneList<CharacterRange>* ranges) {
+ for (int i = 0; i < ranges->length(); i++) {
+ // TODO(dcarney): this could be a lot more efficient.
+ if (RangeContainsLatin1Equivalents(ranges->at(i))) return true;
+ }
+ return false;
+}
+#endif
+
+
+RegExpNode* TextNode::FilterASCII(int depth, bool ignore_case) {
if (info()->replacement_calculated) return replacement();
if (depth < 0) return this;
ASSERT(!info()->visited);
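
The three code points named above are, under ECMAScript case-insensitive canonicalization, the only characters outside Latin-1 with case partners inside it: 0x178 (LATIN CAPITAL LETTER Y WITH DIAERESIS, uppercase of 0xff) and 0x39c/0x3bc (Greek capital and small mu, case partners of 0xb5 MICRO SIGN). That is why an ignore-case filter cannot reject a character class merely for lying outside the one-byte range. A one-liner encoding just that fact:

    #include <cstdio>

    // Simple case mappings crossing the Latin-1 boundary:
    //   0x178 (Y with diaeresis)  <-> 0xff (y with diaeresis)
    //   0x39c (Greek capital mu)  <-> 0xb5 (micro sign)
    //   0x3bc (Greek small mu)    <-> 0xb5 (micro sign)
    static bool HasLatin1CaseEquivalent(unsigned c) {
      return c == 0x178 || c == 0x39c || c == 0x3bc;
    }

    int main() {
      printf("%d %d\n", HasLatin1CaseEquivalent(0x178) ? 1 : 0,
             HasLatin1CaseEquivalent(0x100) ? 1 : 0);  // 1 0
    }
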
@@ -2862,12 +2826,23 @@ RegExpNode* TextNode::FilterASCII(int depth) {
if (elm.type == TextElement::ATOM) {
Vector<const uc16> quarks = elm.data.u_atom->data();
for (int j = 0; j < quarks.length(); j++) {
- // We don't need special handling for case independence
- // because of the rule that case independence cannot make
- // a non-ASCII character match an ASCII character.
- if (quarks[j] > String::kMaxAsciiCharCode) {
+#ifndef ENABLE_LATIN_1
+ if (quarks[j] > String::kMaxOneByteCharCode) {
return set_replacement(NULL);
}
+#else
+ uint16_t c = quarks[j];
+ if (c <= String::kMaxOneByteCharCode) continue;
+ if (!ignore_case) return set_replacement(NULL);
+ // Here, we need to check for characters whose upper and lower cases
+ // are outside the Latin-1 range.
+ uint16_t converted = unibrow::Latin1::ConvertNonLatin1ToLatin1(c);
+ // Character is outside Latin-1 completely
+ if (converted == 0) return set_replacement(NULL);
+ // Convert quark to Latin-1 in place.
+ uint16_t* copy = const_cast<uint16_t*>(quarks.start());
+ copy[j] = converted;
+#endif
}
} else {
ASSERT(elm.type == TextElement::CHAR_CLASS);
@@ -2881,39 +2856,48 @@ RegExpNode* TextNode::FilterASCII(int depth) {
if (cc->is_negated()) {
if (range_count != 0 &&
ranges->at(0).from() == 0 &&
- ranges->at(0).to() >= String::kMaxAsciiCharCode) {
+ ranges->at(0).to() >= String::kMaxOneByteCharCode) {
+#ifdef ENABLE_LATIN_1
+ // This will be handled in a later filter.
+ if (ignore_case && RangesContainLatin1Equivalents(ranges)) continue;
+#endif
return set_replacement(NULL);
}
} else {
if (range_count == 0 ||
- ranges->at(0).from() > String::kMaxAsciiCharCode) {
+ ranges->at(0).from() > String::kMaxOneByteCharCode) {
+#ifdef ENABLE_LATIN_1
+ // This will be handled in a later filter.
+ if (ignore_case && RangesContainLatin1Equivalents(ranges)) continue;
+#endif
return set_replacement(NULL);
}
}
}
}
- return FilterSuccessor(depth - 1);
+ return FilterSuccessor(depth - 1, ignore_case);
}
-RegExpNode* LoopChoiceNode::FilterASCII(int depth) {
+RegExpNode* LoopChoiceNode::FilterASCII(int depth, bool ignore_case) {
if (info()->replacement_calculated) return replacement();
if (depth < 0) return this;
if (info()->visited) return this;
{
VisitMarker marker(info());
- RegExpNode* continue_replacement = continue_node_->FilterASCII(depth - 1);
+ RegExpNode* continue_replacement =
+ continue_node_->FilterASCII(depth - 1, ignore_case);
// If we can't continue after the loop then there is no sense in doing the
// loop.
if (continue_replacement == NULL) return set_replacement(NULL);
}
- return ChoiceNode::FilterASCII(depth - 1);
+ return ChoiceNode::FilterASCII(depth - 1, ignore_case);
}
-RegExpNode* ChoiceNode::FilterASCII(int depth) {
+RegExpNode* ChoiceNode::FilterASCII(int depth, bool ignore_case) {
if (info()->replacement_calculated) return replacement();
if (depth < 0) return this;
if (info()->visited) return this;
@@ -2932,7 +2916,8 @@ RegExpNode* ChoiceNode::FilterASCII(int depth) {
RegExpNode* survivor = NULL;
for (int i = 0; i < choice_count; i++) {
GuardedAlternative alternative = alternatives_->at(i);
- RegExpNode* replacement = alternative.node()->FilterASCII(depth - 1);
+ RegExpNode* replacement =
+ alternative.node()->FilterASCII(depth - 1, ignore_case);
ASSERT(replacement != this); // No missing EMPTY_MATCH_CHECK.
if (replacement != NULL) {
alternatives_->at(i).set_node(replacement);
@@ -2952,7 +2937,7 @@ RegExpNode* ChoiceNode::FilterASCII(int depth) {
new(zone()) ZoneList<GuardedAlternative>(surviving, zone());
for (int i = 0; i < choice_count; i++) {
RegExpNode* replacement =
- alternatives_->at(i).node()->FilterASCII(depth - 1);
+ alternatives_->at(i).node()->FilterASCII(depth - 1, ignore_case);
if (replacement != NULL) {
alternatives_->at(i).set_node(replacement);
new_alternatives->Add(alternatives_->at(i), zone());
@@ -2963,7 +2948,8 @@ RegExpNode* ChoiceNode::FilterASCII(int depth) {
}
-RegExpNode* NegativeLookaheadChoiceNode::FilterASCII(int depth) {
+RegExpNode* NegativeLookaheadChoiceNode::FilterASCII(int depth,
+ bool ignore_case) {
if (info()->replacement_calculated) return replacement();
if (depth < 0) return this;
if (info()->visited) return this;
@@ -2971,12 +2957,12 @@ RegExpNode* NegativeLookaheadChoiceNode::FilterASCII(int depth) {
// Alternative 0 is the negative lookahead, alternative 1 is what comes
// afterwards.
RegExpNode* node = alternatives_->at(1).node();
- RegExpNode* replacement = node->FilterASCII(depth - 1);
+ RegExpNode* replacement = node->FilterASCII(depth - 1, ignore_case);
if (replacement == NULL) return set_replacement(NULL);
alternatives_->at(1).set_node(replacement);
RegExpNode* neg_node = alternatives_->at(0).node();
- RegExpNode* neg_replacement = neg_node->FilterASCII(depth - 1);
+ RegExpNode* neg_replacement = neg_node->FilterASCII(depth - 1, ignore_case);
// If the negative lookahead is always going to fail then
// we don't need to check it.
if (neg_replacement == NULL) return set_replacement(replacement);
@@ -2999,19 +2985,15 @@ void LoopChoiceNode::GetQuickCheckDetails(QuickCheckDetails* details,
void LoopChoiceNode::FillInBMInfo(int offset,
- int recursion_depth,
int budget,
BoyerMooreLookahead* bm,
bool not_at_start) {
- if (body_can_be_zero_length_ ||
- recursion_depth > RegExpCompiler::kMaxRecursion ||
- budget <= 0) {
+ if (body_can_be_zero_length_ || budget <= 0) {
bm->SetRest(offset);
SaveBMInfo(bm, not_at_start, offset);
return;
}
- ChoiceNode::FillInBMInfo(
- offset, recursion_depth + 1, budget - 1, bm, not_at_start);
+ ChoiceNode::FillInBMInfo(offset, budget - 1, bm, not_at_start);
SaveBMInfo(bm, not_at_start, offset);
}
@@ -3108,12 +3090,13 @@ void AssertionNode::EmitBoundaryCheck(RegExpCompiler* compiler, Trace* trace) {
BoyerMooreLookahead* lookahead = bm_info(not_at_start);
if (lookahead == NULL) {
int eats_at_least =
- Min(kMaxLookaheadForBoyerMoore,
- EatsAtLeast(kMaxLookaheadForBoyerMoore, 0, not_at_start));
+ Min(kMaxLookaheadForBoyerMoore, EatsAtLeast(kMaxLookaheadForBoyerMoore,
+ kRecursionBudget,
+ not_at_start));
if (eats_at_least >= 1) {
BoyerMooreLookahead* bm =
new(zone()) BoyerMooreLookahead(eats_at_least, compiler, zone());
- FillInBMInfo(0, 0, kFillInBMBudget, bm, not_at_start);
+ FillInBMInfo(0, kRecursionBudget, bm, not_at_start);
if (bm->at(0)->is_non_word()) next_is_word_character = Trace::FALSE;
if (bm->at(0)->is_word()) next_is_word_character = Trace::TRUE;
}
@@ -3299,7 +3282,7 @@ void TextNode::TextEmitPass(RegExpCompiler* compiler,
switch (pass) {
case NON_ASCII_MATCH:
ASSERT(ascii);
- if (quarks[j] > String::kMaxAsciiCharCode) {
+ if (quarks[j] > String::kMaxOneByteCharCode) {
assembler->GoTo(backtrack);
return;
}
@@ -3498,7 +3481,7 @@ RegExpNode* TextNode::GetSuccessorOfOmnivorousTextNode(
if (ranges->length() != 1) return NULL;
uint32_t max_char;
if (compiler->ascii()) {
- max_char = String::kMaxAsciiCharCode;
+ max_char = String::kMaxOneByteCharCode;
} else {
max_char = String::kMaxUtf16CodeUnit;
}
@@ -3698,7 +3681,7 @@ BoyerMooreLookahead::BoyerMooreLookahead(
: length_(length),
compiler_(compiler) {
if (compiler->ascii()) {
- max_char_ = String::kMaxAsciiCharCode;
+ max_char_ = String::kMaxOneByteCharCode;
} else {
max_char_ = String::kMaxUtf16CodeUnit;
}
@@ -4045,16 +4028,17 @@ void ChoiceNode::Emit(RegExpCompiler* compiler, Trace* trace) {
ASSERT(trace->is_trivial()); // This is the case on LoopChoiceNodes.
BoyerMooreLookahead* lookahead = bm_info(not_at_start);
if (lookahead == NULL) {
- eats_at_least =
- Min(kMaxLookaheadForBoyerMoore,
- EatsAtLeast(kMaxLookaheadForBoyerMoore, 0, not_at_start));
+ eats_at_least = Min(kMaxLookaheadForBoyerMoore,
+ EatsAtLeast(kMaxLookaheadForBoyerMoore,
+ kRecursionBudget,
+ not_at_start));
if (eats_at_least >= 1) {
BoyerMooreLookahead* bm =
new(zone()) BoyerMooreLookahead(eats_at_least,
compiler,
zone());
GuardedAlternative alt0 = alternatives_->at(0);
- alt0.node()->FillInBMInfo(0, 0, kFillInBMBudget, bm, not_at_start);
+ alt0.node()->FillInBMInfo(0, kRecursionBudget, bm, not_at_start);
skip_was_emitted = bm->EmitSkipInstructions(macro_assembler);
}
} else {
@@ -4066,7 +4050,8 @@ void ChoiceNode::Emit(RegExpCompiler* compiler, Trace* trace) {
if (eats_at_least == kEatsAtLeastNotYetInitialized) {
// Save some time by looking at most one machine word ahead.
- eats_at_least = EatsAtLeast(compiler->ascii() ? 4 : 2, 0, not_at_start);
+ eats_at_least =
+ EatsAtLeast(compiler->ascii() ? 4 : 2, kRecursionBudget, not_at_start);
}
int preload_characters = CalculatePreloadCharacters(compiler, eats_at_least);
@@ -5336,9 +5321,9 @@ void CharacterRange::AddCaseEquivalents(ZoneList<CharacterRange>* ranges,
Isolate* isolate = Isolate::Current();
uc16 bottom = from();
uc16 top = to();
- if (is_ascii) {
- if (bottom > String::kMaxAsciiCharCode) return;
- if (top > String::kMaxAsciiCharCode) top = String::kMaxAsciiCharCode;
+ if (is_ascii && !RangeContainsLatin1Equivalents(*this)) {
+ if (bottom > String::kMaxOneByteCharCode) return;
+ if (top > String::kMaxOneByteCharCode) top = String::kMaxOneByteCharCode;
}
unibrow::uchar chars[unibrow::Ecma262UnCanonicalize::kMaxWidth];
if (top == bottom) {
@@ -5822,7 +5807,6 @@ void Analysis::VisitAssertion(AssertionNode* that) {
void BackReferenceNode::FillInBMInfo(int offset,
- int recursion_depth,
int budget,
BoyerMooreLookahead* bm,
bool not_at_start) {
@@ -5838,7 +5822,6 @@ STATIC_ASSERT(BoyerMoorePositionInfo::kMapSize ==
void ChoiceNode::FillInBMInfo(int offset,
- int recursion_depth,
int budget,
BoyerMooreLookahead* bm,
bool not_at_start) {
@@ -5851,15 +5834,13 @@ void ChoiceNode::FillInBMInfo(int offset,
SaveBMInfo(bm, not_at_start, offset);
return;
}
- alt.node()->FillInBMInfo(
- offset, recursion_depth + 1, budget, bm, not_at_start);
+ alt.node()->FillInBMInfo(offset, budget, bm, not_at_start);
}
SaveBMInfo(bm, not_at_start, offset);
}
void TextNode::FillInBMInfo(int initial_offset,
- int recursion_depth,
int budget,
BoyerMooreLookahead* bm,
bool not_at_start) {
@@ -5885,7 +5866,7 @@ void TextNode::FillInBMInfo(int initial_offset,
int length = GetCaseIndependentLetters(
ISOLATE,
character,
- bm->max_char() == String::kMaxAsciiCharCode,
+ bm->max_char() == String::kMaxOneByteCharCode,
chars);
for (int j = 0; j < length; j++) {
bm->Set(offset, chars[j]);
@@ -5916,7 +5897,6 @@ void TextNode::FillInBMInfo(int initial_offset,
return;
}
on_success()->FillInBMInfo(offset,
- recursion_depth + 1,
budget - 1,
bm,
true); // Not at start after a text node.
@@ -6099,10 +6079,12 @@ RegExpEngine::CompilationResult RegExpEngine::Compile(
}
}
if (is_ascii) {
- node = node->FilterASCII(RegExpCompiler::kMaxRecursion);
+ node = node->FilterASCII(RegExpCompiler::kMaxRecursion, ignore_case);
// Do it again to propagate the new nodes to places where they were not
// put because they had not been calculated yet.
- if (node != NULL) node = node->FilterASCII(RegExpCompiler::kMaxRecursion);
+ if (node != NULL) {
+ node = node->FilterASCII(RegExpCompiler::kMaxRecursion, ignore_case);
+ }
}
if (node == NULL) node = new(zone) EndNode(EndNode::BACKTRACK, zone);
diff --git a/src/3rdparty/v8/src/jsregexp.h b/src/3rdparty/v8/src/jsregexp.h
index 96825ce..625f192 100644
--- a/src/3rdparty/v8/src/jsregexp.h
+++ b/src/3rdparty/v8/src/jsregexp.h
@@ -153,17 +153,17 @@ class RegExpImpl {
bool is_global,
Isolate* isolate);
- ~GlobalCache();
+ INLINE(~GlobalCache());
// Fetch the next entry in the cache for global regexp match results.
// This does not set the last match info. Upon failure, NULL is returned.
// The cause can be checked with Result(). The previous
// result is still available in memory when a failure happens.
- int32_t* FetchNext();
+ INLINE(int32_t* FetchNext());
- int32_t* LastSuccessfulMatch();
+ INLINE(int32_t* LastSuccessfulMatch());
- inline bool HasException() { return num_matches_ < 0; }
+ INLINE(bool HasException()) { return num_matches_ < 0; }
private:
int num_matches_;
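
Moving the bodies to jsregexp-inl.h while marking the declarations INLINE lets the few hot call sites that include the -inl header inline them, without pulling the definitions into every user of jsregexp.h. A sketch of the macro and the header/-inl.h split collapsed into one file (the expansion shown is an assumption mirroring common V8 practice, not quoted from globals.h):

    #include <cstdio>

    // Assumed expansion of INLINE(...) on GCC-family compilers.
    #if defined(__GNUC__)
    #define INLINE(declarator) inline declarator __attribute__((always_inline))
    #else
    #define INLINE(declarator) inline declarator
    #endif

    struct Counter {
      int n;
      INLINE(void Bump());  // Declaration in the "header" (jsregexp.h role).
    };

    // Definition in the "-inl.h" (jsregexp-inl.h role): only translation
    // units on the hot path include it and get the body inlined.
    inline void Counter::Bump() { n++; }

    int main() {
      Counter c = {0};
      for (int i = 0; i < 3; i++) c.Bump();
      printf("%d\n", c.n);  // 3
    }
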
@@ -582,9 +582,7 @@ class RegExpNode: public ZoneObject {
// used to indicate that we know we are not at the start of the input. In
// this case anchored branches will always fail and can be ignored when
// determining how many characters are consumed on success.
- virtual int EatsAtLeast(int still_to_find,
- int recursion_depth,
- bool not_at_start) = 0;
+ virtual int EatsAtLeast(int still_to_find, int budget, bool not_at_start) = 0;
// Emits some quick code that checks whether the preloaded characters match.
// Falls through on certain failure, jumps to the label on possible success.
// If the node cannot make a quick check it does nothing and returns false.
@@ -616,9 +614,8 @@ class RegExpNode: public ZoneObject {
// implementation. TODO(erikcorry): This should share more code with
// EatsAtLeast, GetQuickCheckDetails. The budget argument is used to limit
// the number of nodes we are willing to look at in order to create this data.
- static const int kFillInBMBudget = 200;
+ static const int kRecursionBudget = 200;
virtual void FillInBMInfo(int offset,
- int recursion_depth,
int budget,
BoyerMooreLookahead* bm,
bool not_at_start) {
@@ -628,7 +625,7 @@ class RegExpNode: public ZoneObject {
// If we know that the input is ASCII then there are some nodes that can
// never match. This method returns a node that can be substituted for
// itself, or NULL if the node can never match.
- virtual RegExpNode* FilterASCII(int depth) { return this; }
+ virtual RegExpNode* FilterASCII(int depth, bool ignore_case) { return this; }
// Helper for FilterASCII.
RegExpNode* replacement() {
ASSERT(info()->replacement_calculated);
@@ -723,19 +720,17 @@ class SeqRegExpNode: public RegExpNode {
: RegExpNode(on_success->zone()), on_success_(on_success) { }
RegExpNode* on_success() { return on_success_; }
void set_on_success(RegExpNode* node) { on_success_ = node; }
- virtual RegExpNode* FilterASCII(int depth);
+ virtual RegExpNode* FilterASCII(int depth, bool ignore_case);
virtual void FillInBMInfo(int offset,
- int recursion_depth,
int budget,
BoyerMooreLookahead* bm,
bool not_at_start) {
- on_success_->FillInBMInfo(
- offset, recursion_depth + 1, budget - 1, bm, not_at_start);
+ on_success_->FillInBMInfo(offset, budget - 1, bm, not_at_start);
if (offset == 0) set_bm_info(not_at_start, bm);
}
protected:
- RegExpNode* FilterSuccessor(int depth);
+ RegExpNode* FilterSuccessor(int depth, bool ignore_case);
private:
RegExpNode* on_success_;
@@ -773,9 +768,7 @@ class ActionNode: public SeqRegExpNode {
RegExpNode* on_success);
virtual void Accept(NodeVisitor* visitor);
virtual void Emit(RegExpCompiler* compiler, Trace* trace);
- virtual int EatsAtLeast(int still_to_find,
- int recursion_depth,
- bool not_at_start);
+ virtual int EatsAtLeast(int still_to_find, int budget, bool not_at_start);
virtual void GetQuickCheckDetails(QuickCheckDetails* details,
RegExpCompiler* compiler,
int filled_in,
@@ -784,7 +777,6 @@ class ActionNode: public SeqRegExpNode {
details, compiler, filled_in, not_at_start);
}
virtual void FillInBMInfo(int offset,
- int recursion_depth,
int budget,
BoyerMooreLookahead* bm,
bool not_at_start);
@@ -843,9 +835,7 @@ class TextNode: public SeqRegExpNode {
}
virtual void Accept(NodeVisitor* visitor);
virtual void Emit(RegExpCompiler* compiler, Trace* trace);
- virtual int EatsAtLeast(int still_to_find,
- int recursion_depth,
- bool not_at_start);
+ virtual int EatsAtLeast(int still_to_find, int budget, bool not_at_start);
virtual void GetQuickCheckDetails(QuickCheckDetails* details,
RegExpCompiler* compiler,
int characters_filled_in,
@@ -856,12 +846,11 @@ class TextNode: public SeqRegExpNode {
virtual RegExpNode* GetSuccessorOfOmnivorousTextNode(
RegExpCompiler* compiler);
virtual void FillInBMInfo(int offset,
- int recursion_depth,
int budget,
BoyerMooreLookahead* bm,
bool not_at_start);
void CalculateOffsets();
- virtual RegExpNode* FilterASCII(int depth);
+ virtual RegExpNode* FilterASCII(int depth, bool ignore_case);
private:
enum TextEmitPassType {
@@ -911,15 +900,12 @@ class AssertionNode: public SeqRegExpNode {
}
virtual void Accept(NodeVisitor* visitor);
virtual void Emit(RegExpCompiler* compiler, Trace* trace);
- virtual int EatsAtLeast(int still_to_find,
- int recursion_depth,
- bool not_at_start);
+ virtual int EatsAtLeast(int still_to_find, int budget, bool not_at_start);
virtual void GetQuickCheckDetails(QuickCheckDetails* details,
RegExpCompiler* compiler,
int filled_in,
bool not_at_start);
virtual void FillInBMInfo(int offset,
- int recursion_depth,
int budget,
BoyerMooreLookahead* bm,
bool not_at_start);
@@ -960,7 +946,6 @@ class BackReferenceNode: public SeqRegExpNode {
return;
}
virtual void FillInBMInfo(int offset,
- int recursion_depth,
int budget,
BoyerMooreLookahead* bm,
bool not_at_start);
@@ -989,7 +974,6 @@ class EndNode: public RegExpNode {
UNREACHABLE();
}
virtual void FillInBMInfo(int offset,
- int recursion_depth,
int budget,
BoyerMooreLookahead* bm,
bool not_at_start) {
@@ -1075,11 +1059,9 @@ class ChoiceNode: public RegExpNode {
ZoneList<GuardedAlternative>* alternatives() { return alternatives_; }
DispatchTable* GetTable(bool ignore_case);
virtual void Emit(RegExpCompiler* compiler, Trace* trace);
- virtual int EatsAtLeast(int still_to_find,
- int recursion_depth,
- bool not_at_start);
+ virtual int EatsAtLeast(int still_to_find, int budget, bool not_at_start);
int EatsAtLeastHelper(int still_to_find,
- int recursion_depth,
+ int budget,
RegExpNode* ignore_this_node,
bool not_at_start);
virtual void GetQuickCheckDetails(QuickCheckDetails* details,
@@ -1087,7 +1069,6 @@ class ChoiceNode: public RegExpNode {
int characters_filled_in,
bool not_at_start);
virtual void FillInBMInfo(int offset,
- int recursion_depth,
int budget,
BoyerMooreLookahead* bm,
bool not_at_start);
@@ -1097,7 +1078,7 @@ class ChoiceNode: public RegExpNode {
void set_not_at_start() { not_at_start_ = true; }
void set_being_calculated(bool b) { being_calculated_ = b; }
virtual bool try_to_emit_quick_check_for_alternative(int i) { return true; }
- virtual RegExpNode* FilterASCII(int depth);
+ virtual RegExpNode* FilterASCII(int depth, bool ignore_case);
protected:
int GreedyLoopTextLengthForAlternative(GuardedAlternative* alternative);
@@ -1133,20 +1114,17 @@ class NegativeLookaheadChoiceNode: public ChoiceNode {
AddAlternative(this_must_fail);
AddAlternative(then_do_this);
}
- virtual int EatsAtLeast(int still_to_find,
- int recursion_depth,
- bool not_at_start);
+ virtual int EatsAtLeast(int still_to_find, int budget, bool not_at_start);
virtual void GetQuickCheckDetails(QuickCheckDetails* details,
RegExpCompiler* compiler,
int characters_filled_in,
bool not_at_start);
virtual void FillInBMInfo(int offset,
- int recursion_depth,
int budget,
BoyerMooreLookahead* bm,
bool not_at_start) {
alternatives_->at(1).node()->FillInBMInfo(
- offset, recursion_depth + 1, budget - 1, bm, not_at_start);
+ offset, budget - 1, bm, not_at_start);
if (offset == 0) set_bm_info(not_at_start, bm);
}
// For a negative lookahead we don't emit the quick check for the
@@ -1155,7 +1133,7 @@ class NegativeLookaheadChoiceNode: public ChoiceNode {
// characters, but on a negative lookahead the negative branch did not take
// part in that calculation (EatsAtLeast) so the assumptions don't hold.
virtual bool try_to_emit_quick_check_for_alternative(int i) { return i != 0; }
- virtual RegExpNode* FilterASCII(int depth);
+ virtual RegExpNode* FilterASCII(int depth, bool ignore_case);
};
@@ -1169,15 +1147,12 @@ class LoopChoiceNode: public ChoiceNode {
void AddLoopAlternative(GuardedAlternative alt);
void AddContinueAlternative(GuardedAlternative alt);
virtual void Emit(RegExpCompiler* compiler, Trace* trace);
- virtual int EatsAtLeast(int still_to_find,
- int recursion_depth,
- bool not_at_start);
+ virtual int EatsAtLeast(int still_to_find, int budget, bool not_at_start);
virtual void GetQuickCheckDetails(QuickCheckDetails* details,
RegExpCompiler* compiler,
int characters_filled_in,
bool not_at_start);
virtual void FillInBMInfo(int offset,
- int recursion_depth,
int budget,
BoyerMooreLookahead* bm,
bool not_at_start);
@@ -1185,7 +1160,7 @@ class LoopChoiceNode: public ChoiceNode {
RegExpNode* continue_node() { return continue_node_; }
bool body_can_be_zero_length() { return body_can_be_zero_length_; }
virtual void Accept(NodeVisitor* visitor);
- virtual RegExpNode* FilterASCII(int depth);
+ virtual RegExpNode* FilterASCII(int depth, bool ignore_case);
private:
// AddAlternative is made private for loop nodes because alternatives
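
FilterASCII gains an ignore_case parameter because, under case-independent matching, a character class that lies entirely outside the one-byte range can still match a one-byte subject. A simplified stand-in for the kind of check RangeContainsLatin1Equivalents performs (the code points are real Unicode; the helper itself is illustrative):

// A few non-Latin-1 code points whose case equivalents are Latin-1, so a
// range containing them must survive one-byte filtering under ignore_case.
static bool HasLatin1CaseEquivalent(int from, int to) {
  static const int kSpecials[] = {
    0x0178,  // LATIN CAPITAL LETTER Y WITH DIAERESIS, lowercases to U+00FF
    0x212A,  // KELVIN SIGN, case-folds to 'k' (U+006B)
    0x212B   // ANGSTROM SIGN, case-folds to U+00E5
  };
  for (int c : kSpecials) {
    if (from <= c && c <= to) return true;
  }
  return false;
}
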
diff --git a/src/3rdparty/v8/src/list-inl.h b/src/3rdparty/v8/src/list-inl.h
index 60a033d..7a84313 100644
--- a/src/3rdparty/v8/src/list-inl.h
+++ b/src/3rdparty/v8/src/list-inl.h
@@ -85,8 +85,9 @@ void List<T, P>::ResizeAddInternal(const T& element, P alloc) {
template<typename T, class P>
void List<T, P>::Resize(int new_capacity, P alloc) {
+ ASSERT_LE(length_, new_capacity);
T* new_data = NewData(new_capacity, alloc);
- memcpy(new_data, data_, capacity_ * sizeof(T));
+ memcpy(new_data, data_, length_ * sizeof(T));
List<T, P>::DeleteData(data_);
data_ = new_data;
capacity_ = new_capacity;
@@ -162,6 +163,14 @@ void List<T, P>::Rewind(int pos) {
template<typename T, class P>
+void List<T, P>::Trim(P alloc) {
+ if (length_ < capacity_ / 4) {
+ Resize(capacity_ / 2, alloc);
+ }
+}
+
+
+template<typename T, class P>
void List<T, P>::Iterate(void (*callback)(T* x)) {
for (int i = 0; i < length_; i++) callback(&data_[i]);
}
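
The Resize fix above matters because Trim() is the first caller that shrinks the backing store: copying capacity_ elements was harmless while Resize only ever grew the list, but it would overrun the smaller destination buffer when shrinking. A minimal sketch of the corrected copy, using a plain int buffer rather than the V8 List template:

#include <cassert>
#include <cstring>

// Shrink or grow a backing store. Copying 'capacity' elements would write
// past the end of 'fresh' whenever new_capacity < capacity; copying
// 'length' (the live elements) is both safe and sufficient.
void ResizeInts(int*& data, int& capacity, int length, int new_capacity) {
  assert(length <= new_capacity);
  int* fresh = new int[new_capacity];
  memcpy(fresh, data, length * sizeof(int));  // length, not capacity
  delete[] data;
  data = fresh;
  capacity = new_capacity;
}
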
diff --git a/src/3rdparty/v8/src/list.h b/src/3rdparty/v8/src/list.h
index 7fd4f5c..43d982f 100644
--- a/src/3rdparty/v8/src/list.h
+++ b/src/3rdparty/v8/src/list.h
@@ -149,6 +149,9 @@ class List {
// Drop the last 'count' elements from the list.
INLINE(void RewindBy(int count)) { Rewind(length_ - count); }
+ // Halve the capacity if the fill level is less than a quarter.
+ INLINE(void Trim(AllocationPolicy allocator = AllocationPolicy()));
+
bool Contains(const T& elm) const;
int CountOccurrences(const T& elm, int start, int end) const;
diff --git a/src/3rdparty/v8/src/lithium-allocator-inl.h b/src/3rdparty/v8/src/lithium-allocator-inl.h
index 8f660ce..a6d053a 100644
--- a/src/3rdparty/v8/src/lithium-allocator-inl.h
+++ b/src/3rdparty/v8/src/lithium-allocator-inl.h
@@ -99,6 +99,7 @@ bool InputIterator::Done() { return current_ >= limit_; }
LOperand* InputIterator::Current() {
ASSERT(!Done());
+ ASSERT(instr_->InputAt(current_) != NULL);
return instr_->InputAt(current_);
}
@@ -110,7 +111,9 @@ void InputIterator::Advance() {
void InputIterator::SkipUninteresting() {
- while (current_ < limit_ && instr_->InputAt(current_)->IsConstantOperand()) {
+ while (current_ < limit_) {
+ LOperand* current = instr_->InputAt(current_);
+ if (current != NULL && !current->IsConstantOperand()) break;
++current_;
}
}
@@ -127,9 +130,11 @@ bool UseIterator::Done() {
LOperand* UseIterator::Current() {
ASSERT(!Done());
- return input_iterator_.Done()
+ LOperand* result = input_iterator_.Done()
? env_iterator_.Current()
: input_iterator_.Current();
+ ASSERT(result != NULL);
+ return result;
}
@@ -139,6 +144,21 @@ void UseIterator::Advance() {
: input_iterator_.Advance();
}
+
+void LAllocator::SetLiveRangeAssignedRegister(
+ LiveRange* range,
+ int reg,
+ RegisterKind register_kind,
+ Zone* zone) {
+ if (register_kind == DOUBLE_REGISTERS) {
+ assigned_double_registers_->Add(reg);
+ } else {
+ assigned_registers_->Add(reg);
+ }
+ range->set_assigned_register(reg, register_kind, zone);
+}
+
+
} } // namespace v8::internal
#endif // V8_LITHIUM_ALLOCATOR_INL_H_
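
SetLiveRangeAssignedRegister funnels every register assignment through a single helper so the allocator can record, in a BitVector, which registers were actually handed out; the chunk later walks the double-register set to reserve spill slots for saved caller doubles. A compact sketch of that bookkeeping, with std::bitset standing in for V8's BitVector and assumed sizes:

#include <bitset>

const int kMaxDoubleRegisters = 16;           // assumption for this sketch
const int kDoubleSize = 8, kPointerSize = 4;  // e.g. a 32-bit target

int SpillSlotsForSavedDoubles(const std::bitset<kMaxDoubleRegisters>& used) {
  int slots = 0;
  for (int reg = 0; reg < kMaxDoubleRegisters; ++reg) {
    if (!used.test(reg)) continue;
    // A double needs two pointer-sized slots when it is twice as wide.
    slots += (kDoubleSize == kPointerSize * 2) ? 2 : 1;
  }
  return slots;
}
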
diff --git a/src/3rdparty/v8/src/lithium-allocator.cc b/src/3rdparty/v8/src/lithium-allocator.cc
index 91a9811..dcfbead 100644
--- a/src/3rdparty/v8/src/lithium-allocator.cc
+++ b/src/3rdparty/v8/src/lithium-allocator.cc
@@ -606,7 +606,7 @@ void LAllocator::AddInitialIntervals(HBasicBlock* block,
int LAllocator::FixedDoubleLiveRangeID(int index) {
- return -index - 1 - Register::kNumAllocatableRegisters;
+ return -index - 1 - Register::kMaxNumAllocatableRegisters;
}
@@ -638,12 +638,12 @@ LOperand* LAllocator::AllocateFixed(LUnallocated* operand,
LiveRange* LAllocator::FixedLiveRangeFor(int index) {
- ASSERT(index < Register::kNumAllocatableRegisters);
+ ASSERT(index < Register::kMaxNumAllocatableRegisters);
LiveRange* result = fixed_live_ranges_[index];
if (result == NULL) {
result = new(zone_) LiveRange(FixedLiveRangeID(index), zone_);
ASSERT(result->IsFixed());
- result->set_assigned_register(index, GENERAL_REGISTERS, zone_);
+ SetLiveRangeAssignedRegister(result, index, GENERAL_REGISTERS, zone_);
fixed_live_ranges_[index] = result;
}
return result;
@@ -651,12 +651,12 @@ LiveRange* LAllocator::FixedLiveRangeFor(int index) {
LiveRange* LAllocator::FixedDoubleLiveRangeFor(int index) {
- ASSERT(index < DoubleRegister::kNumAllocatableRegisters);
+ ASSERT(index < DoubleRegister::NumAllocatableRegisters());
LiveRange* result = fixed_double_live_ranges_[index];
if (result == NULL) {
result = new(zone_) LiveRange(FixedDoubleLiveRangeID(index), zone_);
ASSERT(result->IsFixed());
- result->set_assigned_register(index, DOUBLE_REGISTERS, zone_);
+ SetLiveRangeAssignedRegister(result, index, DOUBLE_REGISTERS, zone_);
fixed_double_live_ranges_[index] = result;
}
return result;
@@ -768,6 +768,7 @@ void LAllocator::AddConstraintsGapMove(int index,
void LAllocator::MeetRegisterConstraints(HBasicBlock* block) {
int start = block->first_instruction_index();
int end = block->last_instruction_index();
+ if (start == -1) return;
for (int i = start; i <= end; ++i) {
if (IsGapAt(i)) {
LInstruction* instr = NULL;
@@ -946,8 +947,8 @@ void LAllocator::ProcessInstructions(HBasicBlock* block, BitVector* live) {
Define(curr_position, output, NULL);
}
- if (instr->IsMarkedAsCall()) {
- for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
+ if (instr->ClobbersRegisters()) {
+ for (int i = 0; i < Register::kMaxNumAllocatableRegisters; ++i) {
if (output == NULL || !output->IsRegister() ||
output->index() != i) {
LiveRange* range = FixedLiveRangeFor(i);
@@ -958,8 +959,8 @@ void LAllocator::ProcessInstructions(HBasicBlock* block, BitVector* live) {
}
}
- if (instr->IsMarkedAsCall()) {
- for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; ++i) {
+ if (instr->ClobbersDoubleRegisters()) {
+ for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); ++i) {
if (output == NULL || !output->IsDoubleRegister() ||
output->index() != i) {
LiveRange* range = FixedDoubleLiveRangeFor(i);
@@ -989,7 +990,7 @@ void LAllocator::ProcessInstructions(HBasicBlock* block, BitVector* live) {
for (TempIterator it(instr); !it.Done(); it.Advance()) {
LOperand* temp = it.Current();
- if (instr->IsMarkedAsCall()) {
+ if (instr->ClobbersTemps()) {
if (temp->IsRegister()) continue;
if (temp->IsUnallocated()) {
LUnallocated* temp_unalloc = LUnallocated::cast(temp);
@@ -1065,6 +1066,13 @@ void LAllocator::ResolvePhis(HBasicBlock* block) {
bool LAllocator::Allocate(LChunk* chunk) {
ASSERT(chunk_ == NULL);
chunk_ = static_cast<LPlatformChunk*>(chunk);
+ assigned_registers_ =
+ new(zone()) BitVector(Register::NumAllocatableRegisters(), zone());
+ assigned_registers_->Clear();
+ assigned_double_registers_ =
+ new(zone()) BitVector(DoubleRegister::NumAllocatableRegisters(),
+ zone());
+ assigned_double_registers_->Clear();
MeetRegisterConstraints();
if (!AllocationOk()) return false;
ResolvePhis();
@@ -1324,8 +1332,14 @@ void LAllocator::BuildLiveRanges() {
while (!iterator.Done()) {
found = true;
int operand_index = iterator.Current();
- PrintF("Function: %s\n",
- *chunk_->info()->function()->debug_name()->ToCString());
+ if (chunk_->info()->IsStub()) {
+ CodeStub::Major major_key = chunk_->info()->code_stub()->MajorKey();
+ PrintF("Function: %s\n", CodeStub::MajorName(major_key, false));
+ } else {
+ ASSERT(chunk_->info()->IsOptimizing());
+ PrintF("Function: %s\n",
+ *chunk_->info()->function()->debug_name()->ToCString());
+ }
PrintF("Value %d used before first definition!\n", operand_index);
LiveRange* range = LiveRangeFor(operand_index);
PrintF("First use is at %d\n", range->first_pos()->pos().Value());
@@ -1411,7 +1425,7 @@ void LAllocator::PopulatePointerMaps() {
LifetimePosition safe_point_pos =
LifetimePosition::FromInstructionIndex(safe_point);
LiveRange* cur = range;
- while (cur != NULL && !cur->Covers(safe_point_pos.PrevInstruction())) {
+ while (cur != NULL && !cur->Covers(safe_point_pos)) {
cur = cur->next();
}
if (cur == NULL) continue;
@@ -1471,14 +1485,14 @@ void LAllocator::ProcessOsrEntry() {
void LAllocator::AllocateGeneralRegisters() {
HPhase phase("L_Allocate general registers", this);
- num_registers_ = Register::kNumAllocatableRegisters;
+ num_registers_ = Register::NumAllocatableRegisters();
AllocateRegisters();
}
void LAllocator::AllocateDoubleRegisters() {
HPhase phase("L_Allocate double registers", this);
- num_registers_ = DoubleRegister::kNumAllocatableRegisters;
+ num_registers_ = DoubleRegister::NumAllocatableRegisters();
mode_ = DOUBLE_REGISTERS;
AllocateRegisters();
}
@@ -1502,7 +1516,7 @@ void LAllocator::AllocateRegisters() {
ASSERT(inactive_live_ranges_.is_empty());
if (mode_ == DOUBLE_REGISTERS) {
- for (int i = 0; i < fixed_double_live_ranges_.length(); ++i) {
+ for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); ++i) {
LiveRange* current = fixed_double_live_ranges_.at(i);
if (current != NULL) {
AddToInactive(current);
@@ -1757,14 +1771,14 @@ void LAllocator::InactiveToActive(LiveRange* range) {
// TryAllocateFreeReg and AllocateBlockedReg assume this
// when allocating local arrays.
-STATIC_ASSERT(DoubleRegister::kNumAllocatableRegisters >=
- Register::kNumAllocatableRegisters);
+STATIC_ASSERT(DoubleRegister::kMaxNumAllocatableRegisters >=
+ Register::kMaxNumAllocatableRegisters);
bool LAllocator::TryAllocateFreeReg(LiveRange* current) {
- LifetimePosition free_until_pos[DoubleRegister::kNumAllocatableRegisters];
+ LifetimePosition free_until_pos[DoubleRegister::kMaxNumAllocatableRegisters];
- for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; i++) {
+ for (int i = 0; i < DoubleRegister::kMaxNumAllocatableRegisters; i++) {
free_until_pos[i] = LifetimePosition::MaxPosition();
}
@@ -1801,7 +1815,7 @@ bool LAllocator::TryAllocateFreeReg(LiveRange* current) {
TraceAlloc("Assigning preferred reg %s to live range %d\n",
RegisterName(register_index),
current->id());
- current->set_assigned_register(register_index, mode_, zone_);
+ SetLiveRangeAssignedRegister(current, register_index, mode_, zone_);
return true;
}
}
@@ -1837,7 +1851,7 @@ bool LAllocator::TryAllocateFreeReg(LiveRange* current) {
TraceAlloc("Assigning free reg %s to live range %d\n",
RegisterName(reg),
current->id());
- current->set_assigned_register(reg, mode_, zone_);
+ SetLiveRangeAssignedRegister(current, reg, mode_, zone_);
return true;
}
@@ -1853,10 +1867,10 @@ void LAllocator::AllocateBlockedReg(LiveRange* current) {
}
- LifetimePosition use_pos[DoubleRegister::kNumAllocatableRegisters];
- LifetimePosition block_pos[DoubleRegister::kNumAllocatableRegisters];
+ LifetimePosition use_pos[DoubleRegister::kMaxNumAllocatableRegisters];
+ LifetimePosition block_pos[DoubleRegister::kMaxNumAllocatableRegisters];
- for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; i++) {
+ for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); i++) {
use_pos[i] = block_pos[i] = LifetimePosition::MaxPosition();
}
@@ -1927,7 +1941,7 @@ void LAllocator::AllocateBlockedReg(LiveRange* current) {
TraceAlloc("Assigning blocked reg %s to live range %d\n",
RegisterName(reg),
current->id());
- current->set_assigned_register(reg, mode_, zone_);
+ SetLiveRangeAssignedRegister(current, reg, mode_, zone_);
// This register was not free. Thus we need to find and spill
// parts of active and inactive live regions that use the same register
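
The renames in lithium-allocator.cc follow one pattern: compile-time storage is sized with the architectural maximum (kMaxNumAllocatableRegisters), while loops and checks use the count actually available at runtime (NumAllocatableRegisters()), which can be smaller depending on detected CPU features. A hedged sketch of the idiom with illustrative values:

// Illustrative only: the real values depend on the target architecture.
static const int kMaxNumAllocatableRegisters = 32;  // compile-time bound

int NumAllocatableRegisters() {
  return 16;  // runtime-detected count, e.g. without some CPU feature
}

void Example() {
  // Storage is sized for the worst case, so it can live on the stack...
  int free_until[kMaxNumAllocatableRegisters];
  // ...but only registers that actually exist are ever visited.
  for (int i = 0; i < NumAllocatableRegisters(); ++i) {
    free_until[i] = 0;
  }
}
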
diff --git a/src/3rdparty/v8/src/lithium-allocator.h b/src/3rdparty/v8/src/lithium-allocator.h
index 5b05263..2953550 100644
--- a/src/3rdparty/v8/src/lithium-allocator.h
+++ b/src/3rdparty/v8/src/lithium-allocator.h
@@ -399,40 +399,6 @@ class LiveRange: public ZoneObject {
};
-class GrowableBitVector BASE_EMBEDDED {
- public:
- GrowableBitVector() : bits_(NULL) { }
-
- bool Contains(int value) const {
- if (!InBitsRange(value)) return false;
- return bits_->Contains(value);
- }
-
- void Add(int value, Zone* zone) {
- EnsureCapacity(value, zone);
- bits_->Add(value);
- }
-
- private:
- static const int kInitialLength = 1024;
-
- bool InBitsRange(int value) const {
- return bits_ != NULL && bits_->length() > value;
- }
-
- void EnsureCapacity(int value, Zone* zone) {
- if (InBitsRange(value)) return;
- int new_length = bits_ == NULL ? kInitialLength : bits_->length();
- while (new_length <= value) new_length *= 2;
- BitVector* new_bits = new(zone) BitVector(new_length, zone);
- if (bits_ != NULL) new_bits->CopyFrom(*bits_);
- bits_ = new_bits;
- }
-
- BitVector* bits_;
-};
-
-
class LAllocator BASE_EMBEDDED {
public:
LAllocator(int first_virtual_register, HGraph* graph);
@@ -479,6 +445,13 @@ class LAllocator BASE_EMBEDDED {
void Verify() const;
#endif
+ BitVector* assigned_registers() {
+ return assigned_registers_;
+ }
+ BitVector* assigned_double_registers() {
+ return assigned_double_registers_;
+ }
+
private:
void MeetRegisterConstraints();
void ResolvePhis();
@@ -571,6 +544,11 @@ class LAllocator BASE_EMBEDDED {
HBasicBlock* block,
HBasicBlock* pred);
+ inline void SetLiveRangeAssignedRegister(LiveRange* range,
+ int reg,
+ RegisterKind register_kind,
+ Zone* zone);
+
// Return parallel move that should be used to connect ranges split at the
// given position.
LParallelMove* GetConnectingParallelMove(LifetimePosition pos);
@@ -608,9 +586,9 @@ class LAllocator BASE_EMBEDDED {
ZoneList<LiveRange*> live_ranges_;
// Lists of live ranges
- EmbeddedVector<LiveRange*, Register::kNumAllocatableRegisters>
+ EmbeddedVector<LiveRange*, Register::kMaxNumAllocatableRegisters>
fixed_live_ranges_;
- EmbeddedVector<LiveRange*, DoubleRegister::kNumAllocatableRegisters>
+ EmbeddedVector<LiveRange*, DoubleRegister::kMaxNumAllocatableRegisters>
fixed_double_live_ranges_;
ZoneList<LiveRange*> unhandled_live_ranges_;
ZoneList<LiveRange*> active_live_ranges_;
@@ -625,6 +603,9 @@ class LAllocator BASE_EMBEDDED {
RegisterKind mode_;
int num_registers_;
+ BitVector* assigned_registers_;
+ BitVector* assigned_double_registers_;
+
HGraph* graph_;
bool has_osr_entry_;
diff --git a/src/3rdparty/v8/src/lithium.cc b/src/3rdparty/v8/src/lithium.cc
index eb2198d..09c0f44 100644
--- a/src/3rdparty/v8/src/lithium.cc
+++ b/src/3rdparty/v8/src/lithium.cc
@@ -174,6 +174,9 @@ void LParallelMove::PrintDataTo(StringStream* stream) const {
void LEnvironment::PrintTo(StringStream* stream) {
stream->Add("[id=%d|", ast_id().ToInt());
+ if (deoptimization_index() != Safepoint::kNoDeoptimizationIndex) {
+ stream->Add("deopt_id=%d|", deoptimization_index());
+ }
stream->Add("[parameters=%d|", parameter_count());
stream->Add("[arguments_stack_height=%d|", arguments_stack_height());
for (int i = 0; i < values_.length(); ++i) {
@@ -257,6 +260,28 @@ int ElementsKindToShiftSize(ElementsKind elements_kind) {
}
+int StackSlotOffset(int index) {
+ if (index >= 0) {
+ // Local or spill slot. Skip the frame pointer, function, and
+ // context in the fixed part of the frame.
+ return -(index + 3) * kPointerSize;
+ } else {
+ // Incoming parameter. Skip the return address.
+ return -(index - 1) * kPointerSize;
+ }
+}
+
+
+LChunk::LChunk(CompilationInfo* info, HGraph* graph)
+ : spill_slot_count_(0),
+ info_(info),
+ graph_(graph),
+ instructions_(32, graph->zone()),
+ pointer_maps_(8, graph->zone()),
+ inlined_closures_(1, graph->zone()) {
+}
+
+
LLabel* LChunk::GetLabel(int block_id) const {
HBasicBlock* block = graph_->blocks()->at(block_id);
int first_instruction = block->first_instruction_index();
@@ -391,7 +416,7 @@ Representation LChunk::LookupLiteralRepresentation(
LChunk* LChunk::NewChunk(HGraph* graph) {
- NoHandleAllocation no_handles;
+ NoHandleAllocation no_handles(graph->isolate());
AssertNoAllocation no_gc;
int values = graph->GetMaximumValueID();
@@ -410,12 +435,18 @@ LChunk* LChunk::NewChunk(HGraph* graph) {
return NULL;
}
+ chunk->set_allocated_double_registers(
+ allocator.assigned_double_registers());
+
return chunk;
}
-Handle<Code> LChunk::Codegen() {
+Handle<Code> LChunk::Codegen(Code::Kind kind) {
MacroAssembler assembler(info()->isolate(), NULL, 0);
+ LOG_CODE_EVENT(info()->isolate(),
+ CodeStartLinePosInfoRecordEvent(
+ assembler.positions_recorder()));
LCodeGen generator(this, &assembler, info());
MarkEmptyBlocks();
@@ -425,10 +456,18 @@ Handle<Code> LChunk::Codegen() {
PrintF("Crankshaft Compiler - ");
}
CodeGenerator::MakeCodePrologue(info());
- Code::Flags flags = Code::ComputeFlags(Code::OPTIMIZED_FUNCTION);
+ Code::Flags flags = Code::ComputeFlags(kind);
Handle<Code> code =
CodeGenerator::MakeCodeEpilogue(&assembler, flags, info());
generator.FinishCode(code);
+
+ if (!code.is_null()) {
+ void* jit_handler_data =
+ assembler.positions_recorder()->DetachJITHandlerData();
+ LOG_CODE_EVENT(info()->isolate(),
+ CodeEndLinePosInfoRecordEvent(*code, jit_handler_data));
+ }
+
CodeGenerator::PrintCode(code, info());
return code;
}
@@ -436,4 +475,21 @@ Handle<Code> LChunk::Codegen() {
}
+void LChunk::set_allocated_double_registers(BitVector* allocated_registers) {
+ allocated_double_registers_ = allocated_registers;
+ BitVector* doubles = allocated_double_registers();
+ BitVector::Iterator iterator(doubles);
+ while (!iterator.Done()) {
+ if (info()->saves_caller_doubles()) {
+ if (kDoubleSize == kPointerSize * 2) {
+ spill_slot_count_ += 2;
+ } else {
+ spill_slot_count_++;
+ }
+ }
+ iterator.Advance();
+ }
+}
+
+
} } // namespace v8::internal
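
The new StackSlotOffset maps spill slots (index >= 0) and incoming parameters (index < 0) to frame-pointer-relative offsets. A worked check of the arithmetic, assuming a 64-bit target where kPointerSize is 8:

#include <cassert>

const int kPointerSize = 8;  // assumption for this sketch

int StackSlotOffsetSketch(int index) {
  if (index >= 0) {
    // Local or spill slot: skip frame pointer, function, and context.
    return -(index + 3) * kPointerSize;
  }
  // Incoming parameter: skip the return address.
  return -(index - 1) * kPointerSize;
}

int main() {
  assert(StackSlotOffsetSketch(0) == -24);  // first spill slot, below fp
  assert(StackSlotOffsetSketch(1) == -32);
  assert(StackSlotOffsetSketch(-1) == 16);  // first parameter, above fp
  assert(StackSlotOffsetSketch(-2) == 24);
  return 0;
}
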
diff --git a/src/3rdparty/v8/src/lithium.h b/src/3rdparty/v8/src/lithium.h
index b4eb2bb..420a262 100644
--- a/src/3rdparty/v8/src/lithium.h
+++ b/src/3rdparty/v8/src/lithium.h
@@ -156,8 +156,8 @@ class LUnallocated: public LOperand {
};
static const int kMaxVirtualRegisters = 1 << kVirtualRegisterWidth;
- static const int kMaxFixedIndex = (1 << kFixedIndexWidth) - 1;
- static const int kMinFixedIndex = -(1 << kFixedIndexWidth);
+ static const int kMaxFixedIndex = (1 << (kFixedIndexWidth - 1)) - 1;
+ static const int kMinFixedIndex = -(1 << (kFixedIndexWidth - 1));
bool HasAnyPolicy() const {
return policy() == ANY;
@@ -581,6 +581,7 @@ class ShallowIterator BASE_EMBEDDED {
LOperand* Current() {
ASSERT(!Done());
+ ASSERT(env_->values()->at(current_) != NULL);
return env_->values()->at(current_);
}
@@ -622,6 +623,7 @@ class DeepIterator BASE_EMBEDDED {
LOperand* Current() {
ASSERT(!current_iterator_.Done());
+ ASSERT(current_iterator_.Current() != NULL);
return current_iterator_.Current();
}
@@ -682,22 +684,22 @@ class LChunk: public ZoneObject {
Zone* zone() const { return info_->zone(); }
- Handle<Code> Codegen();
+ Handle<Code> Codegen(Code::Kind kind);
+
+ void set_allocated_double_registers(BitVector* allocated_registers);
+ BitVector* allocated_double_registers() {
+ return allocated_double_registers_;
+ }
protected:
- LChunk(CompilationInfo* info, HGraph* graph)
- : spill_slot_count_(0),
- info_(info),
- graph_(graph),
- instructions_(32, graph->zone()),
- pointer_maps_(8, graph->zone()),
- inlined_closures_(1, graph->zone()) { }
+ LChunk(CompilationInfo* info, HGraph* graph);
int spill_slot_count_;
private:
CompilationInfo* info_;
HGraph* const graph_;
+ BitVector* allocated_double_registers_;
ZoneList<LInstruction*> instructions_;
ZoneList<LPointerMap*> pointer_maps_;
ZoneList<Handle<JSFunction> > inlined_closures_;
@@ -705,6 +707,14 @@ class LChunk: public ZoneObject {
int ElementsKindToShiftSize(ElementsKind elements_kind);
+int StackSlotOffset(int index);
+
+enum NumberUntagDMode {
+ NUMBER_CANDIDATE_IS_SMI,
+ NUMBER_CANDIDATE_IS_SMI_OR_HOLE,
+ NUMBER_CANDIDATE_IS_SMI_CONVERT_HOLE,
+ NUMBER_CANDIDATE_IS_ANY_TAGGED
+};
} } // namespace v8::internal
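
The kMaxFixedIndex/kMinFixedIndex correction reflects that the fixed index is a signed value packed into kFixedIndexWidth bits: a w-bit two's-complement field represents [-(2^(w-1)), 2^(w-1) - 1], not [-(2^w), (2^w) - 1] as the old constants implied. A compile-time check of that range, with an assumed width:

// Assumed width for illustration; the real kFixedIndexWidth comes from
// LUnallocated's bit-field layout.
const int kFixedIndexWidth = 6;

const int kMaxFixedIndex = (1 << (kFixedIndexWidth - 1)) - 1;  //  31
const int kMinFixedIndex = -(1 << (kFixedIndexWidth - 1));     // -32

static_assert(kMaxFixedIndex - kMinFixedIndex + 1 == (1 << kFixedIndexWidth),
              "a w-bit signed field has exactly 2^w representable values");
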
diff --git a/src/3rdparty/v8/src/liveedit-debugger.js b/src/3rdparty/v8/src/liveedit-debugger.js
index cfcdb81..451b146 100644
--- a/src/3rdparty/v8/src/liveedit-debugger.js
+++ b/src/3rdparty/v8/src/liveedit-debugger.js
@@ -76,7 +76,17 @@ Debug.LiveEdit = new function() {
try {
new_compile_info = GatherCompileInfo(new_source, script);
} catch (e) {
- throw new Failure("Failed to compile new version of script: " + e);
+ var failure =
+ new Failure("Failed to compile new version of script: " + e);
+ if (e instanceof SyntaxError) {
+ var details = {
+ type: "liveedit_compile_error",
+ syntaxErrorMessage: e.message
+ };
+ CopyErrorPositionToDetails(e, details);
+ failure.details = details;
+ }
+ throw failure;
}
var root_new_node = BuildCodeInfoTree(new_compile_info);
@@ -978,6 +988,31 @@ Debug.LiveEdit = new function() {
return "LiveEdit Failure: " + this.message;
};
+ function CopyErrorPositionToDetails(e, details) {
+ function createPositionStruct(script, position) {
+ if (position == -1) return;
+ var location = script.locationFromPosition(position, true);
+ if (location == null) return;
+ return {
+ line: location.line + 1,
+ column: location.column + 1,
+ position: position
+ };
+ }
+
+ if (!("scriptObject" in e) || !("startPosition" in e)) {
+ return;
+ }
+
+ var script = e.scriptObject;
+
+ var position_struct = {
+ start: createPositionStruct(script, e.startPosition),
+ end: createPositionStruct(script, e.endPosition)
+ };
+ details.position = position_struct;
+ }
+
// A testing entry.
function GetPcFromSourcePos(func, source_pos) {
return %GetFunctionCodePositionFromSource(func, source_pos);
diff --git a/src/3rdparty/v8/src/liveedit.cc b/src/3rdparty/v8/src/liveedit.cc
index dc7d4b1..382f209 100644
--- a/src/3rdparty/v8/src/liveedit.cc
+++ b/src/3rdparty/v8/src/liveedit.cc
@@ -36,6 +36,7 @@
#include "debug.h"
#include "deoptimizer.h"
#include "global-handles.h"
+#include "messages.h"
#include "parser.h"
#include "scopeinfo.h"
#include "scopes.h"
@@ -348,23 +349,26 @@ static void NarrowDownInput(SubrangableInput* input,
// Each chunk is stored as 3 array elements: (pos1_begin, pos1_end, pos2_end).
class CompareOutputArrayWriter {
public:
- CompareOutputArrayWriter()
- : array_(FACTORY->NewJSArray(10)), current_size_(0) {}
+ explicit CompareOutputArrayWriter(Isolate* isolate)
+ : array_(isolate->factory()->NewJSArray(10)), current_size_(0) {}
Handle<JSArray> GetResult() {
return array_;
}
void WriteChunk(int char_pos1, int char_pos2, int char_len1, int char_len2) {
+ Isolate* isolate = array_->GetIsolate();
SetElementNonStrict(array_,
- current_size_,
- Handle<Object>(Smi::FromInt(char_pos1)));
+ current_size_,
+ Handle<Object>(Smi::FromInt(char_pos1), isolate));
SetElementNonStrict(array_,
current_size_ + 1,
- Handle<Object>(Smi::FromInt(char_pos1 + char_len1)));
+ Handle<Object>(Smi::FromInt(char_pos1 + char_len1),
+ isolate));
SetElementNonStrict(array_,
current_size_ + 2,
- Handle<Object>(Smi::FromInt(char_pos2 + char_len2)));
+ Handle<Object>(Smi::FromInt(char_pos2 + char_len2),
+ isolate));
current_size_ += 3;
}
@@ -526,7 +530,8 @@ class TokenizingLineArrayCompareOutput : public SubrangableOutput {
TokenizingLineArrayCompareOutput(LineEndsWrapper line_ends1,
LineEndsWrapper line_ends2,
Handle<String> s1, Handle<String> s2)
- : line_ends1_(line_ends1), line_ends2_(line_ends2), s1_(s1), s2_(s2),
+ : array_writer_(s1->GetIsolate()),
+ line_ends1_(line_ends1), line_ends2_(line_ends2), s1_(s1), s2_(s2),
subrange_offset1_(0), subrange_offset2_(0) {
}
@@ -541,7 +546,7 @@ class TokenizingLineArrayCompareOutput : public SubrangableOutput {
if (char_len1 < CHUNK_LEN_LIMIT && char_len2 < CHUNK_LEN_LIMIT) {
// Chunk is small enough to conduct a nested token-level diff.
- HandleScope subTaskScope;
+ HandleScope subTaskScope(s1_->GetIsolate());
TokensCompareInput tokens_input(s1_, char_pos1, char_len1,
s2_, char_pos2, char_len2);
@@ -619,7 +624,7 @@ static void CompileScriptForTracker(Isolate* isolate, Handle<Script> script) {
// Unwraps JSValue object, returning its field "value"
static Handle<Object> UnwrapJSValue(Handle<JSValue> jsValue) {
- return Handle<Object>(jsValue->value());
+ return Handle<Object>(jsValue->value(), jsValue->GetIsolate());
}
@@ -670,6 +675,9 @@ class JSArrayBasedStruct {
Handle<JSArray> GetJSArray() {
return array_;
}
+ Isolate* isolate() const {
+ return array_->GetIsolate();
+ }
protected:
void SetField(int field_position, Handle<Object> value) {
@@ -678,7 +686,7 @@ class JSArrayBasedStruct {
void SetSmiValueField(int field_position, int value) {
SetElementNonStrict(array_,
field_position,
- Handle<Smi>(Smi::FromInt(value)));
+ Handle<Smi>(Smi::FromInt(value), isolate()));
}
Object* GetField(int field_position) {
return array_->GetElementNoExceptionThrown(field_position);
@@ -703,12 +711,14 @@ class FunctionInfoWrapper : public JSArrayBasedStruct<FunctionInfoWrapper> {
: JSArrayBasedStruct<FunctionInfoWrapper>(array) {
}
void SetInitialProperties(Handle<String> name, int start_position,
- int end_position, int param_num, int parent_index) {
- HandleScope scope;
+ int end_position, int param_num,
+ int literal_count, int parent_index) {
+ HandleScope scope(isolate());
this->SetField(kFunctionNameOffset_, name);
this->SetSmiValueField(kStartPositionOffset_, start_position);
this->SetSmiValueField(kEndPositionOffset_, end_position);
this->SetSmiValueField(kParamNumOffset_, param_num);
+ this->SetSmiValueField(kLiteralNumOffset_, literal_count);
this->SetSmiValueField(kParentIndexOffset_, parent_index);
}
void SetFunctionCode(Handle<Code> function_code,
@@ -726,6 +736,9 @@ class FunctionInfoWrapper : public JSArrayBasedStruct<FunctionInfoWrapper> {
Handle<JSValue> info_holder = WrapInJSValue(info);
this->SetField(kSharedFunctionInfoOffset_, info_holder);
}
+ int GetLiteralCount() {
+ return this->GetSmiValueField(kLiteralNumOffset_);
+ }
int GetParentIndex() {
return this->GetSmiValueField(kParentIndexOffset_);
}
@@ -759,7 +772,8 @@ class FunctionInfoWrapper : public JSArrayBasedStruct<FunctionInfoWrapper> {
static const int kOuterScopeInfoOffset_ = 6;
static const int kParentIndexOffset_ = 7;
static const int kSharedFunctionInfoOffset_ = 8;
- static const int kSize_ = 9;
+ static const int kLiteralNumOffset_ = 9;
+ static const int kSize_ = 10;
friend class JSArrayBasedStruct<FunctionInfoWrapper>;
};
@@ -781,7 +795,7 @@ class SharedInfoWrapper : public JSArrayBasedStruct<SharedInfoWrapper> {
void SetProperties(Handle<String> name, int start_position, int end_position,
Handle<SharedFunctionInfo> info) {
- HandleScope scope;
+ HandleScope scope(isolate());
this->SetField(kFunctionNameOffset_, name);
Handle<JSValue> info_holder = WrapInJSValue(info);
this->SetField(kSharedInfoOffset_, info_holder);
@@ -808,17 +822,18 @@ class SharedInfoWrapper : public JSArrayBasedStruct<SharedInfoWrapper> {
class FunctionInfoListener {
public:
- FunctionInfoListener() {
+ explicit FunctionInfoListener(Isolate* isolate) {
current_parent_index_ = -1;
len_ = 0;
- result_ = FACTORY->NewJSArray(10);
+ result_ = isolate->factory()->NewJSArray(10);
}
void FunctionStarted(FunctionLiteral* fun) {
- HandleScope scope;
+ HandleScope scope(isolate());
FunctionInfoWrapper info = FunctionInfoWrapper::Create();
info.SetInitialProperties(fun->name(), fun->start_position(),
fun->end_position(), fun->parameter_count(),
+ fun->materialized_literal_count(),
current_parent_index_);
current_parent_index_ = len_;
SetElementNonStrict(result_, len_, info.GetJSArray());
@@ -826,7 +841,7 @@ class FunctionInfoListener {
}
void FunctionDone() {
- HandleScope scope;
+ HandleScope scope(isolate());
FunctionInfoWrapper info =
FunctionInfoWrapper::cast(
result_->GetElementNoExceptionThrown(current_parent_index_));
@@ -839,7 +854,9 @@ class FunctionInfoListener {
FunctionInfoWrapper info =
FunctionInfoWrapper::cast(
result_->GetElementNoExceptionThrown(current_parent_index_));
- info.SetFunctionCode(function_code, Handle<Object>(HEAP->null_value()));
+ info.SetFunctionCode(function_code,
+ Handle<Object>(isolate()->heap()->null_value(),
+ isolate()));
}
// Saves full information about a function: its code, its scope info
@@ -853,20 +870,23 @@ class FunctionInfoListener {
FunctionInfoWrapper::cast(
result_->GetElementNoExceptionThrown(current_parent_index_));
info.SetFunctionCode(Handle<Code>(shared->code()),
- Handle<Object>(shared->scope_info()));
+ Handle<Object>(shared->scope_info(), isolate()));
info.SetSharedFunctionInfo(shared);
- Handle<Object> scope_info_list(SerializeFunctionScope(scope, zone));
+ Handle<Object> scope_info_list(SerializeFunctionScope(scope, zone),
+ isolate());
info.SetOuterScopeInfo(scope_info_list);
}
Handle<JSArray> GetResult() { return result_; }
private:
+ Isolate* isolate() const { return result_->GetIsolate(); }
+
Object* SerializeFunctionScope(Scope* scope, Zone* zone) {
- HandleScope handle_scope;
+ HandleScope handle_scope(isolate());
- Handle<JSArray> scope_info_list = FACTORY->NewJSArray(10);
+ Handle<JSArray> scope_info_list = isolate()->factory()->NewJSArray(10);
int scope_info_length = 0;
// Saves some description of the scope. It stores the names and indexes of
@@ -874,7 +894,7 @@ class FunctionInfoListener {
// scopes of this chain.
Scope* outer_scope = scope->outer_scope();
if (outer_scope == NULL) {
- return HEAP->undefined_value();
+ return isolate()->heap()->undefined_value();
}
do {
ZoneList<Variable*> stack_list(outer_scope->StackLocalCount(), zone);
@@ -890,12 +910,13 @@ class FunctionInfoListener {
SetElementNonStrict(
scope_info_list,
scope_info_length,
- Handle<Smi>(Smi::FromInt(context_list[i]->index())));
+ Handle<Smi>(Smi::FromInt(context_list[i]->index()), isolate()));
scope_info_length++;
}
SetElementNonStrict(scope_info_list,
scope_info_length,
- Handle<Object>(HEAP->null_value()));
+ Handle<Object>(isolate()->heap()->null_value(),
+ isolate()));
scope_info_length++;
outer_scope = outer_scope->outer_scope();
@@ -914,20 +935,71 @@ JSArray* LiveEdit::GatherCompileInfo(Handle<Script> script,
Handle<String> source) {
Isolate* isolate = Isolate::Current();
- FunctionInfoListener listener;
- Handle<Object> original_source = Handle<Object>(script->source());
+ FunctionInfoListener listener(isolate);
+ Handle<Object> original_source =
+ Handle<Object>(script->source(), isolate);
script->set_source(*source);
isolate->set_active_function_info_listener(&listener);
- CompileScriptForTracker(isolate, script);
+
+ {
+ // Creating a verbose TryCatch via the public API is currently the only way
+ // to force the code location to be saved. We do not use the object directly.
+ v8::TryCatch try_catch;
+ try_catch.SetVerbose(true);
+
+ // A logical 'try' section.
+ CompileScriptForTracker(isolate, script);
+ }
+
+ // A logical 'catch' section.
+ Handle<JSObject> rethrow_exception;
+ if (isolate->has_pending_exception()) {
+ Handle<Object> exception(isolate->pending_exception()->ToObjectChecked(),
+ isolate);
+ MessageLocation message_location = isolate->GetMessageLocation();
+
+ isolate->clear_pending_message();
+ isolate->clear_pending_exception();
+
+ // If possible, copy positions from message object to exception object.
+ if (exception->IsJSObject() && !message_location.script().is_null()) {
+ rethrow_exception = Handle<JSObject>::cast(exception);
+
+ Factory* factory = isolate->factory();
+ Handle<String> start_pos_key = factory->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("startPosition"));
+ Handle<String> end_pos_key = factory->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("endPosition"));
+ Handle<String> script_obj_key = factory->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("scriptObject"));
+ Handle<Smi> start_pos(
+ Smi::FromInt(message_location.start_pos()), isolate);
+ Handle<Smi> end_pos(Smi::FromInt(message_location.end_pos()), isolate);
+ Handle<JSValue> script_obj = GetScriptWrapper(message_location.script());
+ JSReceiver::SetProperty(
+ rethrow_exception, start_pos_key, start_pos, NONE, kNonStrictMode);
+ JSReceiver::SetProperty(
+ rethrow_exception, end_pos_key, end_pos, NONE, kNonStrictMode);
+ JSReceiver::SetProperty(
+ rethrow_exception, script_obj_key, script_obj, NONE, kNonStrictMode);
+ }
+ }
+
+ // A logical 'finally' section.
isolate->set_active_function_info_listener(NULL);
script->set_source(*original_source);
- return *(listener.GetResult());
+ if (rethrow_exception.is_null()) {
+ return *(listener.GetResult());
+ } else {
+ isolate->Throw(*rethrow_exception);
+ return 0;
+ }
}
void LiveEdit::WrapSharedFunctionInfos(Handle<JSArray> array) {
- HandleScope scope;
+ HandleScope scope(array->GetIsolate());
int len = GetArrayLength(array);
for (int i = 0; i < len; i++) {
Handle<SharedFunctionInfo> info(
@@ -991,10 +1063,11 @@ static void ReplaceCodeObject(Handle<Code> original,
// Since we are not in an incremental marking phase we can write pointers
// to code objects (that are never in new space) without worrying about
// write barriers.
- HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask,
+ Heap* heap = original->GetHeap();
+ heap->CollectAllGarbage(Heap::kMakeHeapIterableMask,
"liveedit.cc ReplaceCodeObject");
- ASSERT(!HEAP->InNewSpace(*substitution));
+ ASSERT(!heap->InNewSpace(*substitution));
AssertNoAllocation no_allocations_please;
@@ -1003,17 +1076,140 @@ static void ReplaceCodeObject(Handle<Code> original,
// Iterate over all roots. Stack frames may have pointers into the original
// code, so temporarily replace those pointers with offset numbers
// in the prologue/epilogue.
- HEAP->IterateRoots(&visitor, VISIT_ALL);
+ heap->IterateRoots(&visitor, VISIT_ALL);
// Now iterate over all pointers of all objects, including code_target
// implicit pointers.
- HeapIterator iterator;
+ HeapIterator iterator(heap);
for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
obj->Iterate(&visitor);
}
}
+// Patch function literals.
+// The name 'literals' is a misnomer: it is really a cache for complex object
+// boilerplates and for the native context, so cached values must be cleared.
+// Additionally, a new array may need to be allocated if the number of
+// literals changed.
+class LiteralFixer {
+ public:
+ static void PatchLiterals(FunctionInfoWrapper* compile_info_wrapper,
+ Handle<SharedFunctionInfo> shared_info,
+ Isolate* isolate) {
+ int new_literal_count = compile_info_wrapper->GetLiteralCount();
+ if (new_literal_count > 0) {
+ new_literal_count += JSFunction::kLiteralsPrefixSize;
+ }
+ int old_literal_count = shared_info->num_literals();
+
+ if (old_literal_count == new_literal_count) {
+ // If the literal count didn't change, simply go over all functions
+ // and clear their literal arrays.
+ ClearValuesVisitor visitor;
+ IterateJSFunctions(*shared_info, &visitor);
+ } else {
+ // When the literal count changes, we have to create new array instances.
+ // Since we cannot create instances while iterating the heap, we first
+ // collect all functions and then fix their literal arrays.
+ Handle<FixedArray> function_instances =
+ CollectJSFunctions(shared_info, isolate);
+ for (int i = 0; i < function_instances->length(); i++) {
+ Handle<JSFunction> fun(JSFunction::cast(function_instances->get(i)));
+ Handle<FixedArray> old_literals(fun->literals());
+ Handle<FixedArray> new_literals =
+ isolate->factory()->NewFixedArray(new_literal_count);
+ if (new_literal_count > 0) {
+ Handle<Context> native_context;
+ if (old_literals->length() >
+ JSFunction::kLiteralNativeContextIndex) {
+ native_context = Handle<Context>(
+ JSFunction::NativeContextFromLiterals(fun->literals()));
+ } else {
+ native_context = Handle<Context>(fun->context()->native_context());
+ }
+ new_literals->set(JSFunction::kLiteralNativeContextIndex,
+ *native_context);
+ }
+ fun->set_literals(*new_literals);
+ }
+
+ shared_info->set_num_literals(new_literal_count);
+ }
+ }
+
+ private:
+ // Iterates over all function instances in the heap that refer to the
+ // provided shared_info.
+ template<typename Visitor>
+ static void IterateJSFunctions(SharedFunctionInfo* shared_info,
+ Visitor* visitor) {
+ AssertNoAllocation no_allocations_please;
+
+ HeapIterator iterator(shared_info->GetHeap());
+ for (HeapObject* obj = iterator.next(); obj != NULL;
+ obj = iterator.next()) {
+ if (obj->IsJSFunction()) {
+ JSFunction* function = JSFunction::cast(obj);
+ if (function->shared() == shared_info) {
+ visitor->visit(function);
+ }
+ }
+ }
+ }
+
+ // Finds all instances of JSFunction that refer to the provided shared_info
+ // and returns an array of them.
+ static Handle<FixedArray> CollectJSFunctions(
+ Handle<SharedFunctionInfo> shared_info, Isolate* isolate) {
+ CountVisitor count_visitor;
+ count_visitor.count = 0;
+ IterateJSFunctions(*shared_info, &count_visitor);
+ int size = count_visitor.count;
+
+ Handle<FixedArray> result = isolate->factory()->NewFixedArray(size);
+ if (size > 0) {
+ CollectVisitor collect_visitor(result);
+ IterateJSFunctions(*shared_info, &collect_visitor);
+ }
+ return result;
+ }
+
+ class ClearValuesVisitor {
+ public:
+ void visit(JSFunction* fun) {
+ FixedArray* literals = fun->literals();
+ int len = literals->length();
+ for (int j = JSFunction::kLiteralsPrefixSize; j < len; j++) {
+ literals->set_undefined(j);
+ }
+ }
+ };
+
+ class CountVisitor {
+ public:
+ void visit(JSFunction* fun) {
+ count++;
+ }
+ int count;
+ };
+
+ class CollectVisitor {
+ public:
+ explicit CollectVisitor(Handle<FixedArray> output)
+ : m_output(output), m_pos(0) {}
+
+ void visit(JSFunction* fun) {
+ m_output->set(m_pos, fun);
+ m_pos++;
+ }
+ private:
+ Handle<FixedArray> m_output;
+ int m_pos;
+ };
+};
+
+
// Check whether the code is natural function code (not a lazy-compile stub
// code).
static bool IsJSFunctionCode(Code* code) {
@@ -1044,23 +1240,15 @@ static bool IsInlined(JSFunction* function, SharedFunctionInfo* candidate) {
}
-class DependentFunctionsDeoptimizingVisitor : public OptimizedFunctionVisitor {
+class DependentFunctionFilter : public OptimizedFunctionFilter {
public:
- explicit DependentFunctionsDeoptimizingVisitor(
+ explicit DependentFunctionFilter(
SharedFunctionInfo* function_info)
: function_info_(function_info) {}
- virtual void EnterContext(Context* context) {
- }
-
- virtual void VisitFunction(JSFunction* function) {
- if (function->shared() == function_info_ ||
- IsInlined(function, function_info_)) {
- Deoptimizer::DeoptimizeFunction(function);
- }
- }
-
- virtual void LeaveContext(Context* context) {
+ virtual bool TakeFunction(JSFunction* function) {
+ return (function->shared() == function_info_ ||
+ IsInlined(function, function_info_));
}
private:
@@ -1071,18 +1259,19 @@ class DependentFunctionsDeoptimizingVisitor : public OptimizedFunctionVisitor {
static void DeoptimizeDependentFunctions(SharedFunctionInfo* function_info) {
AssertNoAllocation no_allocation;
- DependentFunctionsDeoptimizingVisitor visitor(function_info);
- Deoptimizer::VisitAllOptimizedFunctions(&visitor);
+ DependentFunctionFilter filter(function_info);
+ Deoptimizer::DeoptimizeAllFunctionsWith(&filter);
}
MaybeObject* LiveEdit::ReplaceFunctionCode(
Handle<JSArray> new_compile_info_array,
Handle<JSArray> shared_info_array) {
- HandleScope scope;
+ Isolate* isolate = Isolate::Current();
+ HandleScope scope(isolate);
if (!SharedInfoWrapper::IsInstance(shared_info_array)) {
- return Isolate::Current()->ThrowIllegalOperation();
+ return isolate->ThrowIllegalOperation();
}
FunctionInfoWrapper compile_info_wrapper(new_compile_info_array);
@@ -1090,7 +1279,7 @@ MaybeObject* LiveEdit::ReplaceFunctionCode(
Handle<SharedFunctionInfo> shared_info = shared_info_wrapper.GetInfo();
- HEAP->EnsureHeapIsIterable();
+ isolate->heap()->EnsureHeapIsIterable();
if (IsJSFunctionCode(shared_info->code())) {
Handle<Code> code = compile_info_wrapper.GetFunctionCode();
@@ -1113,32 +1302,34 @@ MaybeObject* LiveEdit::ReplaceFunctionCode(
shared_info->set_start_position(start_position);
shared_info->set_end_position(end_position);
+ LiteralFixer::PatchLiterals(&compile_info_wrapper, shared_info, isolate);
+
shared_info->set_construct_stub(
- Isolate::Current()->builtins()->builtin(
- Builtins::kJSConstructStubGeneric));
+ isolate->builtins()->builtin(Builtins::kJSConstructStubGeneric));
DeoptimizeDependentFunctions(*shared_info);
- Isolate::Current()->compilation_cache()->Remove(shared_info);
+ isolate->compilation_cache()->Remove(shared_info);
- return HEAP->undefined_value();
+ return isolate->heap()->undefined_value();
}
MaybeObject* LiveEdit::FunctionSourceUpdated(
Handle<JSArray> shared_info_array) {
- HandleScope scope;
+ Isolate* isolate = shared_info_array->GetIsolate();
+ HandleScope scope(isolate);
if (!SharedInfoWrapper::IsInstance(shared_info_array)) {
- return Isolate::Current()->ThrowIllegalOperation();
+ return isolate->ThrowIllegalOperation();
}
SharedInfoWrapper shared_info_wrapper(shared_info_array);
Handle<SharedFunctionInfo> shared_info = shared_info_wrapper.GetInfo();
DeoptimizeDependentFunctions(*shared_info);
- Isolate::Current()->compilation_cache()->Remove(shared_info);
+ isolate->compilation_cache()->Remove(shared_info);
- return HEAP->undefined_value();
+ return isolate->heap()->undefined_value();
}
@@ -1373,15 +1564,16 @@ static Handle<Script> CreateScriptCopy(Handle<Script> original) {
Object* LiveEdit::ChangeScriptSource(Handle<Script> original_script,
Handle<String> new_source,
Handle<Object> old_script_name) {
+ Isolate* isolate = original_script->GetIsolate();
Handle<Object> old_script_object;
if (old_script_name->IsString()) {
Handle<Script> old_script = CreateScriptCopy(original_script);
old_script->set_name(String::cast(*old_script_name));
old_script_object = old_script;
- Isolate::Current()->debugger()->OnAfterCompile(
+ isolate->debugger()->OnAfterCompile(
old_script, Debugger::SEND_WHEN_DEBUGGING);
} else {
- old_script_object = Handle<Object>(HEAP->null_value());
+ old_script_object = isolate->factory()->null_value();
}
original_script->set_source(*new_source);
@@ -1427,6 +1619,7 @@ static bool CheckActivation(Handle<JSArray> shared_info_array,
Handle<JSFunction> function(
JSFunction::cast(JavaScriptFrame::cast(frame)->function()));
+ Isolate* isolate = shared_info_array->GetIsolate();
int len = GetArrayLength(shared_info_array);
for (int i = 0; i < len; i++) {
Object* element = shared_info_array->GetElementNoExceptionThrown(i);
@@ -1436,7 +1629,8 @@ static bool CheckActivation(Handle<JSArray> shared_info_array,
UnwrapSharedFunctionInfoFromJSValue(jsvalue);
if (function->shared() == *shared || IsInlined(*function, *shared)) {
- SetElementNonStrict(result, i, Handle<Smi>(Smi::FromInt(status)));
+ SetElementNonStrict(result, i, Handle<Smi>(Smi::FromInt(status),
+ isolate));
return true;
}
}
@@ -1489,7 +1683,7 @@ static const char* DropFrames(Vector<StackFrame*> frames,
Code* pre_top_frame_code = pre_top_frame->LookupCode();
bool frame_has_padding;
if (pre_top_frame_code->is_inline_cache_stub() &&
- pre_top_frame_code->ic_state() == DEBUG_BREAK) {
+ pre_top_frame_code->is_debug_break()) {
// OK, we can drop inline cache calls.
*mode = Debug::FRAME_DROPPED_IN_IC_CALL;
frame_has_padding = Debug::FramePaddingLayout::kIsSupported;
@@ -1639,7 +1833,7 @@ static const char* DropActivationsInActiveThreadImpl(
Isolate* isolate = Isolate::Current();
Debug* debug = isolate->debug();
ZoneScope scope(zone, DELETE_ON_EXIT);
- Vector<StackFrame*> frames = CreateStackMap(zone);
+ Vector<StackFrame*> frames = CreateStackMap(isolate, zone);
int top_frame_index = -1;
@@ -1741,6 +1935,7 @@ static const char* DropActivationsInActiveThread(
return message;
}
+ Isolate* isolate = shared_info_array->GetIsolate();
int array_len = GetArrayLength(shared_info_array);
// Replace "blocked on active" with "replaced on active" status.
@@ -1748,7 +1943,7 @@ static const char* DropActivationsInActiveThread(
if (result->GetElement(i) ==
Smi::FromInt(LiveEdit::FUNCTION_BLOCKED_ON_ACTIVE_STACK)) {
Handle<Object> replaced(
- Smi::FromInt(LiveEdit::FUNCTION_REPLACED_ON_ACTIVE_STACK));
+ Smi::FromInt(LiveEdit::FUNCTION_REPLACED_ON_ACTIVE_STACK), isolate);
SetElementNonStrict(result, i, replaced);
}
}
@@ -1783,16 +1978,17 @@ class InactiveThreadActivationsChecker : public ThreadVisitor {
Handle<JSArray> LiveEdit::CheckAndDropActivations(
Handle<JSArray> shared_info_array, bool do_drop, Zone* zone) {
+ Isolate* isolate = shared_info_array->GetIsolate();
int len = GetArrayLength(shared_info_array);
- Handle<JSArray> result = FACTORY->NewJSArray(len);
+ Handle<JSArray> result = isolate->factory()->NewJSArray(len);
// Fill the default values.
for (int i = 0; i < len; i++) {
SetElementNonStrict(
result,
i,
- Handle<Smi>(Smi::FromInt(FUNCTION_AVAILABLE_FOR_PATCH)));
+ Handle<Smi>(Smi::FromInt(FUNCTION_AVAILABLE_FOR_PATCH), isolate));
}
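
LiteralFixer's CountVisitor/CollectVisitor pair above is a two-pass heap-iteration pattern: allocation is forbidden while iterating the heap, so the first pass only counts matches, the array is allocated between passes, and the second pass fills it. A generic sketch of the same shape in standard C++, independent of the V8 heap API:

#include <vector>

struct Fn { int shared_id; };  // stand-in for a heap-allocated JSFunction

template <typename Visitor>
void IterateFns(const std::vector<Fn>& heap, int shared_id, Visitor* v) {
  for (const Fn& fn : heap) {
    // No allocation may happen inside this loop in the real heap iterator.
    if (fn.shared_id == shared_id) v->visit(fn);
  }
}

struct CountVisitor {
  int count = 0;
  void visit(const Fn&) { ++count; }
};

struct CollectVisitor {
  explicit CollectVisitor(Fn* out) : out_(out), pos_(0) {}
  void visit(const Fn& fn) { out_[pos_++] = fn; }
  Fn* out_;
  int pos_;
};

std::vector<Fn> Collect(const std::vector<Fn>& heap, int shared_id) {
  CountVisitor counter;
  IterateFns(heap, shared_id, &counter);    // pass 1: count only
  std::vector<Fn> result(counter.count);    // allocate between passes
  CollectVisitor collector(result.data());
  IterateFns(heap, shared_id, &collector);  // pass 2: fill
  return result;
}
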
diff --git a/src/3rdparty/v8/src/liveobjectlist-inl.h b/src/3rdparty/v8/src/liveobjectlist-inl.h
deleted file mode 100644
index 2bc2296..0000000
--- a/src/3rdparty/v8/src/liveobjectlist-inl.h
+++ /dev/null
@@ -1,126 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_LIVEOBJECTLIST_INL_H_
-#define V8_LIVEOBJECTLIST_INL_H_
-
-#include "v8.h"
-
-#include "liveobjectlist.h"
-
-namespace v8 {
-namespace internal {
-
-#ifdef LIVE_OBJECT_LIST
-
-void LiveObjectList::GCEpilogue() {
- if (!NeedLOLProcessing()) return;
- GCEpiloguePrivate();
-}
-
-
-void LiveObjectList::GCPrologue() {
- if (!NeedLOLProcessing()) return;
-#ifdef VERIFY_LOL
- if (FLAG_verify_lol) {
- Verify();
- }
-#endif
-}
-
-
-void LiveObjectList::IterateElements(ObjectVisitor* v) {
- if (!NeedLOLProcessing()) return;
- IterateElementsPrivate(v);
-}
-
-
-void LiveObjectList::ProcessNonLive(HeapObject* obj) {
- // Only do work if we have at least one list to process.
- if (last()) DoProcessNonLive(obj);
-}
-
-
-void LiveObjectList::UpdateReferencesForScavengeGC() {
- if (LiveObjectList::NeedLOLProcessing()) {
- UpdateLiveObjectListVisitor update_visitor;
- LiveObjectList::IterateElements(&update_visitor);
- }
-}
-
-
-LiveObjectList* LiveObjectList::FindLolForId(int id,
- LiveObjectList* start_lol) {
- if (id != 0) {
- LiveObjectList* lol = start_lol;
- while (lol != NULL) {
- if (lol->id() == id) {
- return lol;
- }
- lol = lol->prev_;
- }
- }
- return NULL;
-}
-
-
-// Iterates the elements in every lol and returns the one that matches the
-// specified key. If no matching element is found, then it returns NULL.
-template <typename T>
-inline LiveObjectList::Element*
-LiveObjectList::FindElementFor(T (*GetValue)(LiveObjectList::Element*), T key) {
- LiveObjectList* lol = last();
- while (lol != NULL) {
- Element* elements = lol->elements_;
- for (int i = 0; i < lol->obj_count_; i++) {
- Element* element = &elements[i];
- if (GetValue(element) == key) {
- return element;
- }
- }
- lol = lol->prev_;
- }
- return NULL;
-}
-
-
-inline int LiveObjectList::GetElementId(LiveObjectList::Element* element) {
- return element->id_;
-}
-
-
-inline HeapObject*
-LiveObjectList::GetElementObj(LiveObjectList::Element* element) {
- return element->obj_;
-}
-
-#endif // LIVE_OBJECT_LIST
-
-} } // namespace v8::internal
-
-#endif // V8_LIVEOBJECTLIST_INL_H_
-
diff --git a/src/3rdparty/v8/src/liveobjectlist.cc b/src/3rdparty/v8/src/liveobjectlist.cc
deleted file mode 100644
index 6b89cf6..0000000
--- a/src/3rdparty/v8/src/liveobjectlist.cc
+++ /dev/null
@@ -1,2631 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifdef LIVE_OBJECT_LIST
-
-#include <ctype.h>
-#include <stdlib.h>
-
-#include "v8.h"
-
-#include "checks.h"
-#include "global-handles.h"
-#include "heap.h"
-#include "inspector.h"
-#include "isolate.h"
-#include "list-inl.h"
-#include "liveobjectlist-inl.h"
-#include "string-stream.h"
-#include "v8utils.h"
-#include "v8conversions.h"
-
-namespace v8 {
-namespace internal {
-
-
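-// Comparer signature expected by the bsearch() calls below.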
-typedef int (*RawComparer)(const void*, const void*);
-
-
-#ifdef CHECK_ALL_OBJECT_TYPES
-
-#define DEBUG_LIVE_OBJECT_TYPES(v) \
- v(Smi, "unexpected: Smi") \
- \
- v(CodeCache, "unexpected: CodeCache") \
- v(BreakPointInfo, "unexpected: BreakPointInfo") \
- v(DebugInfo, "unexpected: DebugInfo") \
- v(TypeSwitchInfo, "unexpected: TypeSwitchInfo") \
- v(SignatureInfo, "unexpected: SignatureInfo") \
- v(Script, "unexpected: Script") \
- v(ObjectTemplateInfo, "unexpected: ObjectTemplateInfo") \
- v(FunctionTemplateInfo, "unexpected: FunctionTemplateInfo") \
- v(CallHandlerInfo, "unexpected: CallHandlerInfo") \
- v(InterceptorInfo, "unexpected: InterceptorInfo") \
- v(AccessCheckInfo, "unexpected: AccessCheckInfo") \
- v(AccessorInfo, "unexpected: AccessorInfo") \
- v(ExternalTwoByteString, "unexpected: ExternalTwoByteString") \
- v(ExternalAsciiString, "unexpected: ExternalAsciiString") \
- v(ExternalString, "unexpected: ExternalString") \
- v(SeqTwoByteString, "unexpected: SeqTwoByteString") \
- v(SeqAsciiString, "unexpected: SeqAsciiString") \
- v(SeqString, "unexpected: SeqString") \
- v(JSFunctionResultCache, "unexpected: JSFunctionResultCache") \
- v(NativeContext, "unexpected: NativeContext") \
- v(MapCache, "unexpected: MapCache") \
- v(CodeCacheHashTable, "unexpected: CodeCacheHashTable") \
- v(CompilationCacheTable, "unexpected: CompilationCacheTable") \
- v(SymbolTable, "unexpected: SymbolTable") \
- v(Dictionary, "unexpected: Dictionary") \
- v(HashTable, "unexpected: HashTable") \
- v(DescriptorArray, "unexpected: DescriptorArray") \
- v(ExternalFloatArray, "unexpected: ExternalFloatArray") \
- v(ExternalUnsignedIntArray, "unexpected: ExternalUnsignedIntArray") \
- v(ExternalIntArray, "unexpected: ExternalIntArray") \
- v(ExternalUnsignedShortArray, "unexpected: ExternalUnsignedShortArray") \
- v(ExternalShortArray, "unexpected: ExternalShortArray") \
- v(ExternalUnsignedByteArray, "unexpected: ExternalUnsignedByteArray") \
- v(ExternalByteArray, "unexpected: ExternalByteArray") \
- v(JSValue, "unexpected: JSValue")
-
-#else
-#define DEBUG_LIVE_OBJECT_TYPES(v)
-#endif
-
-
-#define FOR_EACH_LIVE_OBJECT_TYPE(v) \
- DEBUG_LIVE_OBJECT_TYPES(v) \
- \
- v(JSArray, "JSArray") \
- v(JSRegExp, "JSRegExp") \
- v(JSFunction, "JSFunction") \
- v(JSGlobalObject, "JSGlobal") \
- v(JSBuiltinsObject, "JSBuiltins") \
- v(GlobalObject, "Global") \
- v(JSGlobalProxy, "JSGlobalProxy") \
- v(JSObject, "JSObject") \
- \
- v(Context, "meta: Context") \
- v(ByteArray, "meta: ByteArray") \
- v(ExternalPixelArray, "meta: PixelArray") \
- v(ExternalArray, "meta: ExternalArray") \
- v(FixedArray, "meta: FixedArray") \
- v(String, "String") \
- v(HeapNumber, "HeapNumber") \
- \
- v(Code, "meta: Code") \
- v(Map, "meta: Map") \
- v(Oddball, "Oddball") \
- v(Foreign, "meta: Foreign") \
- v(SharedFunctionInfo, "meta: SharedFunctionInfo") \
- v(Struct, "meta: Struct") \
- \
- v(HeapObject, "HeapObject")
-
-
-enum /* LiveObjectType */ {
-#define DECLARE_OBJECT_TYPE_ENUM(type, name) kType##type,
- FOR_EACH_LIVE_OBJECT_TYPE(DECLARE_OBJECT_TYPE_ENUM)
- kInvalidLiveObjType,
- kNumberOfTypes
-#undef DECLARE_OBJECT_TYPE_ENUM
-};
-
-
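-// Classifies a heap object as one of the LiveObjectTypes by checking the
-// Is##type() predicates in the order listed above (most specialized first).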
-LiveObjectType GetObjectType(HeapObject* heap_obj) {
-  // TODO(mlam): investigate using Map::instance_type() instead.
-#define CHECK_FOR_OBJECT_TYPE(type, name) \
- if (heap_obj->Is##type()) return kType##type;
- FOR_EACH_LIVE_OBJECT_TYPE(CHECK_FOR_OBJECT_TYPE)
-#undef CHECK_FOR_OBJECT_TYPE
-
- UNREACHABLE();
- return kInvalidLiveObjType;
-}
-
-
-inline const char* GetObjectTypeDesc(LiveObjectType type) {
- static const char* const name[kNumberOfTypes] = {
- #define DEFINE_OBJECT_TYPE_NAME(type, name) name,
- FOR_EACH_LIVE_OBJECT_TYPE(DEFINE_OBJECT_TYPE_NAME)
- "invalid"
- #undef DEFINE_OBJECT_TYPE_NAME
- };
- ASSERT(type < kNumberOfTypes);
- return name[type];
-}
-
-
-const char* GetObjectTypeDesc(HeapObject* heap_obj) {
- LiveObjectType type = GetObjectType(heap_obj);
- return GetObjectTypeDesc(type);
-}
-
-
-bool IsOfType(LiveObjectType type, HeapObject* obj) {
-  // Note: an object of a specialized type (e.g. JSFunction) would also pass
-  // the Is##type_() test of its more general type (e.g. JSObject). Since
-  // the types are checked from most specialized to most general, a query
-  // for a general type must reject objects that match one of the more
-  // specialized types first.
-#define CHECK_OBJECT_TYPE(type_, name) \
- if (obj->Is##type_()) return (type == kType##type_);
-
- FOR_EACH_LIVE_OBJECT_TYPE(CHECK_OBJECT_TYPE)
-#undef CHECK_OBJECT_TYPE
-
- return false;
-}
-
-
-const AllocationSpace kInvalidSpace = static_cast<AllocationSpace>(-1);
-
-static AllocationSpace FindSpaceFor(String* space_str) {
- SmartArrayPointer<char> s =
- space_str->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
-
- const char* key_str = *s;
- switch (key_str[0]) {
- case 'c':
- if (strcmp(key_str, "cell") == 0) return CELL_SPACE;
- if (strcmp(key_str, "code") == 0) return CODE_SPACE;
- break;
- case 'l':
- if (strcmp(key_str, "lo") == 0) return LO_SPACE;
- break;
- case 'm':
- if (strcmp(key_str, "map") == 0) return MAP_SPACE;
- break;
- case 'n':
- if (strcmp(key_str, "new") == 0) return NEW_SPACE;
- break;
- case 'o':
- if (strcmp(key_str, "old-pointer") == 0) return OLD_POINTER_SPACE;
- if (strcmp(key_str, "old-data") == 0) return OLD_DATA_SPACE;
- break;
- }
- return kInvalidSpace;
-}
-
-
-static bool InSpace(AllocationSpace space, HeapObject* heap_obj) {
- Heap* heap = ISOLATE->heap();
- if (space != LO_SPACE) {
- return heap->InSpace(heap_obj, space);
- }
-
-  // This is an optimization to speed up the check for an object in the LO
-  // space by exclusion: all object pointers passed in here are guaranteed
-  // to be in the heap. Hence, an object that is in none of the other
-  // spaces must be in the LO space.
-  // Note: calling Heap::InSpace(heap_obj, LO_SPACE) is too slow for our
-  // filters.
- int first_space = static_cast<int>(FIRST_SPACE);
- int last_space = static_cast<int>(LO_SPACE);
- for (int sp = first_space; sp < last_space; sp++) {
- if (heap->InSpace(heap_obj, static_cast<AllocationSpace>(sp))) {
- return false;
- }
- }
- SLOW_ASSERT(heap->InSpace(heap_obj, LO_SPACE));
- return true;
-}
-
-
-static LiveObjectType FindTypeFor(String* type_str) {
- SmartArrayPointer<char> s =
- type_str->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
-
-#define CHECK_OBJECT_TYPE(type_, name) { \
- const char* type_desc = GetObjectTypeDesc(kType##type_); \
- const char* key_str = *s; \
- if (strstr(type_desc, key_str) != NULL) return kType##type_; \
- }
- FOR_EACH_LIVE_OBJECT_TYPE(CHECK_OBJECT_TYPE)
-#undef CHECK_OBJECT_TYPE
-
- return kInvalidLiveObjType;
-}
-
-
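-// A filter for matching objects by type, allocation space, and/or the
-// presence of a named property. An inactive filter matches every object.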
-class LolFilter {
- public:
- explicit LolFilter(Handle<JSObject> filter_obj);
-
- inline bool is_active() const { return is_active_; }
- inline bool Matches(HeapObject* obj) {
- return !is_active() || MatchesSlow(obj);
- }
-
- private:
- void InitTypeFilter(Handle<JSObject> filter_obj);
- void InitSpaceFilter(Handle<JSObject> filter_obj);
- void InitPropertyFilter(Handle<JSObject> filter_obj);
- bool MatchesSlow(HeapObject* obj);
-
- bool is_active_;
- LiveObjectType type_;
- AllocationSpace space_;
- Handle<String> prop_;
-};
-
-
-LolFilter::LolFilter(Handle<JSObject> filter_obj)
- : is_active_(false),
- type_(kInvalidLiveObjType),
- space_(kInvalidSpace),
- prop_() {
- if (filter_obj.is_null()) return;
-
- InitTypeFilter(filter_obj);
- InitSpaceFilter(filter_obj);
- InitPropertyFilter(filter_obj);
-}
-
-
-void LolFilter::InitTypeFilter(Handle<JSObject> filter_obj) {
- Handle<String> type_sym = FACTORY->LookupAsciiSymbol("type");
- MaybeObject* maybe_result = filter_obj->GetProperty(*type_sym);
- Object* type_obj;
- if (maybe_result->ToObject(&type_obj)) {
- if (type_obj->IsString()) {
- String* type_str = String::cast(type_obj);
- type_ = FindTypeFor(type_str);
- if (type_ != kInvalidLiveObjType) {
- is_active_ = true;
- }
- }
- }
-}
-
-
-void LolFilter::InitSpaceFilter(Handle<JSObject> filter_obj) {
- Handle<String> space_sym = FACTORY->LookupAsciiSymbol("space");
- MaybeObject* maybe_result = filter_obj->GetProperty(*space_sym);
- Object* space_obj;
- if (maybe_result->ToObject(&space_obj)) {
- if (space_obj->IsString()) {
- String* space_str = String::cast(space_obj);
- space_ = FindSpaceFor(space_str);
- if (space_ != kInvalidSpace) {
- is_active_ = true;
- }
- }
- }
-}
-
-
-void LolFilter::InitPropertyFilter(Handle<JSObject> filter_obj) {
- Handle<String> prop_sym = FACTORY->LookupAsciiSymbol("prop");
- MaybeObject* maybe_result = filter_obj->GetProperty(*prop_sym);
- Object* prop_obj;
- if (maybe_result->ToObject(&prop_obj)) {
- if (prop_obj->IsString()) {
- prop_ = Handle<String>(String::cast(prop_obj));
- is_active_ = true;
- }
- }
-}
-
-
-bool LolFilter::MatchesSlow(HeapObject* obj) {
- if ((type_ != kInvalidLiveObjType) && !IsOfType(type_, obj)) {
- return false; // Fail because obj is not of the type of interest.
- }
- if ((space_ != kInvalidSpace) && !InSpace(space_, obj)) {
- return false; // Fail because obj is not in the space of interest.
- }
- if (!prop_.is_null() && obj->IsJSObject()) {
- LookupResult result;
- obj->Lookup(*prop_, &result);
- if (!result.IsProperty()) {
- return false; // Fail because obj does not have the property of interest.
- }
- }
- return true;
-}
-
-
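-// Iterates over the elements of each lol in the chain, walking backwards
-// from the newer lol towards the older one via the prev_ links.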
-class LolIterator {
- public:
- LolIterator(LiveObjectList* older, LiveObjectList* newer)
- : older_(older),
- newer_(newer),
- curr_(0),
- elements_(0),
- count_(0),
- index_(0) { }
-
- inline void Init() {
- SetCurrent(newer_);
-    // If the elements_ list is empty, then move on to the next older list
-    // as long as we're not at the last list (indicated by Done()).
- while ((elements_ == NULL) && !Done()) {
- SetCurrent(curr_->prev_);
- }
- }
-
- inline bool Done() const {
- return (curr_ == older_);
- }
-
- // Object level iteration.
- inline void Next() {
- index_++;
- if (index_ >= count_) {
- // Iterate backwards until we get to the oldest list.
- while (!Done()) {
- SetCurrent(curr_->prev_);
- // If we have elements to process, we're good to go.
- if (elements_ != NULL) break;
-
- // Else, we should advance to the next older list.
- }
- }
- }
-
- inline int Id() const {
- return elements_[index_].id_;
- }
- inline HeapObject* Obj() const {
- return elements_[index_].obj_;
- }
-
- inline int LolObjCount() const {
- if (curr_ != NULL) return curr_->obj_count_;
- return 0;
- }
-
- protected:
- inline void SetCurrent(LiveObjectList* new_curr) {
- curr_ = new_curr;
- if (curr_ != NULL) {
- elements_ = curr_->elements_;
- count_ = curr_->obj_count_;
- index_ = 0;
- }
- }
-
- LiveObjectList* older_;
- LiveObjectList* newer_;
- LiveObjectList* curr_;
- LiveObjectList::Element* elements_;
- int count_;
- int index_;
-};
-
-
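-// Like LolIterator, but walks forward from the older lol towards the newer
-// one via the next_ links.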
-class LolForwardIterator : public LolIterator {
- public:
- LolForwardIterator(LiveObjectList* first, LiveObjectList* last)
- : LolIterator(first, last) {
- }
-
- inline void Init() {
- SetCurrent(older_);
- // If the elements_ list is empty, then move on to the next list as long
- // as we're not at the last list (indicated by Done()).
- while ((elements_ == NULL) && !Done()) {
- SetCurrent(curr_->next_);
- }
- }
-
- inline bool Done() const {
- return (curr_ == newer_);
- }
-
- // Object level iteration.
- inline void Next() {
- index_++;
- if (index_ >= count_) {
- // Done with current list. Move on to the next.
- while (!Done()) { // If not at the last list already, ...
- SetCurrent(curr_->next_);
- // If we have elements to process, we're good to go.
- if (elements_ != NULL) break;
-
- // Else, we should advance to the next list.
- }
- }
- }
-};
-
-
-// Minimizes the white space in a string: contiguous runs of whitespace
-// (including tabs and newlines) are compacted into a single space, and
-// non-ASCII chars are replaced with '?'. Returns the compacted length.
-static int CompactString(char* str) {
-  char* src = str;
-  char* dst = str;
-  char prev_ch = 0;
-  char ch;
-  // Drive the loop off the source pointer: once compaction makes dst lag
-  // behind src, dst points at stale chars and must not be used to detect
-  // the end of the string.
-  while ((ch = *src++) != '\0') {
-    // We will treat non-ASCII chars as '?'.
-    if ((ch & 0x80) != 0) {
-      ch = '?';
-    }
-    // Compact contiguous whitespace chars into a single ' '.
-    if (isspace(ch)) {
-      if (prev_ch != ' ') *dst++ = ' ';
-      prev_ch = ' ';
-      continue;
-    }
-    *dst++ = ch;
-    prev_ch = ch;
-  }
-  *dst = '\0';
-  return (dst - str);
-}
-
-
-// Generates a custom description based on the specific type of
-// object we're looking at. We only generate specialized
-// descriptions where we can. In all other cases, we emit the
-// generic info.
-static void GenerateObjectDesc(HeapObject* obj,
- char* buffer,
- int buffer_size) {
- Vector<char> buffer_v(buffer, buffer_size);
- ASSERT(obj != NULL);
- if (obj->IsJSArray()) {
- JSArray* jsarray = JSArray::cast(obj);
- double length = jsarray->length()->Number();
- OS::SNPrintF(buffer_v,
- "%p <%s> len %g",
- reinterpret_cast<void*>(obj),
- GetObjectTypeDesc(obj),
- length);
-
- } else if (obj->IsString()) {
- String* str = String::cast(obj);
- // Only grab up to 160 chars in case they are double byte.
- // We'll only dump 80 of them after we compact them.
- const int kMaxCharToDump = 80;
- const int kMaxBufferSize = kMaxCharToDump * 2;
- SmartArrayPointer<char> str_sp = str->ToCString(DISALLOW_NULLS,
- ROBUST_STRING_TRAVERSAL,
- 0,
- kMaxBufferSize);
- char* str_cstr = *str_sp;
- int length = CompactString(str_cstr);
- OS::SNPrintF(buffer_v,
- "%p <%s> '%.80s%s'",
- reinterpret_cast<void*>(obj),
- GetObjectTypeDesc(obj),
- str_cstr,
- (length > kMaxCharToDump) ? "..." : "");
-
- } else if (obj->IsJSFunction() || obj->IsSharedFunctionInfo()) {
- SharedFunctionInfo* sinfo;
- if (obj->IsJSFunction()) {
- JSFunction* func = JSFunction::cast(obj);
- sinfo = func->shared();
- } else {
- sinfo = SharedFunctionInfo::cast(obj);
- }
-
- String* name = sinfo->DebugName();
- SmartArrayPointer<char> name_sp =
- name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
- char* name_cstr = *name_sp;
-
- HeapStringAllocator string_allocator;
- StringStream stream(&string_allocator);
- sinfo->SourceCodePrint(&stream, 50);
- SmartArrayPointer<const char> source_sp = stream.ToCString();
- const char* source_cstr = *source_sp;
-
- OS::SNPrintF(buffer_v,
- "%p <%s> '%s' %s",
- reinterpret_cast<void*>(obj),
- GetObjectTypeDesc(obj),
- name_cstr,
- source_cstr);
-
- } else if (obj->IsFixedArray()) {
- FixedArray* fixed = FixedArray::cast(obj);
-
- OS::SNPrintF(buffer_v,
- "%p <%s> len %d",
- reinterpret_cast<void*>(obj),
- GetObjectTypeDesc(obj),
- fixed->length());
-
- } else {
- OS::SNPrintF(buffer_v,
- "%p <%s>",
- reinterpret_cast<void*>(obj),
- GetObjectTypeDesc(obj));
- }
-}
-
-
-// Utility function for filling in a line of detail in a verbose dump.
-static bool AddObjDetail(Handle<FixedArray> arr,
- int index,
- int obj_id,
- Handle<HeapObject> target,
- const char* desc_str,
- Handle<String> id_sym,
- Handle<String> desc_sym,
- Handle<String> size_sym,
- Handle<JSObject> detail,
- Handle<String> desc,
- Handle<Object> error) {
- Isolate* isolate = Isolate::Current();
- Factory* factory = isolate->factory();
- detail = factory->NewJSObject(isolate->object_function());
- if (detail->IsFailure()) {
- error = detail;
- return false;
- }
-
- int size = 0;
- char buffer[512];
- if (desc_str == NULL) {
- ASSERT(!target.is_null());
- HeapObject* obj = *target;
- GenerateObjectDesc(obj, buffer, sizeof(buffer));
- desc_str = buffer;
- size = obj->Size();
- }
- desc = factory->NewStringFromAscii(CStrVector(desc_str));
- if (desc->IsFailure()) {
- error = desc;
- return false;
- }
-
- { MaybeObject* maybe_result = detail->SetProperty(*id_sym,
- Smi::FromInt(obj_id),
- NONE,
- kNonStrictMode);
- if (maybe_result->IsFailure()) return false;
- }
- { MaybeObject* maybe_result = detail->SetProperty(*desc_sym,
- *desc,
- NONE,
- kNonStrictMode);
- if (maybe_result->IsFailure()) return false;
- }
- { MaybeObject* maybe_result = detail->SetProperty(*size_sym,
- Smi::FromInt(size),
- NONE,
- kNonStrictMode);
- if (maybe_result->IsFailure()) return false;
- }
-
- arr->set(index, *detail);
- return true;
-}
-
-
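-// Abstraction for a dump writer.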
-class DumpWriter {
- public:
- virtual ~DumpWriter() {}
-
- virtual void ComputeTotalCountAndSize(LolFilter* filter,
- int* count,
- int* size) = 0;
- virtual bool Write(Handle<FixedArray> elements_arr,
- int start,
- int dump_limit,
- LolFilter* filter,
- Handle<Object> error) = 0;
-};
-
-
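-// A dump writer for filling in a dump of lol lists and diffs.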
-class LolDumpWriter: public DumpWriter {
- public:
- LolDumpWriter(LiveObjectList* older, LiveObjectList* newer)
- : older_(older), newer_(newer) {
- }
-
- void ComputeTotalCountAndSize(LolFilter* filter, int* count, int* size) {
- *count = 0;
- *size = 0;
-
- LolIterator it(older_, newer_);
- for (it.Init(); !it.Done(); it.Next()) {
- HeapObject* heap_obj = it.Obj();
- if (!filter->Matches(heap_obj)) {
- continue;
- }
-
- *size += heap_obj->Size();
- (*count)++;
- }
- }
-
- bool Write(Handle<FixedArray> elements_arr,
- int start,
- int dump_limit,
- LolFilter* filter,
- Handle<Object> error) {
-    // The lols are chained from latest to earliest, but we want to dump
-    // from earliest to latest. So, find the earliest lol to start a
-    // forward iteration from.
- int index = 0;
- int count = 0;
-
- Isolate* isolate = Isolate::Current();
- Factory* factory = isolate->factory();
-
- // Prefetch some needed symbols.
- Handle<String> id_sym = factory->LookupAsciiSymbol("id");
- Handle<String> desc_sym = factory->LookupAsciiSymbol("desc");
- Handle<String> size_sym = factory->LookupAsciiSymbol("size");
-
- // Fill the array with the lol object details.
- Handle<JSObject> detail;
- Handle<String> desc;
- Handle<HeapObject> target;
-
- LiveObjectList* first_lol = (older_ != NULL) ?
- older_->next_ : LiveObjectList::first_;
- LiveObjectList* last_lol = (newer_ != NULL) ? newer_->next_ : NULL;
-
- LolForwardIterator it(first_lol, last_lol);
- for (it.Init(); !it.Done() && (index < dump_limit); it.Next()) {
- HeapObject* heap_obj = it.Obj();
-
- // Skip objects that have been filtered out.
- if (!filter->Matches(heap_obj)) {
- continue;
- }
-
- // Only report objects that are in the section of interest.
- if (count >= start) {
- target = Handle<HeapObject>(heap_obj);
- bool success = AddObjDetail(elements_arr,
- index++,
- it.Id(),
- target,
- NULL,
- id_sym,
- desc_sym,
- size_sym,
- detail,
- desc,
- error);
- if (!success) return false;
- }
- count++;
- }
- return true;
- }
-
- private:
- LiveObjectList* older_;
- LiveObjectList* newer_;
-};
-
-
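-// A dump writer for filling in a dump of the retainers of a target object.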
-class RetainersDumpWriter: public DumpWriter {
- public:
- RetainersDumpWriter(Handle<HeapObject> target,
- Handle<JSObject> instance_filter,
- Handle<JSFunction> args_function)
- : target_(target),
- instance_filter_(instance_filter),
- args_function_(args_function) {
- }
-
- void ComputeTotalCountAndSize(LolFilter* filter, int* count, int* size) {
- Handle<FixedArray> retainers_arr;
- Handle<Object> error;
-
- *size = -1;
- LiveObjectList::GetRetainers(target_,
- instance_filter_,
- retainers_arr,
- 0,
- Smi::kMaxValue,
- count,
- filter,
- NULL,
- *args_function_,
- error);
- }
-
- bool Write(Handle<FixedArray> elements_arr,
- int start,
- int dump_limit,
- LolFilter* filter,
- Handle<Object> error) {
- int dummy;
- int count;
-
- // Fill the retainer objects.
- count = LiveObjectList::GetRetainers(target_,
- instance_filter_,
- elements_arr,
- start,
- dump_limit,
- &dummy,
- filter,
- NULL,
- *args_function_,
- error);
- if (count < 0) {
- return false;
- }
- return true;
- }
-
- private:
- Handle<HeapObject> target_;
- Handle<JSObject> instance_filter_;
- Handle<JSFunction> args_function_;
-};
-
-
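-// Accumulates the count and size of live objects, broken down by object
-// type, for a summary report.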
-class LiveObjectSummary {
- public:
- explicit LiveObjectSummary(LolFilter* filter)
- : total_count_(0),
- total_size_(0),
- found_root_(false),
- found_weak_root_(false),
- filter_(filter) {
- memset(counts_, 0, sizeof(counts_[0]) * kNumberOfEntries);
- memset(sizes_, 0, sizeof(sizes_[0]) * kNumberOfEntries);
- }
-
- void Add(HeapObject* heap_obj) {
- int size = heap_obj->Size();
- LiveObjectType type = GetObjectType(heap_obj);
- ASSERT(type != kInvalidLiveObjType);
- counts_[type]++;
- sizes_[type] += size;
- total_count_++;
- total_size_ += size;
- }
-
- void set_found_root() { found_root_ = true; }
- void set_found_weak_root() { found_weak_root_ = true; }
-
- inline int Count(LiveObjectType type) {
- return counts_[type];
- }
- inline int Size(LiveObjectType type) {
- return sizes_[type];
- }
- inline int total_count() {
- return total_count_;
- }
- inline int total_size() {
- return total_size_;
- }
- inline bool found_root() {
- return found_root_;
- }
- inline bool found_weak_root() {
- return found_weak_root_;
- }
- int GetNumberOfEntries() {
- int entries = 0;
- for (int i = 0; i < kNumberOfEntries; i++) {
- if (counts_[i]) entries++;
- }
- return entries;
- }
-
- inline LolFilter* filter() { return filter_; }
-
- static const int kNumberOfEntries = kNumberOfTypes;
-
- private:
- int counts_[kNumberOfEntries];
- int sizes_[kNumberOfEntries];
- int total_count_;
- int total_size_;
- bool found_root_;
- bool found_weak_root_;
-
- LolFilter* filter_;
-};
-
-
-// Abstraction for a summary writer.
-class SummaryWriter {
- public:
- virtual ~SummaryWriter() {}
- virtual void Write(LiveObjectSummary* summary) = 0;
-};
-
-
-// A summary writer for filling in a summary of lol lists and diffs.
-class LolSummaryWriter: public SummaryWriter {
- public:
- LolSummaryWriter(LiveObjectList* older_lol,
- LiveObjectList* newer_lol)
- : older_(older_lol), newer_(newer_lol) {
- }
-
- void Write(LiveObjectSummary* summary) {
- LolFilter* filter = summary->filter();
-
- // Fill the summary with the lol object details.
- LolIterator it(older_, newer_);
- for (it.Init(); !it.Done(); it.Next()) {
- HeapObject* heap_obj = it.Obj();
- if (!filter->Matches(heap_obj)) {
- continue;
- }
- summary->Add(heap_obj);
- }
- }
-
- private:
- LiveObjectList* older_;
- LiveObjectList* newer_;
-};
-
-
-// A summary writer for filling in a retainers list.
-class RetainersSummaryWriter: public SummaryWriter {
- public:
- RetainersSummaryWriter(Handle<HeapObject> target,
- Handle<JSObject> instance_filter,
- Handle<JSFunction> args_function)
- : target_(target),
- instance_filter_(instance_filter),
- args_function_(args_function) {
- }
-
- void Write(LiveObjectSummary* summary) {
- Handle<FixedArray> retainers_arr;
- Handle<Object> error;
- int dummy_total_count;
- LiveObjectList::GetRetainers(target_,
- instance_filter_,
- retainers_arr,
- 0,
- Smi::kMaxValue,
- &dummy_total_count,
- summary->filter(),
- summary,
- *args_function_,
- error);
- }
-
- private:
- Handle<HeapObject> target_;
- Handle<JSObject> instance_filter_;
- Handle<JSFunction> args_function_;
-};
-
-
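-// Static state tracking the chain of captured lols.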
-uint32_t LiveObjectList::next_element_id_ = 1;
-int LiveObjectList::list_count_ = 0;
-int LiveObjectList::last_id_ = 0;
-LiveObjectList* LiveObjectList::first_ = NULL;
-LiveObjectList* LiveObjectList::last_ = NULL;
-
-
-LiveObjectList::LiveObjectList(LiveObjectList* prev, int capacity)
- : prev_(prev),
- next_(NULL),
- capacity_(capacity),
- obj_count_(0) {
- elements_ = NewArray<Element>(capacity);
- id_ = ++last_id_;
-
- list_count_++;
-}
-
-
-LiveObjectList::~LiveObjectList() {
- DeleteArray<Element>(elements_);
- delete prev_;
-}
-
-
-int LiveObjectList::GetTotalObjCountAndSize(int* size_p) {
- int size = 0;
- int count = 0;
- LiveObjectList* lol = this;
- do {
- // Only compute total size if requested i.e. when size_p is not null.
- if (size_p != NULL) {
- Element* elements = lol->elements_;
- for (int i = 0; i < lol->obj_count_; i++) {
- HeapObject* heap_obj = elements[i].obj_;
- size += heap_obj->Size();
- }
- }
- count += lol->obj_count_;
- lol = lol->prev_;
- } while (lol != NULL);
-
- if (size_p != NULL) {
- *size_p = size;
- }
- return count;
-}
-
-
-// Adds an object to the lol.
-// Returns true if successful, else returns false.
-bool LiveObjectList::Add(HeapObject* obj) {
- // If the object is already accounted for in the prev list which we inherit
- // from, then no need to add it to this list.
- if ((prev() != NULL) && (prev()->Find(obj) != NULL)) {
- return true;
- }
- ASSERT(obj_count_ <= capacity_);
- if (obj_count_ == capacity_) {
-    // The heap must have grown and now there are more objects than we have
-    // capacity to store.
- return false; // Fail this addition.
- }
- Element& element = elements_[obj_count_++];
- element.id_ = next_element_id_++;
- element.obj_ = obj;
- return true;
-}
-
-
-// Comparator used for sorting and searching the lol.
-int LiveObjectList::CompareElement(const Element* a, const Element* b) {
- const HeapObject* obj1 = a->obj_;
- const HeapObject* obj2 = b->obj_;
-  // For lol elements, it doesn't matter which comes first if 2 elements
-  // point to the same object (which gets culled later). Hence, we only
-  // care about the greater than / less than relationships.
-}
-
-
-// Looks for the specified object in the lol, and returns its element if found.
-LiveObjectList::Element* LiveObjectList::Find(HeapObject* obj) {
- LiveObjectList* lol = this;
- Element key;
- Element* result = NULL;
-
- key.obj_ = obj;
- // Iterate through the chain of lol's to look for the object.
- while ((result == NULL) && (lol != NULL)) {
- result = reinterpret_cast<Element*>(
- bsearch(&key, lol->elements_, lol->obj_count_,
- sizeof(Element),
- reinterpret_cast<RawComparer>(CompareElement)));
- lol = lol->prev_;
- }
- return result;
-}
-
-
-// "Nullifies" (convert the HeapObject* into an SMI) so that it will get cleaned
-// up in the GCEpilogue, while preserving the sort order of the lol.
-// NOTE: the lols need to be already sorted before NullifyMostRecent() is
-// called.
-void LiveObjectList::NullifyMostRecent(HeapObject* obj) {
- LiveObjectList* lol = last();
- Element key;
- Element* result = NULL;
-
- key.obj_ = obj;
- // Iterate through the chain of lol's to look for the object.
- while (lol != NULL) {
- result = reinterpret_cast<Element*>(
- bsearch(&key, lol->elements_, lol->obj_count_,
- sizeof(Element),
- reinterpret_cast<RawComparer>(CompareElement)));
- if (result != NULL) {
-      // Since there may be more than one (we are nullifying dups after all),
- // find the first in the current lol, and nullify that. The lol should
- // be sorted already to make this easy (see the use of SortAll()).
- int i = result - lol->elements_;
-
- // NOTE: we sort the lol in increasing order. So, if an object has been
- // "nullified" (its lowest bit will be cleared to make it look like an
- // SMI), it would/should show up before the equivalent dups that have not
- // yet been "nullified". Hence, we should be searching backwards for the
-      // first occurrence of a matching object and nullify that instance. This
- // will ensure that we preserve the expected sorting order.
- for (i--; i > 0; i--) {
- Element* element = &lol->elements_[i];
- HeapObject* curr_obj = element->obj_;
- if (curr_obj != obj) {
- break; // No more matches. Let's move on.
- }
- result = element; // Let this earlier match be the result.
- }
-
- // Nullify the object.
- NullifyNonLivePointer(&result->obj_);
- return;
- }
- lol = lol->prev_;
- }
-}
-
-
-// Sorts the lol.
-void LiveObjectList::Sort() {
- if (obj_count_ > 0) {
- Vector<Element> elements_v(elements_, obj_count_);
- elements_v.Sort(CompareElement);
- }
-}
-
-
-// Sorts all captured lols starting from the latest.
-void LiveObjectList::SortAll() {
- LiveObjectList* lol = last();
- while (lol != NULL) {
- lol->Sort();
- lol = lol->prev_;
- }
-}
-
-
-// Counts the number of objects in the heap.
-static int CountHeapObjects() {
- int count = 0;
- // Iterate over all the heap spaces and count the number of objects.
- HeapIterator iterator;
- HeapObject* heap_obj = NULL;
- while ((heap_obj = iterator.next()) != NULL) {
- count++;
- }
- return count;
-}
-
-
-// Captures a current snapshot of all objects in the heap.
-MaybeObject* LiveObjectList::Capture() {
- Isolate* isolate = Isolate::Current();
- Factory* factory = isolate->factory();
- HandleScope scope(isolate);
-
- // Count the number of objects in the heap.
- int total_count = CountHeapObjects();
- int count = total_count;
- int size = 0;
-
- LiveObjectList* last_lol = last();
- if (last_lol != NULL) {
- count -= last_lol->TotalObjCount();
- }
-
- LiveObjectList* lol;
-
- // Create a lol large enough to track all the objects.
- lol = new LiveObjectList(last_lol, count);
- if (lol == NULL) {
- return NULL; // No memory to proceed.
- }
-
- // The HeapIterator needs to be in its own scope because it disables
-  // allocation, and we need to allocate below.
- {
- // Iterate over all the heap spaces and add the objects.
- HeapIterator iterator;
- HeapObject* heap_obj = NULL;
- bool failed = false;
- while (!failed && (heap_obj = iterator.next()) != NULL) {
- failed = !lol->Add(heap_obj);
- size += heap_obj->Size();
- }
- ASSERT(!failed);
-
- lol->Sort();
-
- // Add the current lol to the list of lols.
- if (last_ != NULL) {
- last_->next_ = lol;
- } else {
- first_ = lol;
- }
- last_ = lol;
-
-#ifdef VERIFY_LOL
- if (FLAG_verify_lol) {
- Verify(true);
- }
-#endif
- }
-
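-  // The result object will look like this:
-  //   { id: <lol id>, count: <total object count>, size: <total size> }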
- Handle<String> id_sym = factory->LookupAsciiSymbol("id");
- Handle<String> count_sym = factory->LookupAsciiSymbol("count");
- Handle<String> size_sym = factory->LookupAsciiSymbol("size");
-
- Handle<JSObject> result = factory->NewJSObject(isolate->object_function());
- if (result->IsFailure()) return Object::cast(*result);
-
- { MaybeObject* maybe_result = result->SetProperty(*id_sym,
- Smi::FromInt(lol->id()),
- NONE,
- kNonStrictMode);
- if (maybe_result->IsFailure()) return maybe_result;
- }
- { MaybeObject* maybe_result = result->SetProperty(*count_sym,
- Smi::FromInt(total_count),
- NONE,
- kNonStrictMode);
- if (maybe_result->IsFailure()) return maybe_result;
- }
- { MaybeObject* maybe_result = result->SetProperty(*size_sym,
- Smi::FromInt(size),
- NONE,
- kNonStrictMode);
- if (maybe_result->IsFailure()) return maybe_result;
- }
-
- return *result;
-}
-
-
-// Delete doesn't actually delete a lol. It just marks it as invisible since
-// its contents are considered to be part of subsequent lists as well. The
-// only time we'll actually delete the lol is when we Reset() or if the lol is
-// invisible, and its element count reaches 0.
-bool LiveObjectList::Delete(int id) {
- LiveObjectList* lol = last();
- while (lol != NULL) {
- if (lol->id() == id) {
- break;
- }
- lol = lol->prev_;
- }
-
- // If no lol is found for this id, then we fail to delete.
- if (lol == NULL) return false;
-
- // Else, mark the lol as invisible i.e. id == 0.
- lol->id_ = 0;
- list_count_--;
- ASSERT(list_count_ >= 0);
- if (lol->obj_count_ == 0) {
- // Point the next lol's prev to this lol's prev.
- LiveObjectList* next = lol->next_;
- LiveObjectList* prev = lol->prev_;
- // Point next's prev to prev.
- if (next != NULL) {
- next->prev_ = lol->prev_;
- } else {
- last_ = lol->prev_;
- }
- // Point prev's next to next.
- if (prev != NULL) {
- prev->next_ = lol->next_;
- } else {
- first_ = lol->next_;
- }
-
- lol->prev_ = NULL;
- lol->next_ = NULL;
-
- // Delete this now empty and invisible lol.
- delete lol;
- }
-
- // Just in case we've marked everything invisible, then clean up completely.
- if (list_count_ == 0) {
- Reset();
- }
-
- return true;
-}
-
-
-MaybeObject* LiveObjectList::Dump(int older_id,
- int newer_id,
- int start_idx,
- int dump_limit,
- Handle<JSObject> filter_obj) {
- if ((older_id < 0) || (newer_id < 0) || (last() == NULL)) {
-    return Failure::Exception(); // Fail: invalid lol ids or no lols yet.
- }
- if (newer_id < older_id) {
- // They are not in the expected order. Swap them.
- int temp = older_id;
- older_id = newer_id;
- newer_id = temp;
- }
-
- LiveObjectList* newer_lol = FindLolForId(newer_id, last());
- LiveObjectList* older_lol = FindLolForId(older_id, newer_lol);
-
- // If the id is defined, and we can't find a LOL for it, then we have an
- // invalid id.
- if ((newer_id != 0) && (newer_lol == NULL)) {
- return Failure::Exception(); // Fail: the newer lol id is invalid.
- }
- if ((older_id != 0) && (older_lol == NULL)) {
- return Failure::Exception(); // Fail: the older lol id is invalid.
- }
-
- LolFilter filter(filter_obj);
- LolDumpWriter writer(older_lol, newer_lol);
- return DumpPrivate(&writer, start_idx, dump_limit, &filter);
-}
-
-
-MaybeObject* LiveObjectList::DumpPrivate(DumpWriter* writer,
- int start,
- int dump_limit,
- LolFilter* filter) {
- Isolate* isolate = Isolate::Current();
- Factory* factory = isolate->factory();
-
- HandleScope scope(isolate);
-
- // Calculate the number of entries of the dump.
- int count = -1;
- int size = -1;
- writer->ComputeTotalCountAndSize(filter, &count, &size);
-
- // Adjust for where to start the dump.
- if ((start < 0) || (start >= count)) {
- return Failure::Exception(); // invalid start.
- }
-
- int remaining_count = count - start;
- if (dump_limit > remaining_count) {
- dump_limit = remaining_count;
- }
-
- // Allocate an array to hold the result.
- Handle<FixedArray> elements_arr = factory->NewFixedArray(dump_limit);
- if (elements_arr->IsFailure()) return Object::cast(*elements_arr);
-
- // Fill in the dump.
- Handle<Object> error;
- bool success = writer->Write(elements_arr,
- start,
- dump_limit,
- filter,
- error);
- if (!success) return Object::cast(*error);
-
- MaybeObject* maybe_result;
-
- // Allocate the result body.
- Handle<JSObject> body = factory->NewJSObject(isolate->object_function());
- if (body->IsFailure()) return Object::cast(*body);
-
- // Set the updated body.count.
- Handle<String> count_sym = factory->LookupAsciiSymbol("count");
- maybe_result = body->SetProperty(*count_sym,
- Smi::FromInt(count),
- NONE,
- kNonStrictMode);
- if (maybe_result->IsFailure()) return maybe_result;
-
- // Set the updated body.size if appropriate.
- if (size >= 0) {
- Handle<String> size_sym = factory->LookupAsciiSymbol("size");
- maybe_result = body->SetProperty(*size_sym,
- Smi::FromInt(size),
- NONE,
- kNonStrictMode);
- if (maybe_result->IsFailure()) return maybe_result;
- }
-
- // Set body.first_index.
- Handle<String> first_sym = factory->LookupAsciiSymbol("first_index");
- maybe_result = body->SetProperty(*first_sym,
- Smi::FromInt(start),
- NONE,
- kNonStrictMode);
- if (maybe_result->IsFailure()) return maybe_result;
-
- // Allocate the JSArray of the elements.
- Handle<JSObject> elements = factory->NewJSObject(isolate->array_function());
- if (elements->IsFailure()) return Object::cast(*elements);
-
- maybe_result = Handle<JSArray>::cast(elements)->SetContent(*elements_arr);
- if (maybe_result->IsFailure()) return maybe_result;
-
- // Set body.elements.
- Handle<String> elements_sym = factory->LookupAsciiSymbol("elements");
- maybe_result = body->SetProperty(*elements_sym,
- *elements,
- NONE,
- kNonStrictMode);
- if (maybe_result->IsFailure()) return maybe_result;
-
- return *body;
-}
-
-
-MaybeObject* LiveObjectList::Summarize(int older_id,
- int newer_id,
- Handle<JSObject> filter_obj) {
- if ((older_id < 0) || (newer_id < 0) || (last() == NULL)) {
-    return Failure::Exception(); // Fail: invalid lol ids or no lols yet.
- }
- if (newer_id < older_id) {
- // They are not in the expected order. Swap them.
- int temp = older_id;
- older_id = newer_id;
- newer_id = temp;
- }
-
- LiveObjectList* newer_lol = FindLolForId(newer_id, last());
- LiveObjectList* older_lol = FindLolForId(older_id, newer_lol);
-
- // If the id is defined, and we can't find a LOL for it, then we have an
- // invalid id.
- if ((newer_id != 0) && (newer_lol == NULL)) {
- return Failure::Exception(); // Fail: the newer lol id is invalid.
- }
- if ((older_id != 0) && (older_lol == NULL)) {
- return Failure::Exception(); // Fail: the older lol id is invalid.
- }
-
- LolFilter filter(filter_obj);
- LolSummaryWriter writer(older_lol, newer_lol);
- return SummarizePrivate(&writer, &filter, false);
-}
-
-
-// Creates a summary report for the debugger.
-// Note: the SummaryWriter takes care of iterating over objects and filling in
-// the summary.
-MaybeObject* LiveObjectList::SummarizePrivate(SummaryWriter* writer,
- LolFilter* filter,
- bool is_tracking_roots) {
- HandleScope scope;
- MaybeObject* maybe_result;
-
- LiveObjectSummary summary(filter);
- writer->Write(&summary);
-
- Isolate* isolate = Isolate::Current();
- Factory* factory = isolate->factory();
-
- // The result body will look like this:
- // body: {
- // count: <total_count>,
- // size: <total_size>,
- // found_root: <boolean>, // optional.
- // found_weak_root: <boolean>, // optional.
- // summary: [
- // {
- // desc: "<object type name>",
- // count: <count>,
-  //       size: <size>
- // },
- // ...
- // ]
- // }
-
- // Prefetch some needed symbols.
- Handle<String> desc_sym = factory->LookupAsciiSymbol("desc");
- Handle<String> count_sym = factory->LookupAsciiSymbol("count");
- Handle<String> size_sym = factory->LookupAsciiSymbol("size");
- Handle<String> summary_sym = factory->LookupAsciiSymbol("summary");
-
- // Allocate the summary array.
- int entries_count = summary.GetNumberOfEntries();
- Handle<FixedArray> summary_arr =
- factory->NewFixedArray(entries_count);
- if (summary_arr->IsFailure()) return Object::cast(*summary_arr);
-
- int idx = 0;
- for (int i = 0; i < LiveObjectSummary::kNumberOfEntries; i++) {
- // Allocate the summary record.
- Handle<JSObject> detail = factory->NewJSObject(isolate->object_function());
- if (detail->IsFailure()) return Object::cast(*detail);
-
- // Fill in the summary record.
- LiveObjectType type = static_cast<LiveObjectType>(i);
- int count = summary.Count(type);
- if (count) {
- const char* desc_cstr = GetObjectTypeDesc(type);
- Handle<String> desc = factory->LookupAsciiSymbol(desc_cstr);
- int size = summary.Size(type);
-
- maybe_result = detail->SetProperty(*desc_sym,
- *desc,
- NONE,
- kNonStrictMode);
- if (maybe_result->IsFailure()) return maybe_result;
- maybe_result = detail->SetProperty(*count_sym,
- Smi::FromInt(count),
- NONE,
- kNonStrictMode);
- if (maybe_result->IsFailure()) return maybe_result;
- maybe_result = detail->SetProperty(*size_sym,
- Smi::FromInt(size),
- NONE,
- kNonStrictMode);
- if (maybe_result->IsFailure()) return maybe_result;
-
- summary_arr->set(idx++, *detail);
- }
- }
-
- // Wrap the summary fixed array in a JS array.
- Handle<JSObject> summary_obj =
- factory->NewJSObject(isolate->array_function());
- if (summary_obj->IsFailure()) return Object::cast(*summary_obj);
-
- maybe_result = Handle<JSArray>::cast(summary_obj)->SetContent(*summary_arr);
- if (maybe_result->IsFailure()) return maybe_result;
-
- // Create the body object.
- Handle<JSObject> body = factory->NewJSObject(isolate->object_function());
- if (body->IsFailure()) return Object::cast(*body);
-
- // Fill out the body object.
- int total_count = summary.total_count();
- int total_size = summary.total_size();
- maybe_result = body->SetProperty(*count_sym,
- Smi::FromInt(total_count),
- NONE,
- kNonStrictMode);
- if (maybe_result->IsFailure()) return maybe_result;
-
- maybe_result = body->SetProperty(*size_sym,
- Smi::FromInt(total_size),
- NONE,
- kNonStrictMode);
- if (maybe_result->IsFailure()) return maybe_result;
-
- if (is_tracking_roots) {
- int found_root = summary.found_root();
- int found_weak_root = summary.found_weak_root();
- Handle<String> root_sym = factory->LookupAsciiSymbol("found_root");
- Handle<String> weak_root_sym =
- factory->LookupAsciiSymbol("found_weak_root");
- maybe_result = body->SetProperty(*root_sym,
- Smi::FromInt(found_root),
- NONE,
- kNonStrictMode);
- if (maybe_result->IsFailure()) return maybe_result;
- maybe_result = body->SetProperty(*weak_root_sym,
- Smi::FromInt(found_weak_root),
- NONE,
- kNonStrictMode);
- if (maybe_result->IsFailure()) return maybe_result;
- }
-
- maybe_result = body->SetProperty(*summary_sym,
- *summary_obj,
- NONE,
- kNonStrictMode);
- if (maybe_result->IsFailure()) return maybe_result;
-
- return *body;
-}
-
-
-// Returns an array listing the captured lols.
-// Note: only dumps the section starting at start_idx and only up to
-// dump_limit entries.
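-// The result body will look like this:
-//   {
-//     count: <total lol count>,
-//     first_index: <start_idx>,
-//     lists: [ { id: <id>, count: <count>, size: <size> }, ... ]
-//   }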
-MaybeObject* LiveObjectList::Info(int start_idx, int dump_limit) {
- Isolate* isolate = Isolate::Current();
- Factory* factory = isolate->factory();
-
- HandleScope scope(isolate);
- MaybeObject* maybe_result;
-
- int total_count = LiveObjectList::list_count();
- int dump_count = total_count;
-
- // Adjust for where to start the dump.
- if (total_count == 0) {
- start_idx = 0; // Ensure this to get an empty list.
- } else if ((start_idx < 0) || (start_idx >= total_count)) {
- return Failure::Exception(); // invalid start.
- }
- dump_count -= start_idx;
-
- // Adjust for the dump limit.
- if (dump_count > dump_limit) {
- dump_count = dump_limit;
- }
-
- // Allocate an array to hold the result.
- Handle<FixedArray> list = factory->NewFixedArray(dump_count);
- if (list->IsFailure()) return Object::cast(*list);
-
- // Prefetch some needed symbols.
- Handle<String> id_sym = factory->LookupAsciiSymbol("id");
- Handle<String> count_sym = factory->LookupAsciiSymbol("count");
- Handle<String> size_sym = factory->LookupAsciiSymbol("size");
-
- // Fill the array with the lol details.
- int idx = 0;
- LiveObjectList* lol = first_;
-  while ((lol != NULL) && (idx < start_idx)) { // Skip entries before start.
- if (lol->id() != 0) {
- idx++;
- }
- lol = lol->next();
- }
- idx = 0;
- while ((lol != NULL) && (dump_limit != 0)) {
- if (lol->id() != 0) {
- int count;
- int size;
- count = lol->GetTotalObjCountAndSize(&size);
-
- Handle<JSObject> detail =
- factory->NewJSObject(isolate->object_function());
- if (detail->IsFailure()) return Object::cast(*detail);
-
- maybe_result = detail->SetProperty(*id_sym,
- Smi::FromInt(lol->id()),
- NONE,
- kNonStrictMode);
- if (maybe_result->IsFailure()) return maybe_result;
- maybe_result = detail->SetProperty(*count_sym,
- Smi::FromInt(count),
- NONE,
- kNonStrictMode);
- if (maybe_result->IsFailure()) return maybe_result;
- maybe_result = detail->SetProperty(*size_sym,
- Smi::FromInt(size),
- NONE,
- kNonStrictMode);
- if (maybe_result->IsFailure()) return maybe_result;
- list->set(idx++, *detail);
- dump_limit--;
- }
- lol = lol->next();
- }
-
- // Return the result as a JS array.
- Handle<JSObject> lols = factory->NewJSObject(isolate->array_function());
-
- maybe_result = Handle<JSArray>::cast(lols)->SetContent(*list);
- if (maybe_result->IsFailure()) return maybe_result;
-
- Handle<JSObject> result = factory->NewJSObject(isolate->object_function());
- if (result->IsFailure()) return Object::cast(*result);
-
- maybe_result = result->SetProperty(*count_sym,
- Smi::FromInt(total_count),
- NONE,
- kNonStrictMode);
- if (maybe_result->IsFailure()) return maybe_result;
-
- Handle<String> first_sym = factory->LookupAsciiSymbol("first_index");
- maybe_result = result->SetProperty(*first_sym,
- Smi::FromInt(start_idx),
- NONE,
- kNonStrictMode);
- if (maybe_result->IsFailure()) return maybe_result;
-
- Handle<String> lists_sym = factory->LookupAsciiSymbol("lists");
- maybe_result = result->SetProperty(*lists_sym,
- *lols,
- NONE,
- kNonStrictMode);
- if (maybe_result->IsFailure()) return maybe_result;
-
- return *result;
-}
-
-
-// Deletes all captured lols.
-void LiveObjectList::Reset() {
- LiveObjectList* lol = last();
-  // Just delete the last. Each lol will delete its prev automatically.
- delete lol;
-
- next_element_id_ = 1;
- list_count_ = 0;
- last_id_ = 0;
- first_ = NULL;
- last_ = NULL;
-}
-
-
-// Gets the object for the specified obj id.
-Object* LiveObjectList::GetObj(int obj_id) {
- Element* element = FindElementFor<int>(GetElementId, obj_id);
- if (element != NULL) {
- return Object::cast(element->obj_);
- }
- return HEAP->undefined_value();
-}
-
-
-// Gets the obj id for the specified address if valid.
-int LiveObjectList::GetObjId(Object* obj) {
- // Make a heap object pointer from the address.
- HeapObject* hobj = HeapObject::cast(obj);
- Element* element = FindElementFor<HeapObject*>(GetElementObj, hobj);
- if (element != NULL) {
- return element->id_;
- }
- return 0; // Invalid address.
-}
-
-
-// Gets the obj id for the specified address if valid.
-Object* LiveObjectList::GetObjId(Handle<String> address) {
- SmartArrayPointer<char> addr_str =
- address->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
-
- Isolate* isolate = Isolate::Current();
-
- // Extract the address value from the string.
- int value =
- static_cast<int>(StringToInt(isolate->unicode_cache(), *address, 16));
- Object* obj = reinterpret_cast<Object*>(value);
- return Smi::FromInt(GetObjId(obj));
-}
-
-
-// Helper visitor for checking whether any visited pointer references the
-// specified target object.
-class LolVisitor: public ObjectVisitor {
- public:
- LolVisitor(HeapObject* target, Handle<HeapObject> handle_to_skip)
- : target_(target), handle_to_skip_(handle_to_skip), found_(false) {}
-
- void VisitPointer(Object** p) { CheckPointer(p); }
-
- void VisitPointers(Object** start, Object** end) {
- // Check all HeapObject pointers in [start, end).
- for (Object** p = start; !found() && p < end; p++) CheckPointer(p);
- }
-
- inline bool found() const { return found_; }
- inline bool reset() { return found_ = false; }
-
- private:
- inline void CheckPointer(Object** p) {
- Object* object = *p;
- if (HeapObject::cast(object) == target_) {
- // We may want to skip this handle because the handle may be a local
- // handle in a handle scope in one of our callers. Once we return,
- // that handle will be popped. Hence, we don't want to count it as
- // a root that would have kept the target object alive.
- if (!handle_to_skip_.is_null() &&
- handle_to_skip_.location() == reinterpret_cast<HeapObject**>(p)) {
- return; // Skip this handle.
- }
- found_ = true;
- }
- }
-
- HeapObject* target_;
- Handle<HeapObject> handle_to_skip_;
- bool found_;
-};
-
-
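-// If the visitor found the target, counts it as being retained by the named
-// root and, when dumping (i.e. no summary is supplied), adds a detail record
-// for it to the retainers array.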
-inline bool AddRootRetainerIfFound(const LolVisitor& visitor,
- LolFilter* filter,
- LiveObjectSummary* summary,
- void (*SetRootFound)(LiveObjectSummary* s),
- int start,
- int dump_limit,
- int* total_count,
- Handle<FixedArray> retainers_arr,
- int* count,
- int* index,
- const char* root_name,
- Handle<String> id_sym,
- Handle<String> desc_sym,
- Handle<String> size_sym,
- Handle<Object> error) {
- HandleScope scope;
-
- // Scratch handles.
- Handle<JSObject> detail;
- Handle<String> desc;
- Handle<HeapObject> retainer;
-
- if (visitor.found()) {
- if (!filter->is_active()) {
- (*total_count)++;
- if (summary) {
- SetRootFound(summary);
- } else if ((*total_count > start) && ((*index) < dump_limit)) {
- (*count)++;
- if (!retainers_arr.is_null()) {
- return AddObjDetail(retainers_arr,
- (*index)++,
- 0,
- retainer,
- root_name,
- id_sym,
- desc_sym,
- size_sym,
- detail,
- desc,
- error);
- }
- }
- }
- }
- return true;
-}
-
-
-inline void SetFoundRoot(LiveObjectSummary* summary) {
- summary->set_found_root();
-}
-
-
-inline void SetFoundWeakRoot(LiveObjectSummary* summary) {
- summary->set_found_weak_root();
-}
-
-
-int LiveObjectList::GetRetainers(Handle<HeapObject> target,
- Handle<JSObject> instance_filter,
- Handle<FixedArray> retainers_arr,
- int start,
- int dump_limit,
- int* total_count,
- LolFilter* filter,
- LiveObjectSummary* summary,
- JSFunction* arguments_function,
- Handle<Object> error) {
- HandleScope scope;
-
- // Scratch handles.
- Handle<JSObject> detail;
- Handle<String> desc;
- Handle<HeapObject> retainer;
-
- Isolate* isolate = Isolate::Current();
- Factory* factory = isolate->factory();
-
- // Prefetch some needed symbols.
- Handle<String> id_sym = factory->LookupAsciiSymbol("id");
- Handle<String> desc_sym = factory->LookupAsciiSymbol("desc");
- Handle<String> size_sym = factory->LookupAsciiSymbol("size");
-
- NoHandleAllocation ha;
- int count = 0;
- int index = 0;
- Handle<JSObject> last_obj;
-
- *total_count = 0;
-
- // Iterate roots.
- LolVisitor lol_visitor(*target, target);
- isolate->heap()->IterateStrongRoots(&lol_visitor, VISIT_ALL);
- if (!AddRootRetainerIfFound(lol_visitor,
- filter,
- summary,
- SetFoundRoot,
- start,
- dump_limit,
- total_count,
- retainers_arr,
- &count,
- &index,
- "<root>",
- id_sym,
- desc_sym,
- size_sym,
- error)) {
- return -1;
- }
-
- lol_visitor.reset();
- isolate->heap()->IterateWeakRoots(&lol_visitor, VISIT_ALL);
- if (!AddRootRetainerIfFound(lol_visitor,
- filter,
- summary,
- SetFoundWeakRoot,
- start,
- dump_limit,
- total_count,
- retainers_arr,
- &count,
- &index,
- "<weak root>",
- id_sym,
- desc_sym,
- size_sym,
- error)) {
- return -1;
- }
-
- // Iterate the live object lists.
- LolIterator it(NULL, last());
- for (it.Init(); !it.Done() && (index < dump_limit); it.Next()) {
- HeapObject* heap_obj = it.Obj();
-
-    // Only look at JSObjects.
- if (heap_obj->IsJSObject()) {
- // Skip context extension objects and argument arrays as these are
- // checked in the context of functions using them.
- JSObject* obj = JSObject::cast(heap_obj);
- if (obj->IsJSContextExtensionObject() ||
- obj->map()->constructor() == arguments_function) {
- continue;
- }
-
- // Check if the JS object has a reference to the object looked for.
- if (obj->ReferencesObject(*target)) {
- // Check instance filter if supplied. This is normally used to avoid
- // references from mirror objects (see Runtime_IsInPrototypeChain).
- if (!instance_filter->IsUndefined()) {
- Object* V = obj;
- while (true) {
- Object* prototype = V->GetPrototype();
- if (prototype->IsNull()) {
- break;
- }
- if (*instance_filter == prototype) {
- obj = NULL; // Don't add this object.
- break;
- }
- V = prototype;
- }
- }
-
- if (obj != NULL) {
-          // Skip objects that have been filtered out.
-          if (!filter->Matches(heap_obj)) {
-            continue;
-          }
-
-          // Valid reference found: add it to the instance array if one was
-          // supplied, and update the count.
- last_obj = Handle<JSObject>(obj);
- (*total_count)++;
-
- if (summary != NULL) {
- summary->Add(heap_obj);
- } else if ((*total_count > start) && (index < dump_limit)) {
- count++;
- if (!retainers_arr.is_null()) {
- retainer = Handle<HeapObject>(heap_obj);
- bool success = AddObjDetail(retainers_arr,
- index++,
- it.Id(),
- retainer,
- NULL,
- id_sym,
- desc_sym,
- size_sym,
- detail,
- desc,
- error);
- if (!success) return -1;
- }
- }
- }
- }
- }
- }
-
- // Check for circular reference only. This can happen when the object is only
- // referenced from mirrors and has a circular reference in which case the
- // object is not really alive and would have been garbage collected if not
- // referenced from the mirror.
-
- if (*total_count == 1 && !last_obj.is_null() && *last_obj == *target) {
- count = 0;
- *total_count = 0;
- }
-
- return count;
-}
-
-
-MaybeObject* LiveObjectList::GetObjRetainers(int obj_id,
- Handle<JSObject> instance_filter,
- bool verbose,
- int start,
- int dump_limit,
- Handle<JSObject> filter_obj) {
- Isolate* isolate = Isolate::Current();
- Factory* factory = isolate->factory();
- Heap* heap = isolate->heap();
-
- HandleScope scope(isolate);
-
- // Get the target object.
- HeapObject* heap_obj = HeapObject::cast(GetObj(obj_id));
- if (heap_obj == heap->undefined_value()) {
- return heap_obj;
- }
-
- Handle<HeapObject> target = Handle<HeapObject>(heap_obj);
-
- // Get the constructor function for context extension and arguments array.
- JSObject* arguments_boilerplate =
- isolate->context()->native_context()->arguments_boilerplate();
- JSFunction* arguments_function =
- JSFunction::cast(arguments_boilerplate->map()->constructor());
-
- Handle<JSFunction> args_function = Handle<JSFunction>(arguments_function);
- LolFilter filter(filter_obj);
-
- if (!verbose) {
- RetainersSummaryWriter writer(target, instance_filter, args_function);
- return SummarizePrivate(&writer, &filter, true);
-
- } else {
- RetainersDumpWriter writer(target, instance_filter, args_function);
- Object* body_obj;
- MaybeObject* maybe_result =
- DumpPrivate(&writer, start, dump_limit, &filter);
- if (!maybe_result->ToObject(&body_obj)) {
- return maybe_result;
- }
-
- // Set body.id.
- Handle<JSObject> body = Handle<JSObject>(JSObject::cast(body_obj));
- Handle<String> id_sym = factory->LookupAsciiSymbol("id");
- maybe_result = body->SetProperty(*id_sym,
- Smi::FromInt(obj_id),
- NONE,
- kNonStrictMode);
- if (maybe_result->IsFailure()) return maybe_result;
-
- return *body;
- }
-}
-
-
-Object* LiveObjectList::PrintObj(int obj_id) {
- Object* obj = GetObj(obj_id);
-  if (obj->IsUndefined()) {
- return HEAP->undefined_value();
- }
-
- EmbeddedVector<char, 128> temp_filename;
- static int temp_count = 0;
- const char* path_prefix = ".";
-
- Isolate* isolate = Isolate::Current();
- Factory* factory = isolate->factory();
- Heap* heap = isolate->heap();
-
- if (FLAG_lol_workdir) {
- path_prefix = FLAG_lol_workdir;
- }
- OS::SNPrintF(temp_filename, "%s/lol-print-%d", path_prefix, ++temp_count);
-
- FILE* f = OS::FOpen(temp_filename.start(), "w+");
-
- PrintF(f, "@%d ", LiveObjectList::GetObjId(obj));
-#ifdef OBJECT_PRINT
-#ifdef INSPECTOR
- Inspector::DumpObjectType(f, obj);
-#endif // INSPECTOR
- PrintF(f, "\n");
- obj->Print(f);
-#else // !OBJECT_PRINT
- obj->ShortPrint(f);
-#endif // !OBJECT_PRINT
- PrintF(f, "\n");
- Flush(f);
- fclose(f);
-
- // Create a string from the temp_file.
- // Note: the mmapped resource will take care of closing the file.
- MemoryMappedExternalResource* resource =
- new MemoryMappedExternalResource(temp_filename.start(), true);
- if (resource->exists() && !resource->is_empty()) {
- ASSERT(resource->IsAscii());
- Handle<String> dump_string =
- factory->NewExternalStringFromAscii(resource);
- heap->external_string_table()->AddString(*dump_string);
- return *dump_string;
- } else {
- delete resource;
- }
- return HEAP->undefined_value();
-}
-
-
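-// A path tracer that prints the retaining path it finds to the given file.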
-class LolPathTracer: public PathTracer {
- public:
- LolPathTracer(FILE* out,
- Object* search_target,
- WhatToFind what_to_find)
- : PathTracer(search_target, what_to_find, VISIT_ONLY_STRONG), out_(out) {}
-
- private:
- void ProcessResults();
-
- FILE* out_;
-};
-
-
-void LolPathTracer::ProcessResults() {
- if (found_target_) {
- PrintF(out_, "=====================================\n");
- PrintF(out_, "==== Path to object ====\n");
- PrintF(out_, "=====================================\n\n");
-
- ASSERT(!object_stack_.is_empty());
- Object* prev = NULL;
- for (int i = 0, index = 0; i < object_stack_.length(); i++) {
- Object* obj = object_stack_[i];
-
- // Skip this object if it is basically the internals of the
- // previous object (which would have dumped its details already).
- if (prev && prev->IsJSObject() &&
- (obj != search_target_)) {
- JSObject* jsobj = JSObject::cast(prev);
- if (obj->IsFixedArray() &&
- jsobj->properties() == FixedArray::cast(obj)) {
- // Skip this one because it would have been printed as the
- // properties of the last object already.
- continue;
- } else if (obj->IsHeapObject() &&
- jsobj->elements() == HeapObject::cast(obj)) {
- // Skip this one because it would have been printed as the
- // elements of the last object already.
- continue;
- }
- }
-
- // Print a connecting arrow.
- if (i > 0) PrintF(out_, "\n |\n |\n V\n\n");
-
- // Print the object index.
- PrintF(out_, "[%d] ", ++index);
-
- // Print the LOL object ID:
- int id = LiveObjectList::GetObjId(obj);
- if (id > 0) PrintF(out_, "@%d ", id);
-
-#ifdef OBJECT_PRINT
-#ifdef INSPECTOR
- Inspector::DumpObjectType(out_, obj);
-#endif // INSPECTOR
- PrintF(out_, "\n");
- obj->Print(out_);
-#else // !OBJECT_PRINT
- obj->ShortPrint(out_);
- PrintF(out_, "\n");
-#endif // !OBJECT_PRINT
- Flush(out_);
- }
- PrintF(out_, "\n");
- PrintF(out_, "=====================================\n\n");
- Flush(out_);
- }
-}
-
-
-Object* LiveObjectList::GetPathPrivate(HeapObject* obj1, HeapObject* obj2) {
- EmbeddedVector<char, 128> temp_filename;
- static int temp_count = 0;
- const char* path_prefix = ".";
-
- if (FLAG_lol_workdir) {
- path_prefix = FLAG_lol_workdir;
- }
- OS::SNPrintF(temp_filename, "%s/lol-getpath-%d", path_prefix, ++temp_count);
-
- FILE* f = OS::FOpen(temp_filename.start(), "w+");
-
- Isolate* isolate = Isolate::Current();
- Factory* factory = isolate->factory();
- Heap* heap = isolate->heap();
-
- // Save the previous verbosity.
- bool prev_verbosity = FLAG_use_verbose_printer;
- FLAG_use_verbose_printer = false;
-
- // Dump the paths.
- {
- // The tracer needs to be scoped because its usage asserts no allocation,
- // and we need to allocate the result string below.
- LolPathTracer tracer(f, obj2, LolPathTracer::FIND_FIRST);
-
- bool found = false;
- if (obj1 == NULL) {
- // Check for ObjectGroups that reference this object.
- // TODO(mlam): refactor this to be more modular.
- {
- List<ObjectGroup*>* groups = isolate->global_handles()->object_groups();
- for (int i = 0; i < groups->length(); i++) {
- ObjectGroup* group = groups->at(i);
- if (group == NULL) continue;
-
- bool found_group = false;
- for (size_t j = 0; j < group->length_; j++) {
- Object* object = *(group->objects_[j]);
- HeapObject* hobj = HeapObject::cast(object);
- if (obj2 == hobj) {
- found_group = true;
- break;
- }
- }
-
- if (found_group) {
- PrintF(f,
- "obj %p is a member of object group %p {\n",
- reinterpret_cast<void*>(obj2),
- reinterpret_cast<void*>(group));
- for (size_t j = 0; j < group->length_; j++) {
- Object* object = *(group->objects_[j]);
- if (!object->IsHeapObject()) continue;
-
- HeapObject* hobj = HeapObject::cast(object);
- int id = GetObjId(hobj);
- if (id != 0) {
- PrintF(f, " @%d:", id);
- } else {
- PrintF(f, " <no id>:");
- }
-
- char buffer[512];
- GenerateObjectDesc(hobj, buffer, sizeof(buffer));
- PrintF(f, " %s", buffer);
- if (hobj == obj2) {
- PrintF(f, " <===");
- }
- PrintF(f, "\n");
- }
- PrintF(f, "}\n");
- }
- }
- }
-
- PrintF(f, "path from roots to obj %p\n", reinterpret_cast<void*>(obj2));
- heap->IterateRoots(&tracer, VISIT_ONLY_STRONG);
- found = tracer.found();
-
- if (!found) {
- PrintF(f, " No paths found. Checking symbol tables ...\n");
- SymbolTable* symbol_table = HEAP->raw_unchecked_symbol_table();
- tracer.VisitPointers(reinterpret_cast<Object**>(&symbol_table),
- reinterpret_cast<Object**>(&symbol_table)+1);
- found = tracer.found();
- if (!found) {
- symbol_table->IteratePrefix(&tracer);
- found = tracer.found();
- }
- }
-
- if (!found) {
- PrintF(f, " No paths found. Checking weak roots ...\n");
- // Check weak refs next.
- isolate->global_handles()->IterateWeakRoots(&tracer);
- found = tracer.found();
- }
-
- } else {
- PrintF(f, "path from obj %p to obj %p:\n",
- reinterpret_cast<void*>(obj1), reinterpret_cast<void*>(obj2));
- tracer.TracePathFrom(reinterpret_cast<Object**>(&obj1));
- found = tracer.found();
- }
-
- if (!found) {
- PrintF(f, " No paths found\n\n");
- }
- }
-
- // Flush and clean up the dumped file.
- Flush(f);
- fclose(f);
-
- // Restore the previous verbosity.
- FLAG_use_verbose_printer = prev_verbosity;
-
- // Create a string from the temp_file.
- // Note: the mmapped resource will take care of closing the file.
- MemoryMappedExternalResource* resource =
- new MemoryMappedExternalResource(temp_filename.start(), true);
- if (resource->exists() && !resource->is_empty()) {
- ASSERT(resource->IsAscii());
- Handle<String> path_string =
- factory->NewExternalStringFromAscii(resource);
- heap->external_string_table()->AddString(*path_string);
- return *path_string;
- } else {
- delete resource;
- }
- return heap->undefined_value();
-}
-
-
-Object* LiveObjectList::GetPath(int obj_id1,
- int obj_id2,
- Handle<JSObject> instance_filter) {
- HandleScope scope;
-
- // Get the target object.
- HeapObject* obj1 = NULL;
- if (obj_id1 != 0) {
- obj1 = HeapObject::cast(GetObj(obj_id1));
- if (obj1 == HEAP->undefined_value()) {
- return obj1;
- }
- }
-
- HeapObject* obj2 = HeapObject::cast(GetObj(obj_id2));
- if (obj2 == HEAP->undefined_value()) {
- return obj2;
- }
-
- return GetPathPrivate(obj1, obj2);
-}
-
-
-void LiveObjectList::DoProcessNonLive(HeapObject* obj) {
- // We should only be called if we have at least one lol to search.
- ASSERT(last() != NULL);
- Element* element = last()->Find(obj);
- if (element != NULL) {
- NullifyNonLivePointer(&element->obj_);
- }
-}
-
-
-void LiveObjectList::IterateElementsPrivate(ObjectVisitor* v) {
- LiveObjectList* lol = last();
- while (lol != NULL) {
- Element* elements = lol->elements_;
- int count = lol->obj_count_;
- for (int i = 0; i < count; i++) {
- HeapObject** p = &elements[i].obj_;
- v->VisitPointer(reinterpret_cast<Object**>(p));
- }
- lol = lol->prev_;
- }
-}
-
-
-// Purpose: Called by GCEpilogue to purge duplicates. Not to be called by
-// anyone else.
-void LiveObjectList::PurgeDuplicates() {
- bool is_sorted = false;
- LiveObjectList* lol = last();
- if (!lol) {
- return; // Nothing to purge.
- }
-
- int total_count = lol->TotalObjCount();
- if (!total_count) {
- return; // Nothing to purge.
- }
-
- Element* elements = NewArray<Element>(total_count);
- int count = 0;
-
- // Copy all the object elements into a consecutive array.
- while (lol) {
- memcpy(&elements[count], lol->elements_, lol->obj_count_ * sizeof(Element));
- count += lol->obj_count_;
- lol = lol->prev_;
- }
- qsort(elements, total_count, sizeof(Element),
- reinterpret_cast<RawComparer>(CompareElement));
-
- ASSERT(count == total_count);
-
- // Iterate over all objects in the consolidated list and check for dups.
- total_count--;
- for (int i = 0; i < total_count; ) {
- Element* curr = &elements[i];
- HeapObject* curr_obj = curr->obj_;
- int j = i+1;
- bool done = false;
-
- while (!done && (j < total_count)) {
- // Process if the element's object is still live after the current GC.
- // Non-live objects will be converted to SMIs, i.e. not HeapObjects.
- if (curr_obj->IsHeapObject()) {
- Element* next = &elements[j];
- HeapObject* next_obj = next->obj_;
- if (next_obj->IsHeapObject()) {
- if (curr_obj != next_obj) {
- done = true;
- continue; // Live object but no match. Move on.
- }
-
- // NOTE: we've just GCed the LOLs. Hence, they are no longer sorted.
- // Since we've detected at least one duplicate that we need to search
- // for, we'll sort the LOLs to enable the use of NullifyMostRecent()
- // below. We only need to sort once (with one exception ... see below).
- if (!is_sorted) {
- SortAll();
- is_sorted = true;
- }
-
- // We have a match. Need to nullify the most recent ref to this
- // object. We'll keep the oldest ref:
- // Note: we will nullify the element record in the LOL
- // database, not in the local sorted copy of the elements.
- NullifyMostRecent(curr_obj);
- }
- }
- // Either the object was already marked for purging, or we just marked
- // it. Either way, if there's more than one dup, then we need to check
- // the next element for another possible dup against the current as well
- // before we move on. So, here we go.
- j++;
- }
-
- // We can move on to checking the match on the next element.
- i = j;
- }
-
- DeleteArray<Element>(elements);
-}
-
-
-// Purpose: Purges dead objects and resorts the LOLs.
-void LiveObjectList::GCEpiloguePrivate() {
- // Note: During the GC, ConsStrings may be collected and pointers may be
- // forwarded to their constituent strings. As a result, we may find dupes
- // of object references in the LOL list.
- // Another common way we get dups is that free chunks that have been swept
- // in the oldGen heap may be kept as ByteArray objects in a free list.
- //
- // When we promote live objects from the youngGen, the object may be moved
- // to the start of these free chunks. Since there is no free or move event
- // for the free chunks, their addresses will show up twice: once for their
- // original free ByteArray selves, and once for the newly promoted youngGen
- // object. Hence, we can get a duplicate address in the LOL again.
- //
- // We need to eliminate these dups because the LOL implementation expects to
- // only have at most one unique LOL reference to any object at any time.
- PurgeDuplicates();
-
- // After the GC, sweep away all free'd Elements and compact.
- LiveObjectList* prev = NULL;
- LiveObjectList* next = NULL;
-
- // Iterating from the youngest lol to the oldest lol.
- for (LiveObjectList* lol = last(); lol; lol = prev) {
- Element* elements = lol->elements_;
- prev = lol->prev(); // Save the prev.
-
- // Remove any references to collected objects.
- int i = 0;
- while (i < lol->obj_count_) {
- Element& element = elements[i];
- if (!element.obj_->IsHeapObject()) {
- // If the HeapObject address was converted into a SMI, then this
- // is a dead object. Copy the last element over this one.
- element = elements[lol->obj_count_ - 1];
- lol->obj_count_--;
- // We've just moved the last element into this index. We'll revisit
- // this index again. Hence, no need to increment the iterator.
- } else {
- i++; // Look at the next element next.
- }
- }
-
- int new_count = lol->obj_count_;
-
- // Check if there are any more elements to keep after purging the dead ones.
- if (new_count == 0) {
- DeleteArray<Element>(elements);
- lol->elements_ = NULL;
- lol->capacity_ = 0;
- ASSERT(lol->obj_count_ == 0);
-
- // If the list is also invisible, then clean up the list as well.
- if (lol->id_ == 0) {
- // Point the next lol's prev to this lol's prev.
- if (next) {
- next->prev_ = lol->prev_;
- } else {
- last_ = lol->prev_;
- }
-
- // Delete this now empty and invisible lol.
- delete lol;
-
- // Don't point the next to this lol since it is now deleted.
- // Leave the next pointer pointing to the current lol.
- continue;
- }
-
- } else {
- // If the obj_count_ is less than the capacity and the difference is
- // greater than a specified threshold, then we should shrink the list.
- int diff = lol->capacity_ - new_count;
- const int kMaxUnusedSpace = 64;
- if (diff > kMaxUnusedSpace) { // Threshold for shrinking.
- // Shrink the list.
- Element* new_elements = NewArray<Element>(new_count);
- memcpy(new_elements, elements, new_count * sizeof(Element));
-
- DeleteArray<Element>(elements);
- lol->elements_ = new_elements;
- lol->capacity_ = new_count;
- }
- ASSERT(lol->obj_count_ == new_count);
-
- lol->Sort(); // We've moved objects. Re-sort just in case.
- }
-
- // Save the next (for the previous link) in case we need it later.
- next = lol;
- }
-
-#ifdef VERIFY_LOL
- if (FLAG_verify_lol) {
- Verify();
- }
-#endif
-}
-
-
-#ifdef VERIFY_LOL
-void LiveObjectList::Verify(bool match_heap_exactly) {
- OS::Print("Verifying the LiveObjectList database:\n");
-
- LiveObjectList* lol = last();
- if (lol == NULL) {
- OS::Print(" No lol database to verify\n");
- return;
- }
-
- OS::Print(" Preparing the lol database ...\n");
- int total_count = lol->TotalObjCount();
-
- Element* elements = NewArray<Element>(total_count);
- int count = 0;
-
- // Copy all the object elements into a consecutive array.
- OS::Print(" Copying the lol database ...\n");
- while (lol != NULL) {
- memcpy(&elements[count], lol->elements_, lol->obj_count_ * sizeof(Element));
- count += lol->obj_count_;
- lol = lol->prev_;
- }
- qsort(elements, total_count, sizeof(Element),
- reinterpret_cast<RawComparer>(CompareElement));
-
- ASSERT(count == total_count);
-
- // Iterate over all objects in the heap and check for:
- // 1. object in LOL but not in heap i.e. error.
- // 2. object in heap but not in LOL (possibly not an error). Usually
- // just means that we don't have a capture of the latest heap.
- // That is unless we did this verify immediately after a capture,
- // and specified match_heap_exactly = true.
-
- int number_of_heap_objects = 0;
- int number_of_matches = 0;
- int number_not_in_heap = total_count;
- int number_not_in_lol = 0;
-
- OS::Print(" Start verify ...\n");
- OS::Print(" Verifying ...");
- Flush();
- HeapIterator iterator;
- HeapObject* heap_obj = NULL;
- while ((heap_obj = iterator.next()) != NULL) {
- number_of_heap_objects++;
-
- // Check if the heap_obj is in the lol.
- Element key;
- key.obj_ = heap_obj;
-
- Element* result = reinterpret_cast<Element*>(
- bsearch(&key, elements, total_count, sizeof(Element),
- reinterpret_cast<RawComparer>(CompareElement)));
-
- if (result != NULL) {
- number_of_matches++;
- number_not_in_heap--;
- // Mark it as found by changing it into a SMI (mask off low bit).
- // Note: we cannot use HeapObject::cast() here because it asserts that
- // the HeapObject bit is set on the address, but we're unsetting it on
- // purpose here for our marking.
- result->obj_ = reinterpret_cast<HeapObject*>(heap_obj->address());
-
- } else {
- number_not_in_lol++;
- if (match_heap_exactly) {
- OS::Print("heap object %p NOT in lol database\n", heap_obj);
- }
- }
- // Show some sign of life.
- if (number_of_heap_objects % 1000 == 0) {
- OS::Print(".");
- fflush(stdout);
- }
- }
- OS::Print("\n");
-
- // Reporting lol objects not found in the heap.
- if (number_not_in_heap) {
- int found = 0;
- for (int i = 0; (i < total_count) && (found < number_not_in_heap); i++) {
- Element& element = elements[i];
- if (element.obj_->IsHeapObject()) {
- OS::Print("lol database object [%d of %d] %p NOT in heap\n",
- i, total_count, element.obj_);
- found++;
- }
- }
- }
-
- DeleteArray<Element>(elements);
-
- OS::Print("number of objects in lol database %d\n", total_count);
- OS::Print("number of heap objects .......... %d\n", number_of_heap_objects);
- OS::Print("number of matches ............... %d\n", number_of_matches);
- OS::Print("number NOT in heap .............. %d\n", number_not_in_heap);
- OS::Print("number NOT in lol database ...... %d\n", number_not_in_lol);
-
- if (number_of_matches != total_count) {
- OS::Print(" *** ERROR: "
- "NOT all lol database objects match heap objects.\n");
- }
- if (number_not_in_heap != 0) {
- OS::Print(" *** ERROR: %d lol database objects not found in heap.\n",
- number_not_in_heap);
- }
- if (match_heap_exactly) {
- if (number_not_in_lol != 0) {
- OS::Print(" *** ERROR: %d heap objects NOT found in lol database.\n",
- number_not_in_lol);
- }
- }
-
- ASSERT(number_of_matches == total_count);
- ASSERT(number_not_in_heap == 0);
- ASSERT(number_not_in_lol == (number_of_heap_objects - total_count));
- if (match_heap_exactly) {
- ASSERT(total_count == number_of_heap_objects);
- ASSERT(number_not_in_lol == 0);
- }
-
- OS::Print(" Verify the lol database is sorted ...\n");
- lol = last();
- while (lol != NULL) {
- Element* elements = lol->elements_;
- for (int i = 0; i < lol->obj_count_ - 1; i++) {
- if (elements[i].obj_ >= elements[i+1].obj_) {
- OS::Print(" *** ERROR: lol %p obj[%d] %p > obj[%d] %p\n",
- lol, i, elements[i].obj_, i+1, elements[i+1].obj_);
- }
- }
- lol = lol->prev_;
- }
-
- OS::Print(" DONE verifying.\n\n\n");
-}
-
-
-void LiveObjectList::VerifyNotInFromSpace() {
- OS::Print("VerifyNotInFromSpace() ...\n");
- LolIterator it(NULL, last());
- Heap* heap = ISOLATE->heap();
- int i = 0;
- for (it.Init(); !it.Done(); it.Next()) {
- HeapObject* heap_obj = it.Obj();
- if (heap->InFromSpace(heap_obj)) {
- OS::Print(" ERROR: VerifyNotInFromSpace: [%d] obj %p in From space %p\n",
- i++, heap_obj, Heap::new_space()->FromSpaceStart());
- }
- }
-}
-#endif // VERIFY_LOL
-
-
-} } // namespace v8::internal
-
-#endif // LIVE_OBJECT_LIST
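For context on the removal above: PurgeDuplicates() and Verify() both flatten every lol's Element records into one buffer, qsort it by object address, and then scan (or bsearch) adjacent entries for duplicate addresses. A minimal standalone sketch of that pattern, with Element reduced to the two fields the comparator needs and plain void* standing in for HeapObject* (illustration only, not V8 code):

#include <cstdint>
#include <cstdio>
#include <cstdlib>

struct Element {
  int id_;
  void* obj_;  // stand-in for HeapObject*
};

// Mirrors the role of LiveObjectList::CompareElement: order by address.
static int CompareElement(const void* a, const void* b) {
  uintptr_t pa = reinterpret_cast<uintptr_t>(static_cast<const Element*>(a)->obj_);
  uintptr_t pb = reinterpret_cast<uintptr_t>(static_cast<const Element*>(b)->obj_);
  if (pa < pb) return -1;
  return (pa > pb) ? 1 : 0;
}

int main() {
  int x, y;
  Element elements[] = { {1, &y}, {2, &x}, {3, &y} };  // ids 1 and 3 collide
  qsort(elements, 3, sizeof(Element), CompareElement);
  for (int i = 0; i < 2; i++) {  // adjacent entries sharing an address = dup
    if (elements[i].obj_ == elements[i + 1].obj_) {
      printf("dup: ids %d and %d\n", elements[i].id_, elements[i + 1].id_);
    }
  }
  return 0;
}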
diff --git a/src/3rdparty/v8/src/liveobjectlist.h b/src/3rdparty/v8/src/liveobjectlist.h
deleted file mode 100644
index 1aa9196..0000000
--- a/src/3rdparty/v8/src/liveobjectlist.h
+++ /dev/null
@@ -1,319 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_LIVEOBJECTLIST_H_
-#define V8_LIVEOBJECTLIST_H_
-
-#include "v8.h"
-
-#include "checks.h"
-#include "heap.h"
-#include "objects.h"
-#include "globals.h"
-
-namespace v8 {
-namespace internal {
-
-#ifdef LIVE_OBJECT_LIST
-
-#ifdef DEBUG
- // The following symbol, when defined, enables thorough verification of lol
- // data. FLAG_verify_lol will also need to be set to true to enable it.
-#define VERIFY_LOL
-#endif
-
-
-typedef int LiveObjectType;
-class LolFilter;
-class LiveObjectSummary;
-class DumpWriter;
-class SummaryWriter;
-
-
-// The LiveObjectList is both a mechanism for tracking a live capture of
- // objects in the JS heap, as well as the data structure which represents
-// each of those captures. Unlike a snapshot, the lol is live. For example,
-// if an object in a captured lol dies and is collected by the GC, the lol
-// will reflect that the object is no longer available. The term
-// LiveObjectList (and lol) is used to describe both the mechanism and the
-// data structure depending on context of use.
-//
-// In captured lols, objects are tracked using their address and an object id.
-// The object id is unique. Once assigned to an object, the object id can never
- // be assigned to another object. That is, unless all captured lols are deleted,
- // which allows the user to start over with a fresh set of lols and object ids.
-// The uniqueness of the object ids allows the user to track specific objects
- // and inspect their longevity while debugging JS code in execution.
-//
- // The lol comes with utility functions to capture, dump, summarize, and diff
- // captured lols, among other functionality. This functionality is accessible
- // via the v8 debugger interface.
-class LiveObjectList {
- public:
- inline static void GCEpilogue();
- inline static void GCPrologue();
- inline static void IterateElements(ObjectVisitor* v);
- inline static void ProcessNonLive(HeapObject* obj);
- inline static void UpdateReferencesForScavengeGC();
-
- // Note: LOLs can be listed by calling Dump(0, <lol id>), and 2 LOLs can be
- // compared/diff'ed using Dump(<lol id1>, <lol id2>, ...). This will yield
- // a verbose dump of all the objects in the resultant lists.
- // Similarly, a summarized result of a LOL listing or a diff can be
- // obtained using Summarize(0, <lol id>) and Summarize(<lol id1>,
- // <lol id2>, ...) respectively.
-
- static MaybeObject* Capture();
- static bool Delete(int id);
- static MaybeObject* Dump(int id1,
- int id2,
- int start_idx,
- int dump_limit,
- Handle<JSObject> filter_obj);
- static MaybeObject* Info(int start_idx, int dump_limit);
- static MaybeObject* Summarize(int id1, int id2, Handle<JSObject> filter_obj);
-
- static void Reset();
- static Object* GetObj(int obj_id);
- static int GetObjId(Object* obj);
- static Object* GetObjId(Handle<String> address);
- static MaybeObject* GetObjRetainers(int obj_id,
- Handle<JSObject> instance_filter,
- bool verbose,
- int start,
- int count,
- Handle<JSObject> filter_obj);
-
- static Object* GetPath(int obj_id1,
- int obj_id2,
- Handle<JSObject> instance_filter);
- static Object* PrintObj(int obj_id);
-
- private:
- struct Element {
- int id_;
- HeapObject* obj_;
- };
-
- explicit LiveObjectList(LiveObjectList* prev, int capacity);
- ~LiveObjectList();
-
- static void GCEpiloguePrivate();
- static void IterateElementsPrivate(ObjectVisitor* v);
-
- static void DoProcessNonLive(HeapObject* obj);
-
- static int CompareElement(const Element* a, const Element* b);
-
- static Object* GetPathPrivate(HeapObject* obj1, HeapObject* obj2);
-
- static int GetRetainers(Handle<HeapObject> target,
- Handle<JSObject> instance_filter,
- Handle<FixedArray> retainers_arr,
- int start,
- int dump_limit,
- int* total_count,
- LolFilter* filter,
- LiveObjectSummary* summary,
- JSFunction* arguments_function,
- Handle<Object> error);
-
- static MaybeObject* DumpPrivate(DumpWriter* writer,
- int start,
- int dump_limit,
- LolFilter* filter);
- static MaybeObject* SummarizePrivate(SummaryWriter* writer,
- LolFilter* filter,
- bool is_tracking_roots);
-
- static bool NeedLOLProcessing() { return (last() != NULL); }
- static void NullifyNonLivePointer(HeapObject** p) {
- // Mask out the low bit that marks this as a heap object. We'll use this
- // cleared bit as an indicator that this pointer needs to be collected.
- //
- // Meanwhile, we still preserve its approximate value so that we don't
- // have to resort the elements list all the time.
- //
- // Note: Doing so also makes this HeapObject* look like an SMI. Hence,
- // the GC pointer updater will ignore it when it gets scanned.
- *p = reinterpret_cast<HeapObject*>((*p)->address());
- }
-
- LiveObjectList* prev() { return prev_; }
- LiveObjectList* next() { return next_; }
- int id() { return id_; }
-
- static int list_count() { return list_count_; }
- static LiveObjectList* last() { return last_; }
-
- inline static LiveObjectList* FindLolForId(int id, LiveObjectList* start_lol);
- int TotalObjCount() { return GetTotalObjCountAndSize(NULL); }
- int GetTotalObjCountAndSize(int* size_p);
-
- bool Add(HeapObject* obj);
- Element* Find(HeapObject* obj);
- static void NullifyMostRecent(HeapObject* obj);
- void Sort();
- static void SortAll();
-
- static void PurgeDuplicates(); // Only to be called by GCEpilogue.
-
-#ifdef VERIFY_LOL
- static void Verify(bool match_heap_exactly = false);
- static void VerifyNotInFromSpace();
-#endif
-
- // Iterates the elements in every lol and returns the one that matches the
- // specified key. If no matching element is found, then it returns NULL.
- template <typename T>
- inline static LiveObjectList::Element*
- FindElementFor(T (*GetValue)(LiveObjectList::Element*), T key);
-
- inline static int GetElementId(Element* element);
- inline static HeapObject* GetElementObj(Element* element);
-
- // Instance fields.
- LiveObjectList* prev_;
- LiveObjectList* next_;
- int id_;
- int capacity_;
- int obj_count_;
- Element* elements_;
-
- // Statics for managing all the lists.
- static uint32_t next_element_id_;
- static int list_count_;
- static int last_id_;
- static LiveObjectList* first_;
- static LiveObjectList* last_;
-
- friend class LolIterator;
- friend class LolForwardIterator;
- friend class LolDumpWriter;
- friend class RetainersDumpWriter;
- friend class RetainersSummaryWriter;
- friend class UpdateLiveObjectListVisitor;
-};
-
-
-// Helper class for updating the LiveObjectList HeapObject pointers.
-class UpdateLiveObjectListVisitor: public ObjectVisitor {
- public:
- void VisitPointer(Object** p) { UpdatePointer(p); }
-
- void VisitPointers(Object** start, Object** end) {
- // Update all HeapObject pointers in [start, end).
- for (Object** p = start; p < end; p++) UpdatePointer(p);
- }
-
- private:
- // Based on Heap::ScavengeObject(), but only forwards pointers to live
- // new space objects and does not actually keep them alive.
- void UpdatePointer(Object** p) {
- Object* object = *p;
- if (!HEAP->InNewSpace(object)) return;
-
- HeapObject* heap_obj = HeapObject::cast(object);
- ASSERT(HEAP->InFromSpace(heap_obj));
-
- // We use the first word (where the map pointer usually is) of a heap
- // object to record the forwarding pointer. A forwarding pointer can
- // point to an old space, the code space, or the to space of the new
- // generation.
- MapWord first_word = heap_obj->map_word();
-
- // If the first word is a forwarding address, the object has already been
- // copied.
- if (first_word.IsForwardingAddress()) {
- *p = first_word.ToForwardingAddress();
- return;
-
- // Else, it's a dead object.
- } else {
- LiveObjectList::NullifyNonLivePointer(reinterpret_cast<HeapObject**>(p));
- }
- }
-};
-
-
-#else // !LIVE_OBJECT_LIST
-
-
-class LiveObjectList {
- public:
- inline static void GCEpilogue() {}
- inline static void GCPrologue() {}
- inline static void IterateElements(ObjectVisitor* v) {}
- inline static void ProcessNonLive(HeapObject* obj) {}
- inline static void UpdateReferencesForScavengeGC() {}
-
- inline static MaybeObject* Capture() { return HEAP->undefined_value(); }
- inline static bool Delete(int id) { return false; }
- inline static MaybeObject* Dump(int id1,
- int id2,
- int start_idx,
- int dump_limit,
- Handle<JSObject> filter_obj) {
- return HEAP->undefined_value();
- }
- inline static MaybeObject* Info(int start_idx, int dump_limit) {
- return HEAP->undefined_value();
- }
- inline static MaybeObject* Summarize(int id1,
- int id2,
- Handle<JSObject> filter_obj) {
- return HEAP->undefined_value();
- }
-
- inline static void Reset() {}
- inline static Object* GetObj(int obj_id) { return HEAP->undefined_value(); }
- inline static Object* GetObjId(Handle<String> address) {
- return HEAP->undefined_value();
- }
- inline static MaybeObject* GetObjRetainers(int obj_id,
- Handle<JSObject> instance_filter,
- bool verbose,
- int start,
- int count,
- Handle<JSObject> filter_obj) {
- return HEAP->undefined_value();
- }
-
- inline static Object* GetPath(int obj_id1,
- int obj_id2,
- Handle<JSObject> instance_filter) {
- return HEAP->undefined_value();
- }
- inline static Object* PrintObj(int obj_id) { return HEAP->undefined_value(); }
-};
-
-
-#endif // LIVE_OBJECT_LIST
-
-} } // namespace v8::internal
-
-#endif // V8_LIVEOBJECTLIST_H_
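The NullifyNonLivePointer() trick in the header above relies on V8's pointer tagging: a HeapObject pointer carries a set low bit while a SMI has it clear, so masking the bit off hides the entry from the GC's pointer updater while roughly preserving its sort position. A self-contained sketch of just the tagging idea (assumes a 1-bit tag; not actual V8 types):

#include <cassert>
#include <cstdint>

int main() {
  // Assumed scheme: heap object pointers have bit 0 set; values with bit 0
  // clear read as small integers (SMIs) and are skipped by the GC updater.
  uintptr_t tagged = 0x2468 | 1;                 // a "HeapObject" pointer
  assert((tagged & 1) != 0);                     // IsHeapObject() analogue
  uintptr_t nullified = tagged & ~uintptr_t(1);  // NullifyNonLivePointer()
  assert((nullified & 1) == 0);                  // now reads as a SMI
  assert(nullified == 0x2468);                   // address value preserved
  return 0;
}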
diff --git a/src/3rdparty/v8/src/log-utils.cc b/src/3rdparty/v8/src/log-utils.cc
index 7bd7baa..830c3da 100644
--- a/src/3rdparty/v8/src/log-utils.cc
+++ b/src/3rdparty/v8/src/log-utils.cc
@@ -67,6 +67,7 @@ void Log::Initialize() {
FLAG_log_suspect = true;
FLAG_log_handles = true;
FLAG_log_regexp = true;
+ FLAG_log_internal_timer_events = true;
}
// --prof implies --log-code.
@@ -80,7 +81,8 @@ void Log::Initialize() {
bool open_log_file = FLAG_log || FLAG_log_runtime || FLAG_log_api
|| FLAG_log_code || FLAG_log_gc || FLAG_log_handles || FLAG_log_suspect
- || FLAG_log_regexp || FLAG_log_state_changes || FLAG_ll_prof;
+ || FLAG_log_regexp || FLAG_log_state_changes || FLAG_ll_prof
+ || FLAG_log_internal_timer_events;
// If we're logging anything, we need to open the log file.
if (open_log_file) {
@@ -257,10 +259,10 @@ void LogMessageBuilder::AppendDetailed(String* str, bool show_impl_info) {
if (len > 0x1000)
len = 0x1000;
if (show_impl_info) {
- Append(str->IsAsciiRepresentation() ? 'a' : '2');
+ Append(str->IsOneByteRepresentation() ? 'a' : '2');
if (StringShape(str).IsExternal())
Append('e');
- if (StringShape(str).IsSymbol())
+ if (StringShape(str).IsInternalized())
Append('#');
Append(":%i:", str->length());
}
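With the rename above, AppendDetailed's impl-info prefix is: 'a' for a one-byte string ('2' for two-byte), 'e' if external, '#' if internalized, then ":<length>:". A small sketch of that prefix logic, with hypothetical booleans standing in for the String/StringShape queries:

#include <cstdio>

// one_byte/external/internalized stand in for str->IsOneByteRepresentation(),
// StringShape(str).IsExternal() and StringShape(str).IsInternalized().
static void AppendImplInfo(bool one_byte, bool external, bool internalized,
                           int length) {
  putchar(one_byte ? 'a' : '2');
  if (external) putchar('e');
  if (internalized) putchar('#');
  printf(":%i:", length);
}

int main() {
  AppendImplInfo(true, false, true, 11);  // prints: a#:11:
  putchar('\n');
  return 0;
}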
diff --git a/src/3rdparty/v8/src/log.cc b/src/3rdparty/v8/src/log.cc
index b049ffe..2ed0141 100644
--- a/src/3rdparty/v8/src/log.cc
+++ b/src/3rdparty/v8/src/log.cc
@@ -44,37 +44,6 @@
namespace v8 {
namespace internal {
-//
-// Sliding state window. Updates counters to keep track of the last
-// window of kBufferSize states. This is useful to track where we
-// spent our time.
-//
-class SlidingStateWindow {
- public:
- explicit SlidingStateWindow(Isolate* isolate);
- ~SlidingStateWindow();
- void AddState(StateTag state);
-
- private:
- static const int kBufferSize = 256;
- Counters* counters_;
- int current_index_;
- bool is_full_;
- byte buffer_[kBufferSize];
-
-
- void IncrementStateCounter(StateTag state) {
- counters_->state_counters(state)->Increment();
- }
-
-
- void DecrementStateCounter(StateTag state) {
- counters_->state_counters(state)->Decrement();
- }
-};
-
-
-//
// The Profiler samples pc and sp values for the main thread.
// Each sample is appended to a circular buffer.
// An independent thread removes data and writes it to the log.
@@ -189,24 +158,12 @@ class Ticker: public Sampler {
public:
Ticker(Isolate* isolate, int interval):
Sampler(isolate, interval),
- window_(NULL),
profiler_(NULL) {}
~Ticker() { if (IsActive()) Stop(); }
virtual void Tick(TickSample* sample) {
if (profiler_) profiler_->Insert(sample);
- if (window_) window_->AddState(sample->state);
- }
-
- void SetWindow(SlidingStateWindow* window) {
- window_ = window;
- if (!IsActive()) Start();
- }
-
- void ClearWindow() {
- window_ = NULL;
- if (!profiler_ && IsActive() && !RuntimeProfiler::IsEnabled()) Stop();
}
void SetProfiler(Profiler* profiler) {
@@ -219,7 +176,7 @@ class Ticker: public Sampler {
void ClearProfiler() {
DecreaseProfilingDepth();
profiler_ = NULL;
- if (!window_ && IsActive() && !RuntimeProfiler::IsEnabled()) Stop();
+ if (IsActive()) Stop();
}
protected:
@@ -228,42 +185,11 @@ class Ticker: public Sampler {
}
private:
- SlidingStateWindow* window_;
Profiler* profiler_;
};
//
-// SlidingStateWindow implementation.
-//
-SlidingStateWindow::SlidingStateWindow(Isolate* isolate)
- : counters_(isolate->counters()), current_index_(0), is_full_(false) {
- for (int i = 0; i < kBufferSize; i++) {
- buffer_[i] = static_cast<byte>(OTHER);
- }
- isolate->logger()->ticker_->SetWindow(this);
-}
-
-
-SlidingStateWindow::~SlidingStateWindow() {
- LOGGER->ticker_->ClearWindow();
-}
-
-
-void SlidingStateWindow::AddState(StateTag state) {
- if (is_full_) {
- DecrementStateCounter(static_cast<StateTag>(buffer_[current_index_]));
- } else if (current_index_ == kBufferSize - 1) {
- is_full_ = true;
- }
- buffer_[current_index_] = static_cast<byte>(state);
- IncrementStateCounter(state);
- ASSERT(IsPowerOf2(kBufferSize));
- current_index_ = (current_index_ + 1) & (kBufferSize - 1);
-}
-
-
-//
// Profiler implementation.
//
Profiler::Profiler(Isolate* isolate)
@@ -283,11 +209,7 @@ void Profiler::Engage() {
if (engaged_) return;
engaged_ = true;
- // TODO(mnaganov): This is actually "Chromium" mode. Flags need to be revised.
- // http://code.google.com/p/v8/issues/detail?id=487
- if (!FLAG_prof_lazy) {
- OS::LogSharedLibraryAddresses();
- }
+ OS::LogSharedLibraryAddresses();
// Start thread processing the profiler buffer.
running_ = true;
@@ -458,7 +380,10 @@ class Logger::NameBuffer {
if (str == NULL) return;
if (str->HasOnlyAsciiChars()) {
int utf8_length = Min(str->length(), kUtf8BufferSize - utf8_pos_);
- String::WriteToFlat(str, utf8_buffer_ + utf8_pos_, 0, utf8_length);
+ String::WriteToFlat(str,
+ reinterpret_cast<uint8_t*>(utf8_buffer_ + utf8_pos_),
+ 0,
+ utf8_length);
utf8_pos_ += utf8_length;
return;
}
@@ -467,7 +392,7 @@ class Logger::NameBuffer {
int previous = unibrow::Utf16::kNoPreviousCharacter;
for (int i = 0; i < uc16_length && utf8_pos_ < kUtf8BufferSize; ++i) {
uc16 c = utf16_buffer[i];
- if (c <= String::kMaxAsciiCharCodeU) {
+ if (c <= unibrow::Utf8::kMaxOneByteChar) {
utf8_buffer_[utf8_pos_++] = static_cast<char>(c);
} else {
int char_length = unibrow::Utf8::Length(c, previous);
@@ -515,10 +440,10 @@ class Logger::NameBuffer {
};
-Logger::Logger()
- : ticker_(NULL),
+Logger::Logger(Isolate* isolate)
+ : isolate_(isolate),
+ ticker_(NULL),
profiler_(NULL),
- sliding_state_window_(NULL),
log_events_(NULL),
logging_nesting_(0),
cpu_profiler_nesting_(0),
@@ -531,7 +456,8 @@ Logger::Logger()
prev_sp_(NULL),
prev_function_(NULL),
prev_to_(NULL),
- prev_code_(NULL) {
+ prev_code_(NULL),
+ epoch_(0) {
}
@@ -543,12 +469,17 @@ Logger::~Logger() {
void Logger::IssueCodeAddedEvent(Code* code,
+ Script* script,
const char* name,
size_t name_len) {
JitCodeEvent event;
+ memset(&event, 0, sizeof(event));
event.type = JitCodeEvent::CODE_ADDED;
event.code_start = code->instruction_start();
event.code_len = code->instruction_size();
+ Handle<Script> script_handle =
+ script != NULL ? Handle<Script>(script) : Handle<Script>();
+ event.script = v8::Handle<v8::Script>(ToApi<v8::Script>(script_handle));
event.name.str = name;
event.name.len = name_len;
@@ -587,6 +518,40 @@ void Logger::IssueCodeRemovedEvent(Address from) {
code_event_handler_(&event);
}
+void Logger::IssueAddCodeLinePosInfoEvent(
+ void* jit_handler_data,
+ int pc_offset,
+ int position,
+ JitCodeEvent::PositionType position_type) {
+ JitCodeEvent event;
+ memset(&event, 0, sizeof(event));
+ event.type = JitCodeEvent::CODE_ADD_LINE_POS_INFO;
+ event.user_data = jit_handler_data;
+ event.line_info.offset = pc_offset;
+ event.line_info.pos = position;
+ event.line_info.position_type = position_type;
+
+ code_event_handler_(&event);
+}
+
+void* Logger::IssueStartCodePosInfoEvent() {
+ JitCodeEvent event;
+ memset(&event, 0, sizeof(event));
+ event.type = JitCodeEvent::CODE_START_LINE_INFO_RECORDING;
+
+ code_event_handler_(&event);
+ return event.user_data;
+}
+
+void Logger::IssueEndCodePosInfoEvent(Code* code, void* jit_handler_data) {
+ JitCodeEvent event;
+ memset(&event, 0, sizeof(event));
+ event.type = JitCodeEvent::CODE_END_LINE_INFO_RECORDING;
+ event.code_start = code->instruction_start();
+ event.user_data = jit_handler_data;
+
+ code_event_handler_(&event);
+}
#define DECLARE_EVENT(ignore1, name) name,
static const char* const kLogEventsNames[Logger::NUMBER_OF_LOG_EVENTS] = {
@@ -704,6 +669,43 @@ void Logger::SharedLibraryEvent(const wchar_t* library_path,
}
+void Logger::TimerEvent(StartEnd se, const char* name) {
+ if (!log_->IsEnabled()) return;
+ ASSERT(FLAG_log_internal_timer_events);
+ LogMessageBuilder msg(this);
+ int since_epoch = static_cast<int>(OS::Ticks() - epoch_);
+ const char* format = (se == START) ? "timer-event-start,\"%s\",%ld\n"
+ : "timer-event-end,\"%s\",%ld\n";
+ msg.Append(format, name, since_epoch);
+ msg.WriteToLogFile();
+}
+
+
+void Logger::EnterExternal() {
+ LOG(ISOLATE, TimerEvent(START, TimerEventScope::v8_external));
+}
+
+
+void Logger::LeaveExternal() {
+ LOG(ISOLATE, TimerEvent(END, TimerEventScope::v8_external));
+}
+
+
+void Logger::TimerEventScope::LogTimerEvent(StartEnd se) {
+ LOG(isolate_, TimerEvent(se, name_));
+}
+
+
+const char* Logger::TimerEventScope::v8_recompile_synchronous =
+ "V8.RecompileSynchronous";
+const char* Logger::TimerEventScope::v8_recompile_parallel =
+ "V8.RecompileParallel";
+const char* Logger::TimerEventScope::v8_compile_full_code =
+ "V8.CompileFullCode";
+const char* Logger::TimerEventScope::v8_execute = "V8.Execute";
+const char* Logger::TimerEventScope::v8_external = "V8.External";
+
+
void Logger::LogRegExpSource(Handle<JSRegExp> regexp) {
// Prints "/" + re.source + "/" +
// (re.global?"g":"") + (re.ignorecase?"i":"") + (re.multiline?"m":"")
@@ -756,9 +758,11 @@ void Logger::RegExpCompileEvent(Handle<JSRegExp> regexp, bool in_cache) {
}
-void Logger::LogRuntime(Vector<const char> format, JSArray* args) {
+void Logger::LogRuntime(Isolate* isolate,
+ Vector<const char> format,
+ JSArray* args) {
if (!log_->IsEnabled() || !FLAG_log_runtime) return;
- HandleScope scope;
+ HandleScope scope(isolate);
LogMessageBuilder msg(this);
for (int i = 0; i < format.length(); i++) {
char c = format[i];
@@ -874,7 +878,7 @@ void Logger::CallbackEventInternal(const char* prefix, const char* name,
Address entry_point) {
if (!log_->IsEnabled() || !FLAG_log_code) return;
LogMessageBuilder msg(this);
- msg.Append("%s,%s,",
+ msg.Append("%s,%s,-3,",
kLogEventsNames[CODE_CREATION_EVENT],
kLogEventsNames[CALLBACK_TAG]);
msg.AppendAddress(entry_point);
@@ -919,7 +923,7 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag,
name_buffer_->AppendBytes(comment);
}
if (code_event_handler_ != NULL) {
- IssueCodeAddedEvent(code, name_buffer_->get(), name_buffer_->size());
+ IssueCodeAddedEvent(code, NULL, name_buffer_->get(), name_buffer_->size());
}
if (!log_->IsEnabled()) return;
if (FLAG_ll_prof) {
@@ -930,9 +934,10 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag,
}
if (!FLAG_log_code) return;
LogMessageBuilder msg(this);
- msg.Append("%s,%s,",
+ msg.Append("%s,%s,%d,",
kLogEventsNames[CODE_CREATION_EVENT],
- kLogEventsNames[tag]);
+ kLogEventsNames[tag],
+ code->kind());
msg.AppendAddress(code->address());
msg.Append(",%d,\"", code->ExecutableSize());
for (const char* p = comment; *p != '\0'; p++) {
@@ -958,7 +963,7 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag,
name_buffer_->AppendString(name);
}
if (code_event_handler_ != NULL) {
- IssueCodeAddedEvent(code, name_buffer_->get(), name_buffer_->size());
+ IssueCodeAddedEvent(code, NULL, name_buffer_->get(), name_buffer_->size());
}
if (!log_->IsEnabled()) return;
if (FLAG_ll_prof) {
@@ -969,9 +974,10 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag,
}
if (!FLAG_log_code) return;
LogMessageBuilder msg(this);
- msg.Append("%s,%s,",
+ msg.Append("%s,%s,%d,",
kLogEventsNames[CODE_CREATION_EVENT],
- kLogEventsNames[tag]);
+ kLogEventsNames[tag],
+ code->kind());
msg.AppendAddress(code->address());
msg.Append(",%d,\"", code->ExecutableSize());
msg.AppendDetailed(name, false);
@@ -1004,7 +1010,12 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag,
name_buffer_->AppendString(name);
}
if (code_event_handler_ != NULL) {
- IssueCodeAddedEvent(code, name_buffer_->get(), name_buffer_->size());
+ Script* script =
+ shared->script()->IsScript() ? Script::cast(shared->script()) : NULL;
+ IssueCodeAddedEvent(code,
+ script,
+ name_buffer_->get(),
+ name_buffer_->size());
}
if (!log_->IsEnabled()) return;
if (FLAG_ll_prof) {
@@ -1021,9 +1032,10 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag,
LogMessageBuilder msg(this);
SmartArrayPointer<char> str =
name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
- msg.Append("%s,%s,",
+ msg.Append("%s,%s,%d,",
kLogEventsNames[CODE_CREATION_EVENT],
- kLogEventsNames[tag]);
+ kLogEventsNames[tag],
+ code->kind());
msg.AppendAddress(code->address());
msg.Append(",%d,\"%s\",", code->ExecutableSize(), *str);
msg.AppendAddress(shared->address());
@@ -1053,7 +1065,12 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag,
name_buffer_->AppendInt(line);
}
if (code_event_handler_ != NULL) {
- IssueCodeAddedEvent(code, name_buffer_->get(), name_buffer_->size());
+ Script* script =
+ shared->script()->IsScript() ? Script::cast(shared->script()) : NULL;
+ IssueCodeAddedEvent(code,
+ script,
+ name_buffer_->get(),
+ name_buffer_->size());
}
if (!log_->IsEnabled()) return;
if (FLAG_ll_prof) {
@@ -1068,9 +1085,10 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag,
shared->DebugName()->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
SmartArrayPointer<char> sourcestr =
source->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
- msg.Append("%s,%s,",
+ msg.Append("%s,%s,%d,",
kLogEventsNames[CODE_CREATION_EVENT],
- kLogEventsNames[tag]);
+ kLogEventsNames[tag],
+ code->kind());
msg.AppendAddress(code->address());
msg.Append(",%d,\"%s %s:%d\",",
code->ExecutableSize(),
@@ -1093,7 +1111,7 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag, Code* code, int args_count) {
name_buffer_->AppendInt(args_count);
}
if (code_event_handler_ != NULL) {
- IssueCodeAddedEvent(code, name_buffer_->get(), name_buffer_->size());
+ IssueCodeAddedEvent(code, NULL, name_buffer_->get(), name_buffer_->size());
}
if (!log_->IsEnabled()) return;
if (FLAG_ll_prof) {
@@ -1104,9 +1122,10 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag, Code* code, int args_count) {
}
if (!FLAG_log_code) return;
LogMessageBuilder msg(this);
- msg.Append("%s,%s,",
+ msg.Append("%s,%s,%d,",
kLogEventsNames[CODE_CREATION_EVENT],
- kLogEventsNames[tag]);
+ kLogEventsNames[tag],
+ code->kind());
msg.AppendAddress(code->address());
msg.Append(",%d,\"args_count: %d\"", code->ExecutableSize(), args_count);
msg.Append('\n');
@@ -1130,7 +1149,7 @@ void Logger::RegExpCodeCreateEvent(Code* code, String* source) {
name_buffer_->AppendString(source);
}
if (code_event_handler_ != NULL) {
- IssueCodeAddedEvent(code, name_buffer_->get(), name_buffer_->size());
+ IssueCodeAddedEvent(code, NULL, name_buffer_->get(), name_buffer_->size());
}
if (!log_->IsEnabled()) return;
if (FLAG_ll_prof) {
@@ -1141,7 +1160,7 @@ void Logger::RegExpCodeCreateEvent(Code* code, String* source) {
}
if (!FLAG_log_code) return;
LogMessageBuilder msg(this);
- msg.Append("%s,%s,",
+ msg.Append("%s,%s,-2,",
kLogEventsNames[CODE_CREATION_EVENT],
kLogEventsNames[REG_EXP_TAG]);
msg.AppendAddress(code->address());
@@ -1174,6 +1193,40 @@ void Logger::CodeDeleteEvent(Address from) {
DeleteEventInternal(CODE_DELETE_EVENT, from);
}
+void Logger::CodeLinePosInfoAddPositionEvent(void* jit_handler_data,
+ int pc_offset,
+ int position) {
+ if (code_event_handler_ != NULL) {
+ IssueAddCodeLinePosInfoEvent(jit_handler_data,
+ pc_offset,
+ position,
+ JitCodeEvent::POSITION);
+ }
+}
+
+void Logger::CodeLinePosInfoAddStatementPositionEvent(void* jit_handler_data,
+ int pc_offset,
+ int position) {
+ if (code_event_handler_ != NULL) {
+ IssueAddCodeLinePosInfoEvent(jit_handler_data,
+ pc_offset,
+ position,
+ JitCodeEvent::STATEMENT_POSITION);
+ }
+}
+
+void Logger::CodeStartLinePosInfoRecordEvent(PositionsRecorder* pos_recorder) {
+ if (code_event_handler_ != NULL) {
+ pos_recorder->AttachJITHandlerData(IssueStartCodePosInfoEvent());
+ }
+}
+
+void Logger::CodeEndLinePosInfoRecordEvent(Code* code,
+ void* jit_handler_data) {
+ if (code_event_handler_ != NULL) {
+ IssueEndCodePosInfoEvent(code, jit_handler_data);
+ }
+}
void Logger::SnapshotPositionEvent(Address addr, int pos) {
if (!log_->IsEnabled()) return;
@@ -1250,7 +1303,7 @@ void Logger::SuspectReadEvent(String* name, Object* obj) {
LogMessageBuilder msg(this);
String* class_name = obj->IsJSObject()
? JSObject::cast(obj)->class_name()
- : HEAP->empty_string();
+ : isolate_->heap()->empty_string();
msg.Append("suspect-read,");
msg.Append(class_name);
msg.Append(',');
@@ -1321,6 +1374,7 @@ void Logger::TickEvent(TickSample* sample, bool overflow) {
msg.AppendAddress(sample->pc);
msg.Append(',');
msg.AppendAddress(sample->sp);
+ msg.Append(",%ld", static_cast<int>(OS::Ticks() - epoch_));
if (sample->has_external_callback) {
msg.Append(",1,");
msg.AppendAddress(sample->external_callback);
@@ -1353,9 +1407,7 @@ void Logger::PauseProfiler() {
if (--cpu_profiler_nesting_ == 0) {
profiler_->pause();
if (FLAG_prof_lazy) {
- if (!FLAG_sliding_state_window && !RuntimeProfiler::IsEnabled()) {
- ticker_->Stop();
- }
+ ticker_->Stop();
FLAG_log_code = false;
LOG(ISOLATE, UncheckedStringEvent("profiler", "pause"));
}
@@ -1376,9 +1428,7 @@ void Logger::ResumeProfiler() {
FLAG_log_code = true;
LogCompiledFunctions();
LogAccessorCallbacks();
- if (!FLAG_sliding_state_window && !ticker_->IsActive()) {
- ticker_->Start();
- }
+ if (!ticker_->IsActive()) ticker_->Start();
}
profiler_->resume();
}
@@ -1430,9 +1480,10 @@ class EnumerateOptimizedFunctionsVisitor: public OptimizedFunctionVisitor {
};
-static int EnumerateCompiledFunctions(Handle<SharedFunctionInfo>* sfis,
+static int EnumerateCompiledFunctions(Heap* heap,
+ Handle<SharedFunctionInfo>* sfis,
Handle<Code>* code_objects) {
- HeapIterator iterator;
+ HeapIterator iterator(heap);
AssertNoAllocation no_alloc;
int compiled_funcs_count = 0;
@@ -1477,6 +1528,7 @@ void Logger::LogCodeObject(Object* object) {
case Code::BINARY_OP_IC: // fall through
case Code::COMPARE_IC: // fall through
case Code::TO_BOOLEAN_IC: // fall through
+ case Code::COMPILED_STUB: // fall through
case Code::STUB:
description =
CodeStub::MajorName(CodeStub::GetMajorKey(code_object), true);
@@ -1597,9 +1649,10 @@ void Logger::LowLevelLogWriteBytes(const char* bytes, int size) {
void Logger::LogCodeObjects() {
- HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask,
+ Heap* heap = isolate_->heap();
+ heap->CollectAllGarbage(Heap::kMakeHeapIterableMask,
"Logger::LogCodeObjects");
- HeapIterator iterator;
+ HeapIterator iterator(heap);
AssertNoAllocation no_alloc;
for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
if (obj->IsCode()) LogCodeObject(obj);
@@ -1653,13 +1706,14 @@ void Logger::LogExistingFunction(Handle<SharedFunctionInfo> shared,
void Logger::LogCompiledFunctions() {
- HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask,
+ Heap* heap = isolate_->heap();
+ heap->CollectAllGarbage(Heap::kMakeHeapIterableMask,
"Logger::LogCompiledFunctions");
- HandleScope scope;
- const int compiled_funcs_count = EnumerateCompiledFunctions(NULL, NULL);
+ HandleScope scope(isolate_);
+ const int compiled_funcs_count = EnumerateCompiledFunctions(heap, NULL, NULL);
ScopedVector< Handle<SharedFunctionInfo> > sfis(compiled_funcs_count);
ScopedVector< Handle<Code> > code_objects(compiled_funcs_count);
- EnumerateCompiledFunctions(sfis.start(), code_objects.start());
+ EnumerateCompiledFunctions(heap, sfis.start(), code_objects.start());
// During iteration, there can be heap allocation due to
// GetScriptLineNumber call.
@@ -1673,13 +1727,14 @@ void Logger::LogCompiledFunctions() {
void Logger::LogAccessorCallbacks() {
- HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask,
+ Heap* heap = isolate_->heap();
+ heap->CollectAllGarbage(Heap::kMakeHeapIterableMask,
"Logger::LogAccessorCallbacks");
- HeapIterator iterator;
+ HeapIterator iterator(heap);
AssertNoAllocation no_alloc;
for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
- if (!obj->IsAccessorInfo()) continue;
- AccessorInfo* ai = AccessorInfo::cast(obj);
+ if (!obj->IsExecutableAccessorInfo()) continue;
+ ExecutableAccessorInfo* ai = ExecutableAccessorInfo::cast(obj);
if (!ai->name()->IsString()) continue;
String* name = String::cast(ai->name());
Address getter_entry = v8::ToCData<Address>(ai->getter());
@@ -1721,13 +1776,10 @@ bool Logger::SetUp() {
Isolate* isolate = Isolate::Current();
ticker_ = new Ticker(isolate, kSamplingIntervalMs);
- if (FLAG_sliding_state_window && sliding_state_window_ == NULL) {
- sliding_state_window_ = new SlidingStateWindow(isolate);
- }
-
bool start_logging = FLAG_log || FLAG_log_runtime || FLAG_log_api
|| FLAG_log_code || FLAG_log_gc || FLAG_log_handles || FLAG_log_suspect
- || FLAG_log_regexp || FLAG_log_state_changes || FLAG_ll_prof;
+ || FLAG_log_regexp || FLAG_log_state_changes || FLAG_ll_prof
+ || FLAG_log_internal_timer_events;
if (start_logging) {
logging_nesting_ = 1;
@@ -1745,6 +1797,8 @@ bool Logger::SetUp() {
}
}
+ if (FLAG_log_internal_timer_events || FLAG_prof) epoch_ = OS::Ticks();
+
return true;
}
@@ -1754,7 +1808,7 @@ void Logger::SetCodeEventHandler(uint32_t options,
code_event_handler_ = event_handler;
if (code_event_handler_ != NULL && (options & kJitCodeEventEnumExisting)) {
- HandleScope scope;
+ HandleScope scope(Isolate::Current());
LogCodeObjects();
LogCompiledFunctions();
}
@@ -1788,9 +1842,6 @@ FILE* Logger::TearDown() {
profiler_ = NULL;
}
- delete sliding_state_window_;
- sliding_state_window_ = NULL;
-
delete ticker_;
ticker_ = NULL;
@@ -1798,22 +1849,6 @@ FILE* Logger::TearDown() {
}
-void Logger::EnableSlidingStateWindow() {
- // If the ticker is NULL, Logger::SetUp has not been called yet. In
- // that case, we set the sliding_state_window flag so that the
- // sliding window computation will be started when Logger::SetUp is
- // called.
- if (ticker_ == NULL) {
- FLAG_sliding_state_window = true;
- return;
- }
- // Otherwise, if the sliding state window computation has not been
- // started we do it now.
- if (sliding_state_window_ == NULL) {
- sliding_state_window_ = new SlidingStateWindow(Isolate::Current());
- }
-}
-
// Protects the state below.
static Mutex* active_samplers_mutex = NULL;
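The timer-event machinery added above is meant to be used through the RAII TimerEventScope declared in log.h (next file): construct it at the top of a region and the destructor logs the END event. A standalone analogue of that pattern (illustration only; the real code also checks FLAG_log_internal_timer_events and appends ticks since epoch_):

#include <cstdio>

// Logs a start line in the constructor and an end line in the destructor,
// mirroring the timer-event-start/timer-event-end format used by TimerEvent.
class TimerEventScope {
 public:
  explicit TimerEventScope(const char* name) : name_(name) {
    printf("timer-event-start,\"%s\"\n", name_);
  }
  ~TimerEventScope() { printf("timer-event-end,\"%s\"\n", name_); }

 private:
  const char* name_;
};

int main() {
  TimerEventScope timer("V8.Execute");  // same literal as v8_execute above
  // ... timed work ...
  return 0;  // destructor logs the end event on scope exit
}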
diff --git a/src/3rdparty/v8/src/log.h b/src/3rdparty/v8/src/log.h
index 33f359a..718dc02 100644
--- a/src/3rdparty/v8/src/log.h
+++ b/src/3rdparty/v8/src/log.h
@@ -74,8 +74,9 @@ namespace internal {
class LogMessageBuilder;
class Profiler;
class Semaphore;
-class SlidingStateWindow;
class Ticker;
+class Isolate;
+class PositionsRecorder;
#undef LOG
#define LOG(isolate, Call) \
@@ -127,13 +128,14 @@ class Ticker;
V(EVAL_TAG, "Eval") \
V(FUNCTION_TAG, "Function") \
V(KEYED_LOAD_IC_TAG, "KeyedLoadIC") \
- V(KEYED_LOAD_MEGAMORPHIC_IC_TAG, "KeyedLoadMegamorphicIC") \
+ V(KEYED_LOAD_POLYMORPHIC_IC_TAG, "KeyedLoadPolymorphicIC") \
V(KEYED_EXTERNAL_ARRAY_LOAD_IC_TAG, "KeyedExternalArrayLoadIC") \
V(KEYED_STORE_IC_TAG, "KeyedStoreIC") \
- V(KEYED_STORE_MEGAMORPHIC_IC_TAG, "KeyedStoreMegamorphicIC") \
+ V(KEYED_STORE_POLYMORPHIC_IC_TAG, "KeyedStorePolymorphicIC") \
V(KEYED_EXTERNAL_ARRAY_STORE_IC_TAG, "KeyedExternalArrayStoreIC") \
V(LAZY_COMPILE_TAG, "LazyCompile") \
V(LOAD_IC_TAG, "LoadIC") \
+ V(LOAD_POLYMORPHIC_IC_TAG, "LoadPolymorphicIC") \
V(REG_EXP_TAG, "RegExp") \
V(SCRIPT_TAG, "Script") \
V(STORE_IC_TAG, "StoreIC") \
@@ -174,9 +176,6 @@ class Logger {
// leaving the file open.
FILE* TearDown();
- // Enable the computation of a sliding window of states.
- void EnableSlidingStateWindow();
-
// Emits an event with a string value -> (name, value).
void StringEvent(const char* name, const char* value);
@@ -249,6 +248,19 @@ class Logger {
void CodeMoveEvent(Address from, Address to);
// Emits a code delete event.
void CodeDeleteEvent(Address from);
+ // Emits a code line info add event with Position type.
+ void CodeLinePosInfoAddPositionEvent(void* jit_handler_data,
+ int pc_offset,
+ int position);
+ // Emits a code line info add event with StatementPosition type.
+ void CodeLinePosInfoAddStatementPositionEvent(void* jit_handler_data,
+ int pc_offset,
+ int position);
+ // Emits a code line info start-recording event.
+ void CodeStartLinePosInfoRecordEvent(PositionsRecorder* pos_recorder);
+ // Emits a code line info finish record event.
+ // It's the callee's responsibility to dispose of the parameter jit_handler_data.
+ void CodeEndLinePosInfoRecordEvent(Code* code, void* jit_handler_data);
void SharedFunctionInfoMoveEvent(Address from, Address to);
@@ -275,18 +287,54 @@ class Logger {
uintptr_t start,
uintptr_t end);
+ // ==== Events logged by --log-timer-events. ====
+ enum StartEnd { START, END };
+
+ void TimerEvent(StartEnd se, const char* name);
+
+ static void EnterExternal();
+ static void LeaveExternal();
+
+ class TimerEventScope {
+ public:
+ TimerEventScope(Isolate* isolate, const char* name)
+ : isolate_(isolate), name_(name) {
+ if (FLAG_log_internal_timer_events) LogTimerEvent(START);
+ }
+
+ ~TimerEventScope() {
+ if (FLAG_log_internal_timer_events) LogTimerEvent(END);
+ }
+
+ void LogTimerEvent(StartEnd se);
+
+ static const char* v8_recompile_synchronous;
+ static const char* v8_recompile_parallel;
+ static const char* v8_compile_full_code;
+ static const char* v8_execute;
+ static const char* v8_external;
+
+ private:
+ Isolate* isolate_;
+ const char* name_;
+ };
+
// ==== Events logged by --log-regexp ====
// Regexp compilation and execution events.
void RegExpCompileEvent(Handle<JSRegExp> regexp, bool in_cache);
// Log an event reported from generated code
- void LogRuntime(Vector<const char> format, JSArray* args);
+ void LogRuntime(Isolate* isolate, Vector<const char> format, JSArray* args);
bool is_logging() {
return logging_nesting_ > 0;
}
+ bool is_code_event_handler_enabled() {
+ return code_event_handler_ != NULL;
+ }
+
bool is_logging_code_events() {
return is_logging() || code_event_handler_ != NULL;
}
@@ -326,14 +374,22 @@ class Logger {
class NameBuffer;
class NameMap;
- Logger();
+ explicit Logger(Isolate* isolate);
~Logger();
// Issue code notifications.
- void IssueCodeAddedEvent(Code* code, const char* name, size_t name_len);
+ void IssueCodeAddedEvent(Code* code,
+ Script* script,
+ const char* name,
+ size_t name_len);
void IssueCodeMovedEvent(Address from, Address to);
void IssueCodeRemovedEvent(Address from);
-
+ void IssueAddCodeLinePosInfoEvent(void* jit_handler_data,
+ int pc_offset,
+ int position,
+ JitCodeEvent::PositionType position_type);
+ void* IssueStartCodePosInfoEvent();
+ void IssueEndCodePosInfoEvent(Code* code, void* jit_handler_data);
// Emits the profiler's first message.
void ProfilerBeginEvent();
@@ -393,6 +449,8 @@ class Logger {
// Returns whether profiler's sampler is active.
bool IsProfilerSamplerActive();
+ Isolate* isolate_;
+
// The sampler used by the profiler and the sliding state window.
Ticker* ticker_;
@@ -401,10 +459,6 @@ class Logger {
// of samples.
Profiler* profiler_;
- // SlidingStateWindow instance keeping a sliding window of the most
- // recent VM states.
- SlidingStateWindow* sliding_state_window_;
-
// An array of log events names.
const char* const* log_events_;
@@ -415,7 +469,6 @@ class Logger {
friend class LogMessageBuilder;
friend class TimeLog;
friend class Profiler;
- friend class SlidingStateWindow;
friend class StackTracer;
friend class VMState;
@@ -449,6 +502,8 @@ class Logger {
// Logger::FunctionCreateEvent(...)
Address prev_code_;
+ int64_t epoch_;
+
friend class CpuProfiler;
};
diff --git a/src/3rdparty/v8/src/macro-assembler.h b/src/3rdparty/v8/src/macro-assembler.h
index 11e2217..9e71123 100644
--- a/src/3rdparty/v8/src/macro-assembler.h
+++ b/src/3rdparty/v8/src/macro-assembler.h
@@ -36,6 +36,23 @@ enum InvokeFlag {
};
+// Flags used for the AllocateInNewSpace functions.
+enum AllocationFlags {
+ // No special flags.
+ NO_ALLOCATION_FLAGS = 0,
+ // Return the pointer to the allocated memory, already tagged as a heap object.
+ TAG_OBJECT = 1 << 0,
+ // The content of the result register already contains the allocation top in
+ // new space.
+ RESULT_CONTAINS_TOP = 1 << 1,
+ // The requested size of the space to allocate is specified in words
+ // instead of bytes.
+ SIZE_IN_WORDS = 1 << 2,
+ // Align the allocation to a multiple of kDoubleSize.
+ DOUBLE_ALIGNMENT = 1 << 3
+};
+
+
// Invalid depth in prototype chain.
const int kInvalidProtoDepth = -1;
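Since the AllocationFlags values above are distinct power-of-two bits, call sites combine and test them bitwise; a brief sketch (a fragment, using the enum exactly as declared above):

// Request a tagged result with double alignment; size given in bytes.
AllocationFlags flags =
    static_cast<AllocationFlags>(TAG_OBJECT | DOUBLE_ALIGNMENT);
bool wants_tag = (flags & TAG_OBJECT) != 0;  // flags are tested the same way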
diff --git a/src/3rdparty/v8/src/macros.py b/src/3rdparty/v8/src/macros.py
index 08fa82e..291a898 100644
--- a/src/3rdparty/v8/src/macros.py
+++ b/src/3rdparty/v8/src/macros.py
@@ -32,6 +32,8 @@ const NONE = 0;
const READ_ONLY = 1;
const DONT_ENUM = 2;
const DONT_DELETE = 4;
+const NEW_ONE_BYTE_STRING = true;
+const NEW_TWO_BYTE_STRING = false;
# Constants used for getter and setter operations.
const GETTER = 0;
@@ -97,6 +99,7 @@ macro IS_UNDEFINED(arg) = (typeof(arg) === 'undefined');
macro IS_NUMBER(arg) = (typeof(arg) === 'number');
macro IS_STRING(arg) = (typeof(arg) === 'string');
macro IS_BOOLEAN(arg) = (typeof(arg) === 'boolean');
+macro IS_SYMBOL(arg) = (%_IsSymbol(arg));
macro IS_OBJECT(arg) = (%_IsObject(arg));
macro IS_ARRAY(arg) = (%_IsArray(arg));
macro IS_FUNCTION(arg) = (%_IsFunction(arg));
diff --git a/src/3rdparty/v8/src/mark-compact.cc b/src/3rdparty/v8/src/mark-compact.cc
index 7040728..ba19bf3 100644
--- a/src/3rdparty/v8/src/mark-compact.cc
+++ b/src/3rdparty/v8/src/mark-compact.cc
@@ -36,11 +36,12 @@
#include "heap-profiler.h"
#include "ic-inl.h"
#include "incremental-marking.h"
-#include "liveobjectlist-inl.h"
#include "mark-compact.h"
+#include "marking-thread.h"
#include "objects-visiting.h"
#include "objects-visiting-inl.h"
#include "stub-cache.h"
+#include "sweeper-thread.h"
namespace v8 {
namespace internal {
@@ -65,6 +66,7 @@ MarkCompactCollector::MarkCompactCollector() : // NOLINT
marking_parity_(ODD_MARKING_PARITY),
compacting_(false),
was_marked_incrementally_(false),
+ sweeping_pending_(false),
tracer_(NULL),
migration_slots_buffer_(NULL),
heap_(NULL),
@@ -83,6 +85,16 @@ class VerifyMarkingVisitor: public ObjectVisitor {
}
}
}
+
+ void VisitEmbeddedPointer(RelocInfo* rinfo) {
+ ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
+ if (!FLAG_weak_embedded_maps_in_optimized_code || !FLAG_collect_maps ||
+ rinfo->host()->kind() != Code::OPTIMIZED_FUNCTION ||
+ !rinfo->target_object()->IsMap() ||
+ !Map::cast(rinfo->target_object())->CanTransition()) {
+ VisitPointer(rinfo->target_object_address());
+ }
+ }
};
@@ -276,7 +288,8 @@ class VerifyNativeContextSeparationVisitor: public ObjectVisitor {
case TYPE_FEEDBACK_INFO_TYPE:
object->Iterate(this);
break;
- case ACCESSOR_INFO_TYPE:
+ case DECLARED_ACCESSOR_INFO_TYPE:
+ case EXECUTABLE_ACCESSOR_INFO_TYPE:
case BYTE_ARRAY_TYPE:
case CALL_HANDLER_INFO_TYPE:
case CODE_TYPE:
@@ -383,7 +396,7 @@ void MarkCompactCollector::CollectGarbage() {
MarkLiveObjects();
ASSERT(heap_->incremental_marking()->IsStopped());
- if (FLAG_collect_maps) ClearNonLiveTransitions();
+ if (FLAG_collect_maps) ClearNonLiveReferences();
ClearWeakMaps();
@@ -403,6 +416,16 @@ void MarkCompactCollector::CollectGarbage() {
}
#endif
+#ifdef VERIFY_HEAP
+ if (FLAG_collect_maps && FLAG_weak_embedded_maps_in_optimized_code &&
+ heap()->weak_embedded_maps_verification_enabled()) {
+ VerifyWeakEmbeddedMapsInOptimizedCode();
+ }
+ if (FLAG_collect_maps && FLAG_omit_prototype_checks_for_leaf_maps) {
+ VerifyOmittedPrototypeChecks();
+ }
+#endif
+
Finish();
if (marking_parity_ == EVEN_MARKING_PARITY) {
@@ -454,6 +477,30 @@ void MarkCompactCollector::VerifyMarkbitsAreClean() {
CHECK_EQ(0, Page::FromAddress(obj->address())->LiveBytes());
}
}
+
+
+void MarkCompactCollector::VerifyWeakEmbeddedMapsInOptimizedCode() {
+ HeapObjectIterator code_iterator(heap()->code_space());
+ for (HeapObject* obj = code_iterator.Next();
+ obj != NULL;
+ obj = code_iterator.Next()) {
+ Code* code = Code::cast(obj);
+ if (code->kind() != Code::OPTIMIZED_FUNCTION) continue;
+ if (code->marked_for_deoptimization()) continue;
+ code->VerifyEmbeddedMapsDependency();
+ }
+}
+
+
+void MarkCompactCollector::VerifyOmittedPrototypeChecks() {
+ HeapObjectIterator iterator(heap()->map_space());
+ for (HeapObject* obj = iterator.Next();
+ obj != NULL;
+ obj = iterator.Next()) {
+ Map* map = Map::cast(obj);
+ map->VerifyOmittedPrototypeChecks();
+ }
+}
#endif // VERIFY_HEAP
@@ -488,11 +535,77 @@ void MarkCompactCollector::ClearMarkbits() {
MarkBit mark_bit = Marking::MarkBitFrom(obj);
mark_bit.Clear();
mark_bit.Next().Clear();
+ Page::FromAddress(obj->address())->ResetProgressBar();
Page::FromAddress(obj->address())->ResetLiveBytes();
}
}
+void MarkCompactCollector::StartSweeperThreads() {
+ sweeping_pending_ = true;
+ for (int i = 0; i < FLAG_sweeper_threads; i++) {
+ heap()->isolate()->sweeper_threads()[i]->StartSweeping();
+ }
+}
+
+
+void MarkCompactCollector::WaitUntilSweepingCompleted() {
+ if (sweeping_pending_) {
+ for (int i = 0; i < FLAG_sweeper_threads; i++) {
+ heap()->isolate()->sweeper_threads()[i]->WaitForSweeperThread();
+ }
+ sweeping_pending_ = false;
+ StealMemoryFromSweeperThreads(heap()->paged_space(OLD_DATA_SPACE));
+ StealMemoryFromSweeperThreads(heap()->paged_space(OLD_POINTER_SPACE));
+ heap()->paged_space(OLD_DATA_SPACE)->ResetUnsweptFreeBytes();
+ heap()->paged_space(OLD_POINTER_SPACE)->ResetUnsweptFreeBytes();
+ }
+}
+
+
+intptr_t MarkCompactCollector::
+ StealMemoryFromSweeperThreads(PagedSpace* space) {
+ intptr_t freed_bytes = 0;
+ for (int i = 0; i < FLAG_sweeper_threads; i++) {
+ freed_bytes += heap()->isolate()->sweeper_threads()[i]->StealMemory(space);
+ }
+ space->AddToAccountingStats(freed_bytes);
+ space->DecrementUnsweptFreeBytes(freed_bytes);
+ return freed_bytes;
+}
+
+
+bool MarkCompactCollector::AreSweeperThreadsActivated() {
+ return heap()->isolate()->sweeper_threads() != NULL;
+}
+
+
+bool MarkCompactCollector::IsConcurrentSweepingInProgress() {
+ return sweeping_pending_;
+}
+
+
+void MarkCompactCollector::FinalizeSweeping() {
+ ASSERT(sweeping_pending_ == false);
+ ReleaseEvacuationCandidates();
+ heap()->FreeQueuedChunks();
+}
+
+
+void MarkCompactCollector::MarkInParallel() {
+ for (int i = 0; i < FLAG_marking_threads; i++) {
+ heap()->isolate()->marking_threads()[i]->StartMarking();
+ }
+}
+
+
+void MarkCompactCollector::WaitUntilMarkingCompleted() {
+ for (int i = 0; i < FLAG_marking_threads; i++) {
+ heap()->isolate()->marking_threads()[i]->WaitForMarkingThread();
+ }
+}
+
+
bool Marking::TransferMark(Address old_start, Address new_start) {
// This is only used when resizing an object.
ASSERT(MemoryChunk::FromAddress(old_start) ==
@@ -795,6 +908,12 @@ void MarkCompactCollector::Prepare(GCTracer* tracer) {
ASSERT(!FLAG_never_compact || !FLAG_always_compact);
+ if (AreSweeperThreadsActivated() && FLAG_concurrent_sweeping) {
+ // Instead of waiting we could also abort the sweeper threads here.
+ WaitUntilSweepingCompleted();
+ FinalizeSweeping();
+ }
+
// Clear marking bits if incremental marking is aborted.
if (was_marked_incrementally_ && abort_incremental_marking_) {
heap()->incremental_marking()->Abort();
@@ -809,7 +928,7 @@ void MarkCompactCollector::Prepare(GCTracer* tracer) {
StartCompaction(NON_INCREMENTAL_COMPACTION);
}
- PagedSpaces spaces;
+ PagedSpaces spaces(heap());
for (PagedSpace* space = spaces.next();
space != NULL;
space = spaces.next()) {
@@ -824,6 +943,14 @@ void MarkCompactCollector::Prepare(GCTracer* tracer) {
}
+class DeoptimizeMarkedCodeFilter : public OptimizedFunctionFilter {
+ public:
+ virtual bool TakeFunction(JSFunction* function) {
+ return function->code()->marked_for_deoptimization();
+ }
+};
+
+
void MarkCompactCollector::Finish() {
#ifdef DEBUG
ASSERT(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS);
@@ -835,7 +962,8 @@ void MarkCompactCollector::Finish() {
// objects (empty string, illegal builtin).
heap()->isolate()->stub_cache()->Clear();
- heap()->external_string_table_.CleanUp();
+ DeoptimizeMarkedCodeFilter filter;
+ Deoptimizer::DeoptimizeAllFunctionsWith(&filter);
}
@@ -884,8 +1012,8 @@ void CodeFlusher::ProcessJSFunctionCandidates() {
if (!code_mark.Get()) {
shared->set_code(lazy_compile);
candidate->set_code(lazy_compile);
- } else if (code == lazy_compile) {
- candidate->set_code(lazy_compile);
+ } else {
+ candidate->set_code(code);
}
// We are in the middle of a GC cycle so the write barrier in the code
@@ -934,10 +1062,51 @@ void CodeFlusher::ProcessSharedFunctionInfoCandidates() {
}
+bool CodeFlusher::ContainsCandidate(SharedFunctionInfo* shared_info) {
+ SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
+ while (candidate != NULL) {
+ if (candidate == shared_info) return true;
+ candidate = GetNextCandidate(candidate);
+ }
+ return false;
+}
+
+
+void CodeFlusher::EvictCandidate(SharedFunctionInfo* shared_info) {
+ // Make sure previous flushing decisions are revisited.
+ isolate_->heap()->incremental_marking()->RecordWrites(shared_info);
+
+ SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
+ SharedFunctionInfo* next_candidate;
+ if (candidate == shared_info) {
+ next_candidate = GetNextCandidate(shared_info);
+ shared_function_info_candidates_head_ = next_candidate;
+ ClearNextCandidate(shared_info);
+ } else {
+ while (candidate != NULL) {
+ next_candidate = GetNextCandidate(candidate);
+
+ if (next_candidate == shared_info) {
+ next_candidate = GetNextCandidate(shared_info);
+ SetNextCandidate(candidate, next_candidate);
+ ClearNextCandidate(shared_info);
+ break;
+ }
+
+ candidate = next_candidate;
+ }
+ }
+}
+
+
void CodeFlusher::EvictCandidate(JSFunction* function) {
ASSERT(!function->next_function_link()->IsUndefined());
Object* undefined = isolate_->heap()->undefined_value();
+ // Make sure previous flushing decisions are revisited.
+ isolate_->heap()->incremental_marking()->RecordWrites(function);
+ isolate_->heap()->incremental_marking()->RecordWrites(function->shared());
+
JSFunction* candidate = jsfunction_candidates_head_;
JSFunction* next_candidate;
if (candidate == function) {
@@ -952,6 +1121,7 @@ void CodeFlusher::EvictCandidate(JSFunction* function) {
next_candidate = GetNextCandidate(function);
SetNextCandidate(candidate, next_candidate);
ClearNextCandidate(function, undefined);
+ break;
}
candidate = next_candidate;
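Editorial note: both EvictCandidate overloads above remove one element from an intrusive singly-linked candidate list: a special case when the target is the head, then a walk that unlinks the matching successor and stops (the added break at the end of the JSFunction variant matters for exactly that early exit). The same removal pattern in a self-contained form, with illustrative types rather than V8's:

    #include <cassert>

    // Intrusive singly-linked list node, like the candidate chains above.
    struct Candidate {
      int id;
      Candidate* next;
    };

    // Unlink 'target' from the list rooted at *head (no-op if absent).
    void Evict(Candidate** head, Candidate* target) {
      if (*head == target) {            // special-case the head
        *head = target->next;
        target->next = nullptr;
        return;
      }
      for (Candidate* c = *head; c != nullptr; c = c->next) {
        if (c->next == target) {        // unlink the successor and stop
          c->next = target->next;
          target->next = nullptr;
          break;
        }
      }
    }

    int main() {
      Candidate c3{3, nullptr}, c2{2, &c3}, c1{1, &c2};
      Candidate* head = &c1;
      Evict(&head, &c2);
      assert(head == &c1 && c1.next == &c3);
      Evict(&head, &c1);
      assert(head == &c3);
      return 0;
    }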
@@ -961,17 +1131,14 @@ void CodeFlusher::EvictCandidate(JSFunction* function) {
void CodeFlusher::EvictJSFunctionCandidates() {
- Object* undefined = isolate_->heap()->undefined_value();
-
JSFunction* candidate = jsfunction_candidates_head_;
JSFunction* next_candidate;
while (candidate != NULL) {
next_candidate = GetNextCandidate(candidate);
- ClearNextCandidate(candidate, undefined);
+ EvictCandidate(candidate);
candidate = next_candidate;
}
-
- jsfunction_candidates_head_ = NULL;
+ ASSERT(jsfunction_candidates_head_ == NULL);
}
@@ -980,11 +1147,10 @@ void CodeFlusher::EvictSharedFunctionInfoCandidates() {
SharedFunctionInfo* next_candidate;
while (candidate != NULL) {
next_candidate = GetNextCandidate(candidate);
- ClearNextCandidate(candidate);
+ EvictCandidate(candidate);
candidate = next_candidate;
}
-
- shared_function_info_candidates_head_ = NULL;
+ ASSERT(shared_function_info_candidates_head_ == NULL);
}
@@ -1012,7 +1178,7 @@ MarkCompactCollector::~MarkCompactCollector() {
static inline HeapObject* ShortCircuitConsString(Object** p) {
- // Optimization: If the heap object pointed to by p is a non-symbol
+ // Optimization: If the heap object pointed to by p is a non-internalized
// cons string whose right substring is HEAP->empty_string, update
// it in place to its left substring. Return the updated value.
//
@@ -1020,7 +1186,7 @@ static inline HeapObject* ShortCircuitConsString(Object** p) {
// (i.e., the left substring of a cons string is always a heap object).
//
// The check performed is:
- // object->IsConsString() && !object->IsSymbol() &&
+ // object->IsConsString() && !object->IsInternalizedString() &&
// (ConsString::cast(object)->second() == HEAP->empty_string())
// except the maps for the object and its possible substrings might be
// marked.
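Editorial note: the short-circuit described in the comment above collapses a cons string whose right half is the empty string down to its left half, updating the slot in place. A rough stand-alone illustration of the idea, using plain pointers instead of V8's tagged heap objects:

    #include <cassert>
    #include <string>

    // Toy string tree: a leaf holds text, a cons holds two children.
    struct Str {
      bool is_cons;
      std::string leaf;   // valid when !is_cons
      Str* first;         // valid when is_cons
      Str* second;        // valid when is_cons
    };

    static Str kEmpty{false, "", nullptr, nullptr};

    // If *p is a cons whose second part is the empty string, update the
    // slot in place to point at the first part, as ShortCircuitConsString does.
    void ShortCircuit(Str** p) {
      Str* s = *p;
      if (s->is_cons && s->second == &kEmpty) *p = s->first;
    }

    int main() {
      Str leaf{false, "abc", nullptr, nullptr};
      Str cons{true, "", &leaf, &kEmpty};
      Str* slot = &cons;
      ShortCircuit(&slot);
      assert(slot == &leaf);
      return 0;
    }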
@@ -1131,9 +1297,9 @@ class MarkCompactMarkingVisitor
// Visit all unmarked objects pointed to by [start, end).
// Returns false if the operation fails (lack of stack space).
- static inline bool VisitUnmarkedObjects(Heap* heap,
+ INLINE(static bool VisitUnmarkedObjects(Heap* heap,
Object** start,
- Object** end) {
+ Object** end)) {
// Return false if we are close to the stack limit.
StackLimitCheck check(heap->isolate());
if (check.HasOverflowed()) return false;
@@ -1397,10 +1563,10 @@ class MarkCompactMarkingVisitor::ObjectStatsTracker<
static inline void Visit(Map* map, HeapObject* obj) {
Heap* heap = map->GetHeap();
FixedArray* fixed_array = FixedArray::cast(obj);
- if (fixed_array == heap->symbol_table()) {
+ if (fixed_array == heap->string_table()) {
heap->RecordObjectStats(
FIXED_ARRAY_TYPE,
- SYMBOL_TABLE_SUB_TYPE,
+ STRING_TABLE_SUB_TYPE,
fixed_array->Size());
}
ObjectStatsVisitBase(kVisitFixedArray, map, obj);
@@ -1581,10 +1747,10 @@ class RootMarkingVisitor : public ObjectVisitor {
};
-// Helper class for pruning the symbol table.
-class SymbolTableCleaner : public ObjectVisitor {
+// Helper class for pruning the string table.
+class StringTableCleaner : public ObjectVisitor {
public:
- explicit SymbolTableCleaner(Heap* heap)
+ explicit StringTableCleaner(Heap* heap)
: heap_(heap), pointers_removed_(0) { }
virtual void VisitPointers(Object** start, Object** end) {
@@ -1593,13 +1759,14 @@ class SymbolTableCleaner : public ObjectVisitor {
Object* o = *p;
if (o->IsHeapObject() &&
!Marking::MarkBitFrom(HeapObject::cast(o)).Get()) {
- // Check if the symbol being pruned is an external symbol. We need to
- // delete the associated external data as this symbol is going away.
+ // Check if the internalized string being pruned is external. We need to
+ // delete the associated external data as this string is going away.
// Since no objects have yet been moved we can safely access the map of
// the object.
if (o->IsExternalString() ||
- (o->IsHeapObject() && HeapObject::cast(o)->map()->has_external_resource())) {
+ (o->IsHeapObject() &&
+ HeapObject::cast(o)->map()->has_external_resource())) {
heap_->FinalizeExternalString(HeapObject::cast(*p));
}
// Set the entry to the_hole_value (as deleted).
@@ -1685,10 +1852,10 @@ static void DiscoverGreyObjectsOnPage(MarkingDeque* marking_deque, Page* p) {
for (;
cell_index < last_cell_index;
cell_index++, cell_base += 32 * kPointerSize) {
- ASSERT((unsigned)cell_index ==
- Bitmap::IndexToCell(
- Bitmap::CellAlignIndex(
- p->AddressToMarkbitIndex(cell_base))));
+ ASSERT(static_cast<unsigned>(cell_index) ==
+ Bitmap::IndexToCell(
+ Bitmap::CellAlignIndex(
+ p->AddressToMarkbitIndex(cell_base))));
const MarkBit::CellType current_cell = cells[cell_index];
if (current_cell == 0) continue;
@@ -1745,14 +1912,24 @@ bool MarkCompactCollector::IsUnmarkedHeapObject(Object** p) {
}
-void MarkCompactCollector::MarkSymbolTable() {
- SymbolTable* symbol_table = heap()->symbol_table();
- // Mark the symbol table itself.
- MarkBit symbol_table_mark = Marking::MarkBitFrom(symbol_table);
- SetMark(symbol_table, symbol_table_mark);
+bool MarkCompactCollector::IsUnmarkedHeapObjectWithHeap(Heap* heap,
+ Object** p) {
+ Object* o = *p;
+ ASSERT(o->IsHeapObject());
+ HeapObject* heap_object = HeapObject::cast(o);
+ MarkBit mark = Marking::MarkBitFrom(heap_object);
+ return !mark.Get();
+}
+
+
+void MarkCompactCollector::MarkStringTable() {
+ StringTable* string_table = heap()->string_table();
+ // Mark the string table itself.
+ MarkBit string_table_mark = Marking::MarkBitFrom(string_table);
+ SetMark(string_table, string_table_mark);
// Explicitly mark the prefix.
MarkingVisitor marker(heap());
- symbol_table->IteratePrefix(&marker);
+ string_table->IteratePrefix(&marker);
ProcessMarkingDeque();
}
@@ -1762,8 +1939,8 @@ void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) {
// etc., and all objects reachable from them.
heap()->IterateStrongRoots(visitor, VISIT_ONLY_STRONG);
- // Handle the symbol table specially.
- MarkSymbolTable();
+ // Handle the string table specially.
+ MarkStringTable();
// There may be overflowed objects in the heap. Visit them now.
while (marking_deque_.overflowed()) {
@@ -1773,54 +1950,6 @@ void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) {
}
-void MarkCompactCollector::MarkObjectGroups() {
- List<ObjectGroup*>* object_groups =
- heap()->isolate()->global_handles()->object_groups();
-
- int last = 0;
- for (int i = 0; i < object_groups->length(); i++) {
- ObjectGroup* entry = object_groups->at(i);
- ASSERT(entry != NULL);
-
- Object*** objects = entry->objects_;
- bool group_marked = false;
- for (size_t j = 0; j < entry->length_; j++) {
- Object* object = *objects[j];
- if (object->IsHeapObject()) {
- HeapObject* heap_object = HeapObject::cast(object);
- MarkBit mark = Marking::MarkBitFrom(heap_object);
- if (mark.Get()) {
- group_marked = true;
- break;
- }
- }
- }
-
- if (!group_marked) {
- (*object_groups)[last++] = entry;
- continue;
- }
-
- // An object in the group is marked, so mark as grey all white heap
- // objects in the group.
- for (size_t j = 0; j < entry->length_; ++j) {
- Object* object = *objects[j];
- if (object->IsHeapObject()) {
- HeapObject* heap_object = HeapObject::cast(object);
- MarkBit mark = Marking::MarkBitFrom(heap_object);
- MarkObject(heap_object, mark);
- }
- }
-
- // Once the entire group has been colored grey, set the object group
- // to NULL so it won't be processed again.
- entry->Dispose();
- object_groups->at(i) = NULL;
- }
- object_groups->Rewind(last);
-}
-
-
void MarkCompactCollector::MarkImplicitRefGroups() {
List<ImplicitRefGroup*>* ref_groups =
heap()->isolate()->global_handles()->implicit_ref_groups();
@@ -1939,11 +2068,12 @@ void MarkCompactCollector::ProcessMarkingDeque() {
}
-void MarkCompactCollector::ProcessExternalMarking() {
+void MarkCompactCollector::ProcessExternalMarking(RootMarkingVisitor* visitor) {
bool work_to_do = true;
ASSERT(marking_deque_.IsEmpty());
while (work_to_do) {
- MarkObjectGroups();
+ heap()->isolate()->global_handles()->IterateObjectGroups(
+ visitor, &IsUnmarkedHeapObjectWithHeap);
MarkImplicitRefGroups();
work_to_do = !marking_deque_.IsEmpty();
ProcessMarkingDeque();
@@ -2022,7 +2152,7 @@ void MarkCompactCollector::MarkLiveObjects() {
// The objects reachable from the roots are marked, yet unreachable
// objects are unmarked. Mark objects reachable due to host
// application specific logic.
- ProcessExternalMarking();
+ ProcessExternalMarking(&root_visitor);
// The objects reachable from the roots or object groups are marked,
// yet unreachable objects are unmarked. Mark objects reachable
@@ -2041,28 +2171,29 @@ void MarkCompactCollector::MarkLiveObjects() {
// Repeat host application specific marking to mark unmarked objects
// reachable from the weak roots.
- ProcessExternalMarking();
+ ProcessExternalMarking(&root_visitor);
AfterMarking();
}
void MarkCompactCollector::AfterMarking() {
- // Object literal map caches reference symbols (cache keys) and maps
+ // Object literal map caches reference strings (cache keys) and maps
// (cache values). At this point still useful maps have already been
// marked. Mark the keys for the alive values before we process the
- // symbol table.
+ // string table.
ProcessMapCaches();
- // Prune the symbol table removing all symbols only pointed to by the
- // symbol table. Cannot use symbol_table() here because the symbol
+ // Prune the string table removing all strings only pointed to by the
+ // string table. Cannot use string_table() here because the string
// table is marked.
- SymbolTable* symbol_table = heap()->symbol_table();
- SymbolTableCleaner v(heap());
- symbol_table->IterateElements(&v);
- symbol_table->ElementsRemoved(v.PointersRemoved());
+ StringTable* string_table = heap()->string_table();
+ StringTableCleaner v(heap());
+ string_table->IterateElements(&v);
+ string_table->ElementsRemoved(v.PointersRemoved());
heap()->external_string_table_.Iterate(&v);
heap()->external_string_table_.CleanUp();
+ heap()->error_object_list_.RemoveUnmarked(heap());
// Process the weak references.
MarkCompactWeakObjectRetainer mark_compact_object_retainer;
@@ -2162,7 +2293,7 @@ void MarkCompactCollector::ReattachInitialMaps() {
}
-void MarkCompactCollector::ClearNonLiveTransitions() {
+void MarkCompactCollector::ClearNonLiveReferences() {
HeapObjectIterator map_iterator(heap()->map_space());
// Iterate over the map space, setting map transitions that go from
// a marked map to an unmarked map to null transitions. This action
@@ -2174,9 +2305,7 @@ void MarkCompactCollector::ClearNonLiveTransitions() {
if (map->IsFreeSpace()) continue;
ASSERT(map->IsMap());
- // Only JSObject and subtypes have map transitions and back pointers.
- STATIC_ASSERT(LAST_TYPE == LAST_JS_OBJECT_TYPE);
- if (map->instance_type() < FIRST_JS_OBJECT_TYPE) continue;
+ if (!map->CanTransition()) continue;
if (map_mark.Get() &&
map->attached_to_shared_function_info()) {
@@ -2188,6 +2317,12 @@ void MarkCompactCollector::ClearNonLiveTransitions() {
ClearNonLivePrototypeTransitions(map);
ClearNonLiveMapTransitions(map, map_mark);
+
+ if (map_mark.Get()) {
+ ClearNonLiveDependentCode(map);
+ } else {
+ ClearAndDeoptimizeDependentCode(map);
+ }
}
}
@@ -2256,6 +2391,57 @@ void MarkCompactCollector::ClearNonLiveMapTransitions(Map* map,
}
+void MarkCompactCollector::ClearAndDeoptimizeDependentCode(Map* map) {
+ AssertNoAllocation no_allocation_scope;
+ DependentCode* entries = map->dependent_code();
+ DependentCode::GroupStartIndexes starts(entries);
+ int number_of_entries = starts.number_of_entries();
+ if (number_of_entries == 0) return;
+ for (int i = 0; i < number_of_entries; i++) {
+ Code* code = entries->code_at(i);
+ if (IsMarked(code) && !code->marked_for_deoptimization()) {
+ code->set_marked_for_deoptimization(true);
+ }
+ entries->clear_code_at(i);
+ }
+ map->set_dependent_code(DependentCode::cast(heap()->empty_fixed_array()));
+}
+
+
+void MarkCompactCollector::ClearNonLiveDependentCode(Map* map) {
+ AssertNoAllocation no_allocation_scope;
+ DependentCode* entries = map->dependent_code();
+ DependentCode::GroupStartIndexes starts(entries);
+ int number_of_entries = starts.number_of_entries();
+ if (number_of_entries == 0) return;
+ int new_number_of_entries = 0;
+ // Go through all groups, remove dead codes and compact.
+ for (int g = 0; g < DependentCode::kGroupCount; g++) {
+ int group_number_of_entries = 0;
+ for (int i = starts.at(g); i < starts.at(g + 1); i++) {
+ Code* code = entries->code_at(i);
+ if (IsMarked(code) && !code->marked_for_deoptimization()) {
+ if (new_number_of_entries + group_number_of_entries != i) {
+ entries->set_code_at(new_number_of_entries +
+ group_number_of_entries, code);
+ }
+ Object** slot = entries->code_slot_at(new_number_of_entries +
+ group_number_of_entries);
+ RecordSlot(slot, slot, code);
+ group_number_of_entries++;
+ }
+ }
+ entries->set_number_of_entries(
+ static_cast<DependentCode::DependencyGroup>(g),
+ group_number_of_entries);
+ new_number_of_entries += group_number_of_entries;
+ }
+ for (int i = new_number_of_entries; i < number_of_entries; i++) {
+ entries->clear_code_at(i);
+ }
+}
+
+
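Editorial note: ClearNonLiveDependentCode above compacts a flat array that is partitioned into groups: surviving entries slide left within their group, the per-group counts are updated, and the freed tail is cleared. A stripped-down sketch of the same compaction over plain integers (the grouped array and the liveness test are made up for illustration):

    #include <cassert>
    #include <vector>

    // Compact 'entries', partitioned by 'starts' into groups
    // [starts[g], starts[g+1]), keeping only values for which alive() is
    // true. Survivors stay in group order, packed to the front; returns
    // the new per-group sizes.
    std::vector<int> Compact(std::vector<int>& entries,
                             const std::vector<int>& starts,
                             bool (*alive)(int)) {
      std::vector<int> new_sizes;
      int write = 0;
      for (size_t g = 0; g + 1 < starts.size(); ++g) {
        int kept = 0;
        for (int i = starts[g]; i < starts[g + 1]; ++i) {
          if (alive(entries[i])) entries[write + kept++] = entries[i];
        }
        new_sizes.push_back(kept);
        write += kept;
      }
      for (size_t i = write; i < entries.size(); ++i) entries[i] = 0;  // clear tail
      return new_sizes;
    }

    int main() {
      std::vector<int> entries = {1, 2, 3, 4, 5, 6};  // groups [0,3) and [3,6)
      std::vector<int> starts = {0, 3, 6};
      auto sizes = Compact(entries, starts, [](int v) { return v % 2 == 0; });
      assert(sizes[0] == 1 && sizes[1] == 2);         // {2} and {4, 6} survive
      assert(entries[0] == 2 && entries[1] == 4 && entries[2] == 6);
      return 0;
    }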
void MarkCompactCollector::ProcessWeakMaps() {
Object* weak_map_obj = encountered_weak_maps();
while (weak_map_obj != Smi::FromInt(0)) {
@@ -2587,9 +2773,6 @@ void MarkCompactCollector::EvacuateNewSpace() {
size,
NEW_SPACE);
} else {
- // Process the dead object before we write a NULL into its header.
- LiveObjectList::ProcessNonLive(object);
-
// Mark dead objects in the new space with null in their map field.
Memory::Address_at(object->address()) = NULL;
}
@@ -2622,10 +2805,10 @@ void MarkCompactCollector::EvacuateLiveObjectsFromPage(Page* p) {
for (;
cell_index < last_cell_index;
cell_index++, cell_base += 32 * kPointerSize) {
- ASSERT((unsigned)cell_index ==
- Bitmap::IndexToCell(
- Bitmap::CellAlignIndex(
- p->AddressToMarkbitIndex(cell_base))));
+ ASSERT(static_cast<unsigned>(cell_index) ==
+ Bitmap::IndexToCell(
+ Bitmap::CellAlignIndex(
+ p->AddressToMarkbitIndex(cell_base))));
if (cells[cell_index] == 0) continue;
int live_objects = MarkWordToObjectStarts(cells[cell_index], offsets);
@@ -2768,6 +2951,11 @@ static void SweepPrecisely(PagedSpace* space,
space->identity() == CODE_SPACE);
ASSERT((p->skip_list() == NULL) || (skip_list_mode == REBUILD_SKIP_LIST));
+ double start_time = 0.0;
+ if (FLAG_print_cumulative_gc_stat) {
+ start_time = OS::TimeCurrentMillis();
+ }
+
MarkBit::CellType* cells = p->markbits()->cells();
p->MarkSweptPrecisely();
@@ -2795,10 +2983,10 @@ static void SweepPrecisely(PagedSpace* space,
for (;
cell_index < last_cell_index;
cell_index++, object_address += 32 * kPointerSize) {
- ASSERT((unsigned)cell_index ==
- Bitmap::IndexToCell(
- Bitmap::CellAlignIndex(
- p->AddressToMarkbitIndex(object_address))));
+ ASSERT(static_cast<unsigned>(cell_index) ==
+ Bitmap::IndexToCell(
+ Bitmap::CellAlignIndex(
+ p->AddressToMarkbitIndex(object_address))));
int live_objects = MarkWordToObjectStarts(cells[cell_index], offsets);
int live_index = 0;
for ( ; live_objects != 0; live_objects--) {
@@ -2833,6 +3021,9 @@ static void SweepPrecisely(PagedSpace* space,
space->Free(free_start, static_cast<int>(p->area_end() - free_start));
}
p->ResetLiveBytes();
+ if (FLAG_print_cumulative_gc_stat) {
+ space->heap()->AddSweepingTime(OS::TimeCurrentMillis() - start_time);
+ }
}
@@ -2989,7 +3180,6 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
GCTracer::Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS);
// Update roots.
heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE);
- LiveObjectList::IterateElements(&updating_visitor);
}
{ GCTracer::Scope gc_scope(tracer_,
@@ -3060,7 +3250,7 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
switch (space->identity()) {
case OLD_DATA_SPACE:
- SweepConservatively(space, p);
+ SweepConservatively<SWEEP_SEQUENTIALLY>(space, NULL, p);
break;
case OLD_POINTER_SPACE:
SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, IGNORE_SKIP_LIST>(
@@ -3096,12 +3286,15 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
// Update pointer from the native contexts list.
updating_visitor.VisitPointer(heap_->native_contexts_list_address());
- heap_->symbol_table()->Iterate(&updating_visitor);
+ heap_->string_table()->Iterate(&updating_visitor);
// Update pointers from external string table.
heap_->UpdateReferencesInExternalStringTable(
&UpdateReferenceInExternalStringTableEntry);
+ // Update pointers in the new error object list.
+ heap_->error_object_list()->UpdateReferences();
+
if (!FLAG_watch_ic_patching) {
// Update JSFunction pointers from the runtime profiler.
heap()->isolate()->runtime_profiler()->UpdateSamplesAfterCompact(
@@ -3125,6 +3318,11 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
slots_buffer_allocator_.DeallocateChain(&migration_slots_buffer_);
ASSERT(migration_slots_buffer_ == NULL);
+}
+
+
+void MarkCompactCollector::ReleaseEvacuationCandidates() {
+ int npages = evacuation_candidates_.length();
for (int i = 0; i < npages; i++) {
Page* p = evacuation_candidates_[i];
if (!p->IsEvacuationCandidate()) continue;
@@ -3416,6 +3614,33 @@ static inline Address StartOfLiveObject(Address block_address, uint32_t cell) {
}
+template<MarkCompactCollector::SweepingParallelism mode>
+static intptr_t Free(PagedSpace* space,
+ FreeList* free_list,
+ Address start,
+ int size) {
+ if (mode == MarkCompactCollector::SWEEP_SEQUENTIALLY) {
+ return space->Free(start, size);
+ } else {
+ return size - free_list->Free(start, size);
+ }
+}
+
+
+// Force instantiation of templatized SweepConservatively method for
+// SWEEP_SEQUENTIALLY mode.
+template intptr_t MarkCompactCollector::
+ SweepConservatively<MarkCompactCollector::SWEEP_SEQUENTIALLY>(
+ PagedSpace*, FreeList*, Page*);
+
+
+// Force instantiation of templatized SweepConservatively method for
+// SWEEP_IN_PARALLEL mode.
+template intptr_t MarkCompactCollector::
+ SweepConservatively<MarkCompactCollector::SWEEP_IN_PARALLEL>(
+ PagedSpace*, FreeList*, Page*);
+
+
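Editorial note: the templated Free helper above selects sequential or parallel behavior at compile time from a non-type template parameter, and the two explicit instantiations are needed because the template is defined in a .cc file, so every mode used elsewhere must be instantiated there. A compact sketch of the same idiom, with the two behaviors stubbed out:

    #include <cstdio>

    enum SweepingParallelism { SWEEP_SEQUENTIALLY, SWEEP_IN_PARALLEL };

    // The 'mode' branch is resolved at compile time, so each instantiation
    // contains only one of the two paths.
    template <SweepingParallelism mode>
    int Free(int size) {
      if (mode == SWEEP_SEQUENTIALLY) {
        return size;        // e.g. hand memory straight back to the space
      } else {
        return size / 2;    // e.g. route it through a private free list
      }
    }

    // Explicit instantiations, as required when the definition lives in a
    // .cc file rather than a header.
    template int Free<SWEEP_SEQUENTIALLY>(int);
    template int Free<SWEEP_IN_PARALLEL>(int);

    int main() {
      std::printf("%d %d\n",
                  Free<SWEEP_SEQUENTIALLY>(8), Free<SWEEP_IN_PARALLEL>(8));
      return 0;
    }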
// Sweeps a space conservatively. After this has been done the larger free
// spaces have been put on the free list and the smaller ones have been
// ignored and left untouched. A free space is always either ignored or put
@@ -3423,8 +3648,16 @@ static inline Address StartOfLiveObject(Address block_address, uint32_t cell) {
// because it means that any FreeSpace maps left actually describe a region of
// memory that can be ignored when scanning. Dead objects other than free
// spaces will not contain the free space map.
-intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space, Page* p) {
+template<MarkCompactCollector::SweepingParallelism mode>
+intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space,
+ FreeList* free_list,
+ Page* p) {
ASSERT(!p->IsEvacuationCandidate() && !p->WasSwept());
+ ASSERT((mode == MarkCompactCollector::SWEEP_IN_PARALLEL &&
+ free_list != NULL) ||
+ (mode == MarkCompactCollector::SWEEP_SEQUENTIALLY &&
+ free_list == NULL));
+
MarkBit::CellType* cells = p->markbits()->cells();
p->MarkSweptConservatively();
@@ -3451,8 +3684,8 @@ intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space, Page* p) {
}
size_t size = block_address - p->area_start();
if (cell_index == last_cell_index) {
- freed_bytes += static_cast<int>(space->Free(p->area_start(),
- static_cast<int>(size)));
+ freed_bytes += Free<mode>(space, free_list, p->area_start(),
+ static_cast<int>(size));
ASSERT_EQ(0, p->LiveBytes());
return freed_bytes;
}
@@ -3461,8 +3694,9 @@ intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space, Page* p) {
Address free_end = StartOfLiveObject(block_address, cells[cell_index]);
// Free the first free space.
size = free_end - p->area_start();
- freed_bytes += space->Free(p->area_start(),
- static_cast<int>(size));
+ freed_bytes += Free<mode>(space, free_list, p->area_start(),
+ static_cast<int>(size));
+
// The start of the current free area is represented in undigested form by
// the address of the last 32-word section that contained a live object and
// the marking bitmap for that cell, which describes where the live object
@@ -3491,8 +3725,8 @@ intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space, Page* p) {
// so now we need to find the start of the first live object at the
// end of the free space.
free_end = StartOfLiveObject(block_address, cell);
- freed_bytes += space->Free(free_start,
- static_cast<int>(free_end - free_start));
+ freed_bytes += Free<mode>(space, free_list, free_start,
+ static_cast<int>(free_end - free_start));
}
}
// Update our undigested record of where the current free area started.
@@ -3506,8 +3740,8 @@ intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space, Page* p) {
// Handle the free space at the end of the page.
if (block_address - free_start > 32 * kPointerSize) {
free_start = DigestFreeStart(free_start, free_start_cell);
- freed_bytes += space->Free(free_start,
- static_cast<int>(block_address - free_start));
+ freed_bytes += Free<mode>(space, free_list, free_start,
+ static_cast<int>(block_address - free_start));
}
p->ResetLiveBytes();
@@ -3515,23 +3749,39 @@ intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space, Page* p) {
}
+void MarkCompactCollector::SweepInParallel(PagedSpace* space,
+ FreeList* private_free_list,
+ FreeList* free_list) {
+ PageIterator it(space);
+ while (it.has_next()) {
+ Page* p = it.next();
+
+ if (p->TryParallelSweeping()) {
+ SweepConservatively<SWEEP_IN_PARALLEL>(space, private_free_list, p);
+ free_list->Concatenate(private_free_list);
+ }
+ }
+}
+
+
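Editorial note: SweepInParallel above has each sweeper fill a private free list and then concatenate it into the shared one, so synchronization is paid once per page rather than once per free block. A generic sketch of that private-buffer-then-merge pattern with standard containers (a vector of ints stands in for the free list):

    #include <mutex>
    #include <thread>
    #include <vector>

    std::vector<int> shared_free_list;
    std::mutex shared_mutex;

    void SweepPages(int first, int last) {
      std::vector<int> private_free_list;
      for (int p = first; p < last; ++p) {
        private_free_list.push_back(p);  // "free" blocks found on page p
      }
      // Merge once, under the lock, instead of locking per block.
      std::lock_guard<std::mutex> lock(shared_mutex);
      shared_free_list.insert(shared_free_list.end(),
                              private_free_list.begin(),
                              private_free_list.end());
    }

    int main() {
      std::thread a(SweepPages, 0, 100), b(SweepPages, 100, 200);
      a.join();
      b.join();
      return shared_free_list.size() == 200 ? 0 : 1;
    }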
void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
space->set_was_swept_conservatively(sweeper == CONSERVATIVE ||
- sweeper == LAZY_CONSERVATIVE);
-
+ sweeper == LAZY_CONSERVATIVE ||
+ sweeper == PARALLEL_CONSERVATIVE ||
+ sweeper == CONCURRENT_CONSERVATIVE);
space->ClearStats();
PageIterator it(space);
intptr_t freed_bytes = 0;
int pages_swept = 0;
- intptr_t newspace_size = space->heap()->new_space()->Size();
bool lazy_sweeping_active = false;
bool unused_page_present = false;
while (it.has_next()) {
Page* p = it.next();
+ ASSERT(p->parallel_sweeping() == 0);
// Clear sweeping flags indicating that marking bits are still intact.
p->ClearSweptPrecisely();
p->ClearSweptConservatively();
@@ -3577,7 +3827,7 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
PrintF("Sweeping 0x%" V8PRIxPTR " conservatively.\n",
reinterpret_cast<intptr_t>(p));
}
- SweepConservatively(space, p);
+ SweepConservatively<SWEEP_SEQUENTIALLY>(space, NULL, p);
pages_swept++;
break;
}
@@ -3586,17 +3836,20 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
PrintF("Sweeping 0x%" V8PRIxPTR " conservatively as needed.\n",
reinterpret_cast<intptr_t>(p));
}
- freed_bytes += SweepConservatively(space, p);
+ freed_bytes += SweepConservatively<SWEEP_SEQUENTIALLY>(space, NULL, p);
pages_swept++;
- if (freed_bytes > 2 * newspace_size) {
- space->SetPagesToSweep(p->next_page());
- lazy_sweeping_active = true;
- } else {
- if (FLAG_gc_verbose) {
- PrintF("Only %" V8PRIdPTR " bytes freed. Still sweeping.\n",
- freed_bytes);
- }
+ space->SetPagesToSweep(p->next_page());
+ lazy_sweeping_active = true;
+ break;
+ }
+ case CONCURRENT_CONSERVATIVE:
+ case PARALLEL_CONSERVATIVE: {
+ if (FLAG_gc_verbose) {
+ PrintF("Sweeping 0x%" V8PRIxPTR " conservatively in parallel.\n",
+ reinterpret_cast<intptr_t>(p));
}
+ p->set_parallel_sweeping(1);
+ space->IncreaseUnsweptFreeBytes(p);
break;
}
case PRECISE: {
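Editorial note: in the PARALLEL/CONCURRENT cases above, SweepSpace only tags each page with set_parallel_sweeping(1); the actual work happens later when a sweeper thread claims the page via TryParallelSweeping. A plausible sketch of that claim step with an atomic compare-and-swap, so that racing sweepers each take a disjoint set of pages (flag values and names are illustrative):

    #include <atomic>
    #include <thread>
    #include <vector>

    // One flag per page: 1 = pending, claimed atomically by whichever
    // sweeper gets there first.
    std::vector<std::atomic<int>> pages(8);
    std::atomic<int> swept_count(0);

    bool TryClaim(std::atomic<int>& page) {
      int expected = 1;
      return page.compare_exchange_strong(expected, 2);  // 2 = being swept
    }

    void Sweeper() {
      for (auto& page : pages) {
        if (TryClaim(page)) swept_count.fetch_add(1);
      }
    }

    int main() {
      for (auto& page : pages) page.store(1);  // mark all pages pending
      std::thread a(Sweeper), b(Sweeper);
      a.join();
      b.join();
      return swept_count.load() == 8 ? 0 : 1;  // each page swept exactly once
    }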
@@ -3636,6 +3889,8 @@ void MarkCompactCollector::SweepSpaces() {
#endif
SweeperType how_to_sweep =
FLAG_lazy_sweeping ? LAZY_CONSERVATIVE : CONSERVATIVE;
+ if (FLAG_parallel_sweeping) how_to_sweep = PARALLEL_CONSERVATIVE;
+ if (FLAG_concurrent_sweeping) how_to_sweep = CONCURRENT_CONSERVATIVE;
if (FLAG_expose_gc) how_to_sweep = CONSERVATIVE;
if (sweep_precisely_) how_to_sweep = PRECISE;
// Noncompacting collections simply sweep the spaces to clear the mark
@@ -3643,9 +3898,20 @@ void MarkCompactCollector::SweepSpaces() {
// the map space last because freeing non-live maps overwrites them and
// the other spaces rely on possibly non-live maps to get the sizes for
// non-live objects.
+
SweepSpace(heap()->old_pointer_space(), how_to_sweep);
SweepSpace(heap()->old_data_space(), how_to_sweep);
+ if (how_to_sweep == PARALLEL_CONSERVATIVE ||
+ how_to_sweep == CONCURRENT_CONSERVATIVE) {
+ // TODO(hpayer): fix race with concurrent sweeper
+ StartSweeperThreads();
+ }
+
+ if (how_to_sweep == PARALLEL_CONSERVATIVE) {
+ WaitUntilSweepingCompleted();
+ }
+
RemoveDeadInvalidatedCode();
SweepSpace(heap()->code_space(), PRECISE);
@@ -3660,6 +3926,10 @@ void MarkCompactCollector::SweepSpaces() {
// Deallocate unmarked objects and clear marked bits for marked objects.
heap_->lo_space()->FreeUnmarkedObjects();
+
+ if (how_to_sweep != CONCURRENT_CONSERVATIVE) {
+ FinalizeSweeping();
+ }
}
diff --git a/src/3rdparty/v8/src/mark-compact.h b/src/3rdparty/v8/src/mark-compact.h
index 0a4c1ea..b5d60fd 100644
--- a/src/3rdparty/v8/src/mark-compact.h
+++ b/src/3rdparty/v8/src/mark-compact.h
@@ -53,59 +53,59 @@ class Marking {
: heap_(heap) {
}
- static inline MarkBit MarkBitFrom(Address addr);
+ INLINE(static MarkBit MarkBitFrom(Address addr));
- static inline MarkBit MarkBitFrom(HeapObject* obj) {
+ INLINE(static MarkBit MarkBitFrom(HeapObject* obj)) {
return MarkBitFrom(reinterpret_cast<Address>(obj));
}
// Impossible markbits: 01
static const char* kImpossibleBitPattern;
- static inline bool IsImpossible(MarkBit mark_bit) {
+ INLINE(static bool IsImpossible(MarkBit mark_bit)) {
return !mark_bit.Get() && mark_bit.Next().Get();
}
// Black markbits: 10 - this is required by the sweeper.
static const char* kBlackBitPattern;
- static inline bool IsBlack(MarkBit mark_bit) {
+ INLINE(static bool IsBlack(MarkBit mark_bit)) {
return mark_bit.Get() && !mark_bit.Next().Get();
}
// White markbits: 00 - this is required by the mark bit clearer.
static const char* kWhiteBitPattern;
- static inline bool IsWhite(MarkBit mark_bit) {
+ INLINE(static bool IsWhite(MarkBit mark_bit)) {
return !mark_bit.Get();
}
// Grey markbits: 11
static const char* kGreyBitPattern;
- static inline bool IsGrey(MarkBit mark_bit) {
+ INLINE(static bool IsGrey(MarkBit mark_bit)) {
return mark_bit.Get() && mark_bit.Next().Get();
}
- static inline void MarkBlack(MarkBit mark_bit) {
+ INLINE(static void MarkBlack(MarkBit mark_bit)) {
mark_bit.Set();
mark_bit.Next().Clear();
}
- static inline void BlackToGrey(MarkBit markbit) {
+ INLINE(static void BlackToGrey(MarkBit markbit)) {
markbit.Next().Set();
}
- static inline void WhiteToGrey(MarkBit markbit) {
+ INLINE(static void WhiteToGrey(MarkBit markbit)) {
markbit.Set();
markbit.Next().Set();
}
- static inline void GreyToBlack(MarkBit markbit) {
+ INLINE(static void GreyToBlack(MarkBit markbit)) {
markbit.Next().Clear();
}
- static inline void BlackToGrey(HeapObject* obj) {
+ INLINE(static void BlackToGrey(HeapObject* obj)) {
BlackToGrey(MarkBitFrom(obj));
}
- static inline void AnyToGrey(MarkBit markbit) {
+ INLINE(static void AnyToGrey(MarkBit markbit)) {
markbit.Set();
markbit.Next().Set();
}
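Editorial note: the Marking helpers above encode an object's color in two adjacent mark bits, exactly as the comments state: white 00, black 10, grey 11, with 01 impossible, and each transition flips only the bits it must. A table-free sketch of the encoding with a plain two-bit value (real V8 stores the pair in a per-page bitmap, not an int):

    #include <cassert>

    // Two mark bits packed into one value: bit 0 is the mark bit,
    // bit 1 is the "next" bit.
    enum Color { WHITE = 0x0, BLACK = 0x1, GREY = 0x3, IMPOSSIBLE = 0x2 };

    bool IsWhite(int c) { return (c & 1) == 0; }           // mark bit clear
    bool IsBlack(int c) { return c == BLACK; }             // 10 pattern
    bool IsGrey(int c)  { return c == GREY; }              // 11 pattern

    int WhiteToGrey(int c) { assert(IsWhite(c)); return GREY; }
    int GreyToBlack(int c) { assert(IsGrey(c));  return c & 1; }  // clear next
    int BlackToGrey(int c) { assert(IsBlack(c)); return c | 2; }  // set next

    int main() {
      int c = WHITE;
      c = WhiteToGrey(c);
      assert(IsGrey(c));
      c = GreyToBlack(c);
      assert(IsBlack(c));
      return 0;
    }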
@@ -194,7 +194,7 @@ class MarkingDeque {
// Push the (marked) object on the marking stack if there is room,
// otherwise mark the object as overflowed and wait for a rescan of the
// heap.
- inline void PushBlack(HeapObject* object) {
+ INLINE(void PushBlack(HeapObject* object)) {
ASSERT(object->IsHeapObject());
if (IsFull()) {
Marking::BlackToGrey(object);
@@ -206,7 +206,7 @@ class MarkingDeque {
}
}
- inline void PushGrey(HeapObject* object) {
+ INLINE(void PushGrey(HeapObject* object)) {
ASSERT(object->IsHeapObject());
if (IsFull()) {
SetOverflowed();
@@ -216,7 +216,7 @@ class MarkingDeque {
}
}
- inline HeapObject* Pop() {
+ INLINE(HeapObject* Pop()) {
ASSERT(!IsEmpty());
top_ = ((top_ - 1) & mask_);
HeapObject* object = array_[top_];
@@ -224,7 +224,7 @@ class MarkingDeque {
return object;
}
- inline void UnshiftGrey(HeapObject* object) {
+ INLINE(void UnshiftGrey(HeapObject* object)) {
ASSERT(object->IsHeapObject());
if (IsFull()) {
SetOverflowed();
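Editorial note: the MarkingDeque operations above use a power-of-two backing array with a bit mask, so index wrap-around is a single AND instead of a modulo or a branch. A minimal ring-buffer sketch using the same masking trick:

    #include <cassert>

    const int kSize = 8;           // must be a power of two
    const int kMask = kSize - 1;

    int array_[kSize];
    int top_ = 0;
    int bottom_ = 0;

    bool IsEmpty() { return top_ == bottom_; }
    bool IsFull()  { return ((top_ + 1) & kMask) == bottom_; }

    void Push(int v) {
      assert(!IsFull());
      array_[top_] = v;
      top_ = (top_ + 1) & kMask;   // wrap with a mask, as PushBlack does
    }

    int Pop() {
      assert(!IsEmpty());
      top_ = (top_ - 1) & kMask;   // works even when top_ is 0
      return array_[top_];
    }

    int main() {
      Push(1);
      Push(2);
      assert(Pop() == 2 && Pop() == 1 && IsEmpty());
      return 0;
    }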
@@ -366,10 +366,10 @@ class SlotsBuffer {
return buffer != NULL && buffer->chain_length_ >= kChainLengthThreshold;
}
- static bool AddTo(SlotsBufferAllocator* allocator,
- SlotsBuffer** buffer_address,
- ObjectSlot slot,
- AdditionMode mode) {
+ INLINE(static bool AddTo(SlotsBufferAllocator* allocator,
+ SlotsBuffer** buffer_address,
+ ObjectSlot slot,
+ AdditionMode mode)) {
SlotsBuffer* buffer = *buffer_address;
if (buffer == NULL || buffer->IsFull()) {
if (mode == FAIL_ON_OVERFLOW && ChainLengthThresholdReached(buffer)) {
@@ -423,6 +423,10 @@ class CodeFlusher {
if (GetNextCandidate(shared_info) == NULL) {
SetNextCandidate(shared_info, shared_function_info_candidates_head_);
shared_function_info_candidates_head_ = shared_info;
+ } else {
+ // TODO(mstarzinger): Active in release mode to flush out problems.
+ // Should be turned back into an ASSERT or removed completely.
+ CHECK(ContainsCandidate(shared_info));
}
}
@@ -434,6 +438,9 @@ class CodeFlusher {
}
}
+ bool ContainsCandidate(SharedFunctionInfo* shared_info);
+
+ void EvictCandidate(SharedFunctionInfo* shared_info);
void EvictCandidate(JSFunction* function);
void ProcessCandidates() {
@@ -587,18 +594,30 @@ class MarkCompactCollector {
enum SweeperType {
CONSERVATIVE,
LAZY_CONSERVATIVE,
+ PARALLEL_CONSERVATIVE,
+ CONCURRENT_CONSERVATIVE,
PRECISE
};
+ enum SweepingParallelism {
+ SWEEP_SEQUENTIALLY,
+ SWEEP_IN_PARALLEL
+ };
+
#ifdef VERIFY_HEAP
void VerifyMarkbitsAreClean();
static void VerifyMarkbitsAreClean(PagedSpace* space);
static void VerifyMarkbitsAreClean(NewSpace* space);
+ void VerifyWeakEmbeddedMapsInOptimizedCode();
+ void VerifyOmittedPrototypeChecks();
#endif
// Sweep a single page from the given space conservatively.
// Return a number of reclaimed bytes.
- static intptr_t SweepConservatively(PagedSpace* space, Page* p);
+ template<SweepingParallelism type>
+ static intptr_t SweepConservatively(PagedSpace* space,
+ FreeList* free_list,
+ Page* p);
INLINE(static bool ShouldSkipEvacuationSlotRecording(Object** anchor)) {
return Page::FromAddress(reinterpret_cast<Address>(anchor))->
@@ -615,7 +634,7 @@ class MarkCompactCollector {
IsEvacuationCandidate();
}
- void EvictEvacuationCandidate(Page* page) {
+ INLINE(void EvictEvacuationCandidate(Page* page)) {
if (FLAG_trace_fragmentation) {
PrintF("Page %p is too popular. Disabling evacuation.\n",
reinterpret_cast<void*>(page));
@@ -658,10 +677,32 @@ class MarkCompactCollector {
void ClearMarkbits();
+ bool abort_incremental_marking() const { return abort_incremental_marking_; }
+
bool is_compacting() const { return compacting_; }
MarkingParity marking_parity() { return marking_parity_; }
+ // Concurrent and parallel sweeping support.
+ void SweepInParallel(PagedSpace* space,
+ FreeList* private_free_list,
+ FreeList* free_list);
+
+ void WaitUntilSweepingCompleted();
+
+ intptr_t StealMemoryFromSweeperThreads(PagedSpace* space);
+
+ bool AreSweeperThreadsActivated();
+
+ bool IsConcurrentSweepingInProgress();
+
+ void FinalizeSweeping();
+
+ // Parallel marking support.
+ void MarkInParallel();
+
+ void WaitUntilMarkingCompleted();
+
private:
MarkCompactCollector();
~MarkCompactCollector();
@@ -670,6 +711,9 @@ class MarkCompactCollector {
void RemoveDeadInvalidatedCode();
void ProcessInvalidatedCode(ObjectVisitor* visitor);
+ void ReleaseEvacuationCandidates();
+
+ void StartSweeperThreads();
#ifdef DEBUG
enum CollectorState {
@@ -702,6 +746,9 @@ class MarkCompactCollector {
bool was_marked_incrementally_;
+ // True if concurrent or parallel sweeping is currently in progress.
+ bool sweeping_pending_;
+
// A pointer to the current stack-allocated GC tracer object during a full
// collection (NULL before and after).
GCTracer* tracer_;
@@ -750,13 +797,9 @@ class MarkCompactCollector {
// Mark the heap roots and all objects reachable from them.
void MarkRoots(RootMarkingVisitor* visitor);
- // Mark the symbol table specially. References to symbols from the
- // symbol table are weak.
- void MarkSymbolTable();
-
- // Mark objects in object groups that have at least one object in the
- // group marked.
- void MarkObjectGroups();
+ // Mark the string table specially. References to internalized strings from
+ // the string table are weak.
+ void MarkStringTable();
// Mark objects in implicit references groups if their parent object
// is marked.
@@ -764,7 +807,7 @@ class MarkCompactCollector {
// Mark all objects which are reachable due to host application
// logic like object groups or implicit references' groups.
- void ProcessExternalMarking();
+ void ProcessExternalMarking(RootMarkingVisitor* visitor);
// Mark objects reachable (transitively) from objects in the marking stack
// or overflowed in the heap.
@@ -788,13 +831,17 @@ class MarkCompactCollector {
// Callback function for telling whether the object *p is an unmarked
// heap object.
static bool IsUnmarkedHeapObject(Object** p);
+ static bool IsUnmarkedHeapObjectWithHeap(Heap* heap, Object** p);
// Map transitions from a live map to a dead map must be killed.
// We replace them with a null descriptor, with the same key.
- void ClearNonLiveTransitions();
+ void ClearNonLiveReferences();
void ClearNonLivePrototypeTransitions(Map* map);
void ClearNonLiveMapTransitions(Map* map, MarkBit map_mark);
+ void ClearAndDeoptimizeDependentCode(Map* map);
+ void ClearNonLiveDependentCode(Map* map);
+
// Marking detaches initial maps from SharedFunctionInfo objects
// to make this reference weak. We need to reattach initial maps
// back after collection. This is either done during
diff --git a/src/3rdparty/v8/src/marking-thread.cc b/src/3rdparty/v8/src/marking-thread.cc
new file mode 100644
index 0000000..ac64381
--- /dev/null
+++ b/src/3rdparty/v8/src/marking-thread.cc
@@ -0,0 +1,85 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "marking-thread.h"
+
+#include "v8.h"
+
+#include "isolate.h"
+#include "v8threads.h"
+
+namespace v8 {
+namespace internal {
+
+MarkingThread::MarkingThread(Isolate* isolate)
+ : Thread("MarkingThread"),
+ isolate_(isolate),
+ heap_(isolate->heap()),
+ start_marking_semaphore_(OS::CreateSemaphore(0)),
+ end_marking_semaphore_(OS::CreateSemaphore(0)),
+ stop_semaphore_(OS::CreateSemaphore(0)) {
+ NoBarrier_Store(&stop_thread_, static_cast<AtomicWord>(false));
+ id_ = NoBarrier_AtomicIncrement(&id_counter_, 1);
+}
+
+
+Atomic32 MarkingThread::id_counter_ = -1;
+
+
+void MarkingThread::Run() {
+ Isolate::SetIsolateThreadLocals(isolate_, NULL);
+
+ while (true) {
+ start_marking_semaphore_->Wait();
+
+ if (Acquire_Load(&stop_thread_)) {
+ stop_semaphore_->Signal();
+ return;
+ }
+
+ end_marking_semaphore_->Signal();
+ }
+}
+
+
+void MarkingThread::Stop() {
+ Release_Store(&stop_thread_, static_cast<AtomicWord>(true));
+ start_marking_semaphore_->Signal();
+ stop_semaphore_->Wait();
+}
+
+
+void MarkingThread::StartMarking() {
+ start_marking_semaphore_->Signal();
+}
+
+
+void MarkingThread::WaitForMarkingThread() {
+ end_marking_semaphore_->Wait();
+}
+
+} } // namespace v8::internal
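Editorial note: MarkingThread::Run above is a classic worker loop: wait on a start semaphore, check a stop flag, do the work, signal completion; Stop() sets the flag, kicks the start semaphore, and waits for the acknowledgement. A portable sketch of that shutdown handshake, where std::atomic stands in for V8's NoBarrier/Acquire/Release atomics and a condition-variable semaphore replaces OS::CreateSemaphore:

    #include <atomic>
    #include <condition_variable>
    #include <mutex>
    #include <thread>

    class Semaphore {
     public:
      void Signal() { std::lock_guard<std::mutex> l(m_); ++n_; cv_.notify_one(); }
      void Wait() {
        std::unique_lock<std::mutex> l(m_);
        cv_.wait(l, [&] { return n_ > 0; });
        --n_;
      }
     private:
      std::mutex m_;
      std::condition_variable cv_;
      int n_ = 0;
    };

    Semaphore start_work, end_work, stopped;
    std::atomic<bool> stop_thread(false);

    void Run() {
      while (true) {
        start_work.Wait();          // sleep until asked to work
        if (stop_thread.load()) {   // Stop() woke us up to quit
          stopped.Signal();
          return;
        }
        /* ... do one round of marking ... */
        end_work.Signal();          // report the round finished
      }
    }

    int main() {
      std::thread t(Run);
      start_work.Signal();          // StartMarking()
      end_work.Wait();              // WaitForMarkingThread()
      stop_thread.store(true);      // Stop(): set flag, wake worker, wait
      start_work.Signal();
      stopped.Wait();
      t.join();
      return 0;
    }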
diff --git a/src/3rdparty/v8/src/inspector.cc b/src/3rdparty/v8/src/marking-thread.h
index 833d338..9efa3af 100644
--- a/src/3rdparty/v8/src/inspector.cc
+++ b/src/3rdparty/v8/src/marking-thread.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -25,39 +25,47 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#ifndef V8_MARKING_THREAD_H_
+#define V8_MARKING_THREAD_H_
-#include "v8.h"
-#include "inspector.h"
+#include "atomicops.h"
+#include "flags.h"
+#include "platform.h"
+#include "v8utils.h"
+#include "spaces.h"
+
+#include "heap.h"
namespace v8 {
namespace internal {
-#ifdef INSPECTOR
-
-//============================================================================
-// The Inspector.
+class MarkingThread : public Thread {
+ public:
+ explicit MarkingThread(Isolate* isolate);
-void Inspector::DumpObjectType(FILE* out, Object* obj, bool print_more) {
- // Dump the object pointer.
- OS::FPrint(out, "%p:", reinterpret_cast<void*>(obj));
- if (obj->IsHeapObject()) {
- HeapObject* hobj = HeapObject::cast(obj);
- OS::FPrint(out, " size %d :", hobj->Size());
- }
+ void Run();
+ void Stop();
+ void StartMarking();
+ void WaitForMarkingThread();
- // Dump each object classification that matches this object.
-#define FOR_EACH_TYPE(type) \
- if (obj->Is##type()) { \
- OS::FPrint(out, " %s", #type); \
+ ~MarkingThread() {
+ delete start_marking_semaphore_;
+ delete end_marking_semaphore_;
+ delete stop_semaphore_;
}
- OBJECT_TYPE_LIST(FOR_EACH_TYPE)
- HEAP_OBJECT_TYPE_LIST(FOR_EACH_TYPE)
-#undef FOR_EACH_TYPE
-}
-
-#endif // INSPECTOR
+ private:
+ Isolate* isolate_;
+ Heap* heap_;
+ Semaphore* start_marking_semaphore_;
+ Semaphore* end_marking_semaphore_;
+ Semaphore* stop_semaphore_;
+ volatile AtomicWord stop_thread_;
+ int id_;
+ static Atomic32 id_counter_;
+};
} } // namespace v8::internal
+#endif // V8_MARKING_THREAD_H_
diff --git a/src/3rdparty/v8/src/math.js b/src/3rdparty/v8/src/math.js
index aee56af..4686328 100644
--- a/src/3rdparty/v8/src/math.js
+++ b/src/3rdparty/v8/src/math.js
@@ -131,19 +131,16 @@ function MathMax(arg1, arg2) { // length == 2
// All comparisons failed, one of the arguments must be NaN.
return 0/0; // Compiler constant-folds this to NaN.
}
- if (length == 0) {
- return -1/0; // Compiler constant-folds this to -Infinity.
- }
- var r = arg1;
- if (!IS_NUMBER(r)) r = NonNumberToNumber(r);
- if (NUMBER_IS_NAN(r)) return r;
- for (var i = 1; i < length; i++) {
+ var r = -1/0; // Compiler constant-folds this to -Infinity.
+ for (var i = 0; i < length; i++) {
var n = %_Arguments(i);
if (!IS_NUMBER(n)) n = NonNumberToNumber(n);
- if (NUMBER_IS_NAN(n)) return n;
// Make sure +0 is considered greater than -0. -0 is never a Smi, +0 can be
// a Smi or heap number.
- if (n > r || (r == 0 && n == 0 && !%_IsSmi(r) && 1 / r < 0)) r = n;
+ if (NUMBER_IS_NAN(n) || n > r ||
+ (r == 0 && n == 0 && !%_IsSmi(r) && 1 / r < 0)) {
+ r = n;
+ }
}
return r;
}
@@ -164,19 +161,16 @@ function MathMin(arg1, arg2) { // length == 2
// All comparisons failed, one of the arguments must be NaN.
return 0/0; // Compiler constant-folds this to NaN.
}
- if (length == 0) {
- return 1/0; // Compiler constant-folds this to Infinity.
- }
- var r = arg1;
- if (!IS_NUMBER(r)) r = NonNumberToNumber(r);
- if (NUMBER_IS_NAN(r)) return r;
- for (var i = 1; i < length; i++) {
+ var r = 1/0; // Compiler constant-folds this to Infinity.
+ for (var i = 0; i < length; i++) {
var n = %_Arguments(i);
if (!IS_NUMBER(n)) n = NonNumberToNumber(n);
- if (NUMBER_IS_NAN(n)) return n;
// Make sure -0 is considered less than +0. -0 is never a Smi, +0 can be a
// Smi or a heap number.
- if (n < r || (r == 0 && n == 0 && !%_IsSmi(n) && 1 / n < 0)) r = n;
+ if (NUMBER_IS_NAN(n) || n < r ||
+ (r == 0 && n == 0 && !%_IsSmi(n) && 1 / n < 0)) {
+ r = n;
+ }
}
return r;
}
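Editorial note: the rewritten MathMax/MathMin above fold every argument in one loop, and the 1 / r < 0 test distinguishes -0 from +0 so that max(-0, +0) yields +0 while NaN stays contagious. The same two corner cases expressed in C++, where std::signbit plays the role of the 1/r sign test (a sketch of the semantics, not V8's code):

    #include <cassert>
    #include <cmath>

    // Max over a range with JS Math.max semantics: NaN is contagious
    // and +0 is considered greater than -0.
    double MaxJs(const double* v, int n) {
      double r = -INFINITY;               // empty input yields -Infinity
      for (int i = 0; i < n; i++) {
        double x = v[i];
        if (std::isnan(x) || x > r ||
            (r == 0 && x == 0 && std::signbit(r))) {  // prefer +0 over -0
          r = x;
        }
      }
      return r;
    }

    int main() {
      double a[] = {-0.0, 0.0};
      assert(!std::signbit(MaxJs(a, 2)));  // max(-0, +0) is +0
      double b[] = {1.0, NAN, 3.0};
      assert(std::isnan(MaxJs(b, 3)));     // NaN propagates through the fold
      assert(MaxJs(a, 0) == -INFINITY);    // empty argument list
      return 0;
    }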
diff --git a/src/3rdparty/v8/src/messages.cc b/src/3rdparty/v8/src/messages.cc
index ce965fc..de18a4b 100644
--- a/src/3rdparty/v8/src/messages.cc
+++ b/src/3rdparty/v8/src/messages.cc
@@ -38,14 +38,15 @@ namespace internal {
// If no message listeners have been registered this one is called
// by default.
-void MessageHandler::DefaultMessageReport(const MessageLocation* loc,
+void MessageHandler::DefaultMessageReport(Isolate* isolate,
+ const MessageLocation* loc,
Handle<Object> message_obj) {
- SmartArrayPointer<char> str = GetLocalizedMessage(message_obj);
+ SmartArrayPointer<char> str = GetLocalizedMessage(isolate, message_obj);
if (loc == NULL) {
PrintF("%s\n", *str);
} else {
- HandleScope scope;
- Handle<Object> data(loc->script()->name());
+ HandleScope scope(isolate);
+ Handle<Object> data(loc->script()->name(), isolate);
SmartArrayPointer<char> data_str;
if (data->IsString())
data_str = Handle<String>::cast(data)->ToCString(DISALLOW_NULLS);
@@ -61,7 +62,7 @@ Handle<JSMessageObject> MessageHandler::MakeMessageObject(
Vector< Handle<Object> > args,
Handle<String> stack_trace,
Handle<JSArray> stack_frames) {
- Handle<String> type_handle = FACTORY->LookupAsciiSymbol(type);
+ Handle<String> type_handle = FACTORY->InternalizeUtf8String(type);
Handle<FixedArray> arguments_elements =
FACTORY->NewFixedArray(args.length());
for (int i = 0; i < args.length(); i++) {
@@ -112,7 +113,7 @@ void MessageHandler::ReportMessage(Isolate* isolate,
if (isolate->has_pending_exception()) {
isolate->pending_exception()->ToObject(&exception_object);
}
- Handle<Object> exception_handle(exception_object);
+ Handle<Object> exception_handle(exception_object, isolate);
Isolate::ExceptionScope exception_scope(isolate);
isolate->clear_pending_exception();
@@ -124,21 +125,25 @@ void MessageHandler::ReportMessage(Isolate* isolate,
v8::NeanderArray global_listeners(FACTORY->message_listeners());
int global_length = global_listeners.length();
if (global_length == 0) {
- DefaultMessageReport(loc, message);
+ DefaultMessageReport(isolate, loc, message);
if (isolate->has_scheduled_exception()) {
isolate->clear_scheduled_exception();
}
} else {
for (int i = 0; i < global_length; i++) {
- HandleScope scope;
+ HandleScope scope(isolate);
if (global_listeners.get(i)->IsUndefined()) continue;
- Handle<Foreign> callback_obj(Foreign::cast(global_listeners.get(i)));
+ v8::NeanderObject listener(JSObject::cast(global_listeners.get(i)));
+ Handle<Foreign> callback_obj(Foreign::cast(listener.get(0)));
v8::MessageCallback callback =
FUNCTION_CAST<v8::MessageCallback>(callback_obj->foreign_address());
+ Handle<Object> callback_data(listener.get(1), isolate);
{
// Do not allow exceptions to propagate.
v8::TryCatch try_catch;
- callback(api_message_obj, api_exception_obj);
+ callback(api_message_obj, callback_data->IsUndefined()
+ ? api_exception_obj
+ : v8::Utils::ToLocal(callback_data));
}
if (isolate->has_scheduled_exception()) {
isolate->clear_scheduled_exception();
@@ -148,27 +153,30 @@ void MessageHandler::ReportMessage(Isolate* isolate,
}
-Handle<String> MessageHandler::GetMessage(Handle<Object> data) {
- Handle<String> fmt_str = FACTORY->LookupAsciiSymbol("FormatMessage");
+Handle<String> MessageHandler::GetMessage(Isolate* isolate,
+ Handle<Object> data) {
+ Factory* factory = isolate->factory();
+ Handle<String> fmt_str =
+ factory->InternalizeOneByteString(STATIC_ASCII_VECTOR("FormatMessage"));
Handle<JSFunction> fun =
Handle<JSFunction>(
JSFunction::cast(
- Isolate::Current()->js_builtins_object()->
+ isolate->js_builtins_object()->
GetPropertyNoExceptionThrown(*fmt_str)));
Handle<JSMessageObject> message = Handle<JSMessageObject>::cast(data);
- Handle<Object> argv[] = { Handle<Object>(message->type()),
- Handle<Object>(message->arguments()) };
+ Handle<Object> argv[] = { Handle<Object>(message->type(), isolate),
+ Handle<Object>(message->arguments(), isolate) };
bool caught_exception;
Handle<Object> result =
Execution::TryCall(fun,
- Isolate::Current()->js_builtins_object(),
+ isolate->js_builtins_object(),
ARRAY_SIZE(argv),
argv,
&caught_exception);
if (caught_exception || !result->IsString()) {
- return FACTORY->LookupAsciiSymbol("<error>");
+ return factory->InternalizeOneByteString(STATIC_ASCII_VECTOR("<error>"));
}
Handle<String> result_string = Handle<String>::cast(result);
// A string that has been obtained from JS code in this way is
@@ -182,9 +190,10 @@ Handle<String> MessageHandler::GetMessage(Handle<Object> data) {
SmartArrayPointer<char> MessageHandler::GetLocalizedMessage(
+ Isolate* isolate,
Handle<Object> data) {
- HandleScope scope;
- return GetMessage(data)->ToCString(DISALLOW_NULLS);
+ HandleScope scope(isolate);
+ return GetMessage(isolate, data)->ToCString(DISALLOW_NULLS);
}
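Editorial note: the listener change above stores each callback together with optional per-listener data and falls back to the exception object when none was registered (callback_data->IsUndefined() ? api_exception_obj : ...). A generic sketch of that registry-with-user-data pattern; the types and names here are illustrative, not V8's API:

    #include <cstdio>
    #include <vector>

    typedef void (*MessageCallback)(const char* message, void* data);

    struct Listener {
      MessageCallback callback;
      void* data;  // may be null: fall back to a default argument
    };

    std::vector<Listener> listeners;

    void AddMessageListener(MessageCallback cb, void* data) {
      listeners.push_back(Listener{cb, data});
    }

    void ReportMessage(const char* message, void* default_data) {
      for (const Listener& l : listeners) {
        // Pass the listener's own data if it registered any.
        l.callback(message, l.data ? l.data : default_data);
      }
    }

    void Print(const char* message, void* data) {
      std::printf("%s (%s)\n", message, static_cast<const char*>(data));
    }

    int main() {
      AddMessageListener(Print, nullptr);
      AddMessageListener(Print, const_cast<char*>("custom"));
      ReportMessage("oops", const_cast<char*>("default"));
      return 0;
    }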
diff --git a/src/3rdparty/v8/src/messages.h b/src/3rdparty/v8/src/messages.h
index 358509e..3361abe 100644
--- a/src/3rdparty/v8/src/messages.h
+++ b/src/3rdparty/v8/src/messages.h
@@ -102,10 +102,12 @@ class MessageHandler {
MessageLocation* loc,
Handle<Object> message);
- static void DefaultMessageReport(const MessageLocation* loc,
+ static void DefaultMessageReport(Isolate* isolate,
+ const MessageLocation* loc,
Handle<Object> message_obj);
- static Handle<String> GetMessage(Handle<Object> data);
- static SmartArrayPointer<char> GetLocalizedMessage(Handle<Object> data);
+ static Handle<String> GetMessage(Isolate* isolate, Handle<Object> data);
+ static SmartArrayPointer<char> GetLocalizedMessage(Isolate* isolate,
+ Handle<Object> data);
};
} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/messages.js b/src/3rdparty/v8/src/messages.js
index f04bed9..14ba73f 100644
--- a/src/3rdparty/v8/src/messages.js
+++ b/src/3rdparty/v8/src/messages.js
@@ -190,8 +190,9 @@ function NoSideEffectToString(obj) {
if (IS_BOOLEAN(obj)) return x ? 'true' : 'false';
if (IS_UNDEFINED(obj)) return 'undefined';
if (IS_NULL(obj)) return 'null';
+ if (IS_FUNCTION(obj)) return %_CallFunction(obj, FunctionToString);
if (IS_OBJECT(obj) && %GetDataProperty(obj, "toString") === ObjectToString) {
- var constructor = obj.constructor;
+ var constructor = %GetDataProperty(obj, "constructor");
if (typeof constructor == "function") {
var constructorName = constructor.name;
if (IS_STRING(constructorName) && constructorName !== "") {
@@ -251,13 +252,8 @@ function ToDetailString(obj) {
function MakeGenericError(constructor, type, args) {
- if (IS_UNDEFINED(args)) {
- args = [];
- }
- var e = new constructor(FormatMessage(type, args));
- e.type = type;
- e.arguments = args;
- return e;
+ if (IS_UNDEFINED(args)) args = [];
+ return new constructor(FormatMessage(type, args));
}
@@ -566,7 +562,7 @@ function ScriptNameOrSourceURL() {
%_RegExpExec(sourceUrlPattern, source, sourceUrlPos - 4, matchInfo);
if (match) {
this.cachedNameOrSourceURL =
- SubString(source, matchInfo[CAPTURE(2)], matchInfo[CAPTURE(3)]);
+ %_SubString(source, matchInfo[CAPTURE(2)], matchInfo[CAPTURE(3)]);
}
}
return this.cachedNameOrSourceURL;
@@ -755,29 +751,6 @@ function GetStackTraceLine(recv, fun, pos, isGlobal) {
// ----------------------------------------------------------------------------
// Error implementation
-// Defines accessors for a property that is calculated the first time
-// the property is read.
-function DefineOneShotAccessor(obj, name, fun) {
- // Note that the accessors consistently operate on 'obj', not 'this'.
- // Since the object may occur in someone else's prototype chain we
- // can't rely on 'this' being the same as 'obj'.
- var value;
- var value_factory = fun;
- var getter = function() {
- if (value_factory == null) {
- return value;
- }
- value = value_factory(obj);
- value_factory = null;
- return value;
- };
- var setter = function(v) {
- value_factory = null;
- value = v;
- };
- %DefineOrRedefineAccessorProperty(obj, name, getter, setter, DONT_ENUM);
-}
-
function CallSite(receiver, fun, pos) {
this.receiver = receiver;
this.fun = fun;
@@ -847,7 +820,8 @@ function CallSiteGetMethodName() {
%_CallFunction(this.receiver,
ownName,
ObjectLookupSetter) === this.fun ||
- this.receiver[ownName] === this.fun)) {
+ (IS_OBJECT(this.receiver) &&
+ %GetDataProperty(this.receiver, ownName) === this.fun))) {
// To handle DontEnum properties we guess that the method has
// the same name as the function.
return ownName;
@@ -856,8 +830,8 @@ function CallSiteGetMethodName() {
for (var prop in this.receiver) {
if (%_CallFunction(this.receiver, prop, ObjectLookupGetter) === this.fun ||
%_CallFunction(this.receiver, prop, ObjectLookupSetter) === this.fun ||
- (!%_CallFunction(this.receiver, prop, ObjectLookupGetter) &&
- this.receiver[prop] === this.fun)) {
+ (IS_OBJECT(this.receiver) &&
+ %GetDataProperty(this.receiver, prop) === this.fun)) {
// If we find more than one match bail out to avoid confusion.
if (name) {
return null;
@@ -910,10 +884,10 @@ function CallSiteGetPosition() {
}
function CallSiteIsConstructor() {
- var constructor = this.receiver ? this.receiver.constructor : null;
- if (!constructor) {
- return false;
- }
+ var receiver = this.receiver;
+ var constructor =
+ IS_OBJECT(receiver) ? %GetDataProperty(receiver, "constructor") : null;
+ if (!constructor) return false;
return this.fun === constructor;
}
@@ -960,12 +934,14 @@ function CallSiteToString() {
var typeName = GetTypeName(this, true);
var methodName = this.getMethodName();
if (functionName) {
- if (typeName && functionName.indexOf(typeName) != 0) {
+ if (typeName &&
+ %_CallFunction(functionName, typeName, StringIndexOf) != 0) {
line += typeName + ".";
}
line += functionName;
- if (methodName && functionName.lastIndexOf("." + methodName) !=
- functionName.length - methodName.length - 1) {
+ if (methodName &&
+ (%_CallFunction(functionName, "." + methodName, StringIndexOf) !=
+ functionName.length - methodName.length - 1)) {
line += " [as " + methodName + "]";
}
} else {
@@ -1043,17 +1019,37 @@ function FormatEvalOrigin(script) {
return eval_origin;
}
-function FormatStackTrace(error, frames) {
- var lines = [];
+
+function FormatErrorString(error) {
try {
- lines.push(error.toString());
+ return %_CallFunction(error, ErrorToString);
} catch (e) {
try {
- lines.push("<error: " + e + ">");
+ return "<error: " + e + ">";
} catch (ee) {
- lines.push("<error>");
+ return "<error>";
}
}
+}
+
+
+function GetStackFrames(raw_stack) {
+ var frames = new InternalArray();
+ for (var i = 0; i < raw_stack.length; i += 4) {
+ var recv = raw_stack[i];
+ var fun = raw_stack[i + 1];
+ var code = raw_stack[i + 2];
+ var pc = raw_stack[i + 3];
+ var pos = %FunctionGetPositionForOffset(code, pc);
+ frames.push(new CallSite(recv, fun, pos));
+ }
+ return frames;
+}
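+
+// A raw stack trace, as collected by %CollectStackTrace, is a flat array
+// with four slots per frame: receiver, function, code object, and a pc
+// offset into that code object. A sketch of the layout GetStackFrames
+// consumes (values illustrative only):
+//
+//   raw_stack = [ recv0, fun0, code0, pc0,    // innermost frame
+//                 recv1, fun1, code1, pc1 ];  // its caller
+//
+// Each quadruple becomes one CallSite.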
+
+
+function FormatStackTrace(error_string, frames) {
+ var lines = new InternalArray();
+ lines.push(error_string);
for (var i = 0; i < frames.length; i++) {
var frame = frames[i];
var line;
@@ -1069,25 +1065,9 @@ function FormatStackTrace(error, frames) {
}
lines.push(" at " + line);
}
- return lines.join("\n");
+ return %_CallFunction(lines, "\n", ArrayJoin);
}
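+
+// FormatStackTrace builds its lines in an InternalArray and joins them with
+// the built-in ArrayJoin via %_CallFunction, so user script that patches
+// Array.prototype cannot observe or derail stack trace formatting. A sketch
+// of the failure mode this avoids:
+//
+//   Array.prototype.join = function() { throw "gotcha"; };
+//   try { nonexistent_global; } catch (e) { e.stack; }  // must still work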
-function FormatRawStackTrace(error, raw_stack) {
- var frames = [ ];
- for (var i = 0; i < raw_stack.length; i += 4) {
- var recv = raw_stack[i];
- var fun = raw_stack[i + 1];
- var code = raw_stack[i + 2];
- var pc = raw_stack[i + 3];
- var pos = %FunctionGetPositionForOffset(code, pc);
- frames.push(new CallSite(recv, fun, pos));
- }
- if (IS_FUNCTION($Error.prepareStackTrace)) {
- return $Error.prepareStackTrace(error, frames);
- } else {
- return FormatStackTrace(error, frames);
- }
-}
function GetTypeName(obj, requireConstructor) {
var constructor = obj.receiver.constructor;
@@ -1103,18 +1083,58 @@ function GetTypeName(obj, requireConstructor) {
return constructorName;
}
+
+// Flag to prevent recursive call of Error.prepareStackTrace.
+var formatting_custom_stack_trace = false;
+
+
function captureStackTrace(obj, cons_opt) {
var stackTraceLimit = $Error.stackTraceLimit;
if (!stackTraceLimit || !IS_NUMBER(stackTraceLimit)) return;
if (stackTraceLimit < 0 || stackTraceLimit > 10000) {
stackTraceLimit = 10000;
}
- var raw_stack = %CollectStackTrace(obj,
- cons_opt ? cons_opt : captureStackTrace,
- stackTraceLimit);
- DefineOneShotAccessor(obj, 'stack', function (obj) {
- return FormatRawStackTrace(obj, raw_stack);
- });
+ var stack = %CollectStackTrace(obj,
+ cons_opt ? cons_opt : captureStackTrace,
+ stackTraceLimit);
+
+ // Don't be lazy if the error stack formatting is custom (observable).
+ if (IS_FUNCTION($Error.prepareStackTrace) && !formatting_custom_stack_trace) {
+ var array = [];
+ %MoveArrayContents(GetStackFrames(stack), array);
+ formatting_custom_stack_trace = true;
+ try {
+ obj.stack = $Error.prepareStackTrace(obj, array);
+ } catch (e) {
+ throw e; // The custom formatting function threw. Rethrow.
+ } finally {
+ formatting_custom_stack_trace = false;
+ }
+ return;
+ }
+
+ var error_string = FormatErrorString(obj);
+ // Note that 'obj' and 'this' may be different when called on objects that
+ // have the error object on their prototype chain. The getter replaces itself
+ // with a data property as soon as the stack trace has been formatted.
+ // The getter must not change the object layout as it may be called after GC.
+ var getter = function() {
+ if (IS_STRING(stack)) return stack;
+ // Stack is still a raw array awaiting formatting.
+ stack = FormatStackTrace(error_string, GetStackFrames(stack));
+ // Release context value.
+ error_string = void 0;
+ return stack;
+ };
+ %MarkOneShotGetter(getter);
+
+ // The 'stack' property of the receiver is set as data property. If
+ // the receiver is the same as holder, this accessor pair is replaced.
+ var setter = function(v) {
+ %DefineOrRedefineDataProperty(this, 'stack', v, NONE);
+ };
+
+ %DefineOrRedefineAccessorProperty(obj, 'stack', getter, setter, DONT_ENUM);
}
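+
+// Observable behavior of the lazy 'stack' accessor installed above (sketch,
+// assuming no custom $Error.prepareStackTrace is installed):
+//
+//   var e = new $Error("boom");
+//   var s1 = e.stack;  // first read: frames are formatted and cached
+//   var s2 = e.stack;  // later reads reuse the cache; s1 === s2
+//   e.stack = "mine";  // the setter installs a plain data property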
@@ -1153,8 +1173,6 @@ function SetUpError() {
// object. This avoids going through getters and setters defined
// on prototype objects.
%IgnoreAttributesAndSetProperty(this, 'stack', void 0, DONT_ENUM);
- %IgnoreAttributesAndSetProperty(this, 'arguments', void 0, DONT_ENUM);
- %IgnoreAttributesAndSetProperty(this, 'type', void 0, DONT_ENUM);
if (!IS_UNDEFINED(m)) {
%IgnoreAttributesAndSetProperty(
this, 'message', ToString(m), DONT_ENUM);
@@ -1214,7 +1232,6 @@ function GetPropertyWithoutInvokingMonkeyGetters(error, name) {
function ErrorToStringDetectCycle(error) {
if (!%PushIfAbsent(visited_errors, error)) throw cyclic_error_marker;
try {
- var type = GetPropertyWithoutInvokingMonkeyGetters(error, "type");
var name = GetPropertyWithoutInvokingMonkeyGetters(error, "name");
name = IS_UNDEFINED(name) ? "Error" : TO_STRING_INLINE(name);
var message = GetPropertyWithoutInvokingMonkeyGetters(error, "message");
@@ -1249,4 +1266,46 @@ InstallFunctions($Error.prototype, DONT_ENUM, ['toString', ErrorToString]);
// Boilerplate for exceptions for stack overflows. Used from
// Isolate::StackOverflow().
-var kStackOverflowBoilerplate = MakeRangeError('stack_overflow', []);
+function SetUpStackOverflowBoilerplate() {
+ var boilerplate = MakeRangeError('stack_overflow', []);
+
+ // The raw stack trace is stored as a hidden property of the copy of this
+ // boilerplate error object. Note that the receiver 'this' may not be that
+ // error object copy, but can be found on the prototype chain of 'this'.
+ // When the stack trace is formatted, this accessor property is replaced by
+ // a data property.
+ var error_string = boilerplate.name + ": " + boilerplate.message;
+
+ // The getter must not change the object layout as it may be called after GC.
+ function getter() {
+ var holder = this;
+ while (!IS_ERROR(holder)) {
+ holder = %GetPrototype(holder);
+ if (holder == null) return MakeSyntaxError('illegal_access', []);
+ }
+ var stack = %GetOverflowedStackTrace(holder);
+ if (IS_STRING(stack)) return stack;
+ if (IS_ARRAY(stack)) {
+ var result = FormatStackTrace(error_string, GetStackFrames(stack));
+ %SetOverflowedStackTrace(holder, result);
+ return result;
+ }
+ return void 0;
+ }
+ %MarkOneShotGetter(getter);
+
+ // The 'stack' property of the receiver is set as data property. If
+ // the receiver is the same as holder, this accessor pair is replaced.
+ function setter(v) {
+ %DefineOrRedefineDataProperty(this, 'stack', v, NONE);
+ // Release the stack trace that is stored as a hidden property, if it exists.
+ %SetOverflowedStackTrace(this, void 0);
+ }
+
+ %DefineOrRedefineAccessorProperty(
+ boilerplate, 'stack', getter, setter, DONT_ENUM);
+
+ return boilerplate;
+}
+
+var kStackOverflowBoilerplate = SetUpStackOverflowBoilerplate();
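+
+// Because the boilerplate may merely sit on the prototype chain of the
+// object actually thrown, the getter first walks up to the nearest error
+// object before consulting the hidden raw trace. Roughly (sketch):
+//
+//   var o = { __proto__: kStackOverflowBoilerplate };
+//   o.stack;  // walks o -> boilerplate, then formats the overflowed trace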
diff --git a/src/3rdparty/v8/src/mips/assembler-mips-inl.h b/src/3rdparty/v8/src/mips/assembler-mips-inl.h
index 3e726a7..0499d36 100644
--- a/src/3rdparty/v8/src/mips/assembler-mips-inl.h
+++ b/src/3rdparty/v8/src/mips/assembler-mips-inl.h
@@ -1,3 +1,4 @@
+
// Copyright (c) 1994-2006 Sun Microsystems Inc.
// All Rights Reserved.
//
@@ -65,7 +66,7 @@ Operand::Operand(const ExternalReference& f) {
Operand::Operand(Smi* value) {
rm_ = no_reg;
imm32_ = reinterpret_cast<intptr_t>(value);
- rmode_ = RelocInfo::NONE;
+ rmode_ = RelocInfo::NONE32;
}
@@ -79,9 +80,36 @@ bool Operand::is_reg() const {
}
+int Register::NumAllocatableRegisters() {
+ if (CpuFeatures::IsSupported(FPU)) {
+ return kMaxNumAllocatableRegisters;
+ } else {
+ return kMaxNumAllocatableRegisters - kGPRsPerNonFPUDouble;
+ }
+}
+
+
+int DoubleRegister::NumRegisters() {
+ if (CpuFeatures::IsSupported(FPU)) {
+ return FPURegister::kMaxNumRegisters;
+ } else {
+ return 1;
+ }
+}
+
+
+int DoubleRegister::NumAllocatableRegisters() {
+ if (CpuFeatures::IsSupported(FPU)) {
+ return FPURegister::kMaxNumAllocatableRegisters;
+ } else {
+ return 1;
+ }
+}
+
+
int FPURegister::ToAllocationIndex(FPURegister reg) {
ASSERT(reg.code() % 2 == 0);
- ASSERT(reg.code() / 2 < kNumAllocatableRegisters);
+ ASSERT(reg.code() / 2 < kMaxNumAllocatableRegisters);
ASSERT(reg.is_valid());
ASSERT(!reg.is(kDoubleRegZero));
ASSERT(!reg.is(kLithiumScratchDouble));
@@ -231,6 +259,24 @@ void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell,
}
+static const int kNoCodeAgeSequenceLength = 7;
+
+Code* RelocInfo::code_age_stub() {
+ ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
+ return Code::GetCodeFromTargetAddress(
+ Memory::Address_at(pc_ + Assembler::kInstrSize *
+ (kNoCodeAgeSequenceLength - 1)));
+}
+
+
+void RelocInfo::set_code_age_stub(Code* stub) {
+ ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
+ Memory::Address_at(pc_ + Assembler::kInstrSize *
+ (kNoCodeAgeSequenceLength - 1)) =
+ stub->instruction_start();
+}
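+
+// The code-age prologue is kNoCodeAgeSequenceLength (7) instructions, and
+// its last word holds the entry address of the age stub, so both accessors
+// above address the same slot:
+//
+//   Address slot = pc_ + Assembler::kInstrSize * (kNoCodeAgeSequenceLength - 1);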
+
+
Address RelocInfo::call_address() {
ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
@@ -302,6 +348,8 @@ void RelocInfo::Visit(ObjectVisitor* visitor) {
visitor->VisitGlobalPropertyCell(this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
visitor->VisitExternalReference(this);
+ } else if (RelocInfo::IsCodeAgeSequence(mode)) {
+ visitor->VisitCodeAgeSequence(this);
#ifdef ENABLE_DEBUGGER_SUPPORT
// TODO(isolates): Get a cached isolate below.
} else if (((RelocInfo::IsJSReturn(mode) &&
@@ -328,6 +376,8 @@ void RelocInfo::Visit(Heap* heap) {
StaticVisitor::VisitGlobalPropertyCell(heap, this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
StaticVisitor::VisitExternalReference(this);
+ } else if (RelocInfo::IsCodeAgeSequence(mode)) {
+ StaticVisitor::VisitCodeAgeSequence(heap, this);
#ifdef ENABLE_DEBUGGER_SUPPORT
} else if (heap->isolate()->debug()->has_break_points() &&
((RelocInfo::IsJSReturn(mode) &&
diff --git a/src/3rdparty/v8/src/mips/assembler-mips.cc b/src/3rdparty/v8/src/mips/assembler-mips.cc
index 4ce924d..962255d 100644
--- a/src/3rdparty/v8/src/mips/assembler-mips.cc
+++ b/src/3rdparty/v8/src/mips/assembler-mips.cc
@@ -50,6 +50,12 @@ unsigned CpuFeatures::supported_ = 0;
unsigned CpuFeatures::found_by_runtime_probing_ = 0;
+ExternalReference ExternalReference::cpu_features() {
+ ASSERT(CpuFeatures::initialized_);
+ return ExternalReference(&CpuFeatures::supported_);
+}
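+
+// Exposing the feature bitmask as an ExternalReference lets generated code
+// test CPU features at run time rather than baking them in. A hedged sketch
+// of a use (register choice illustrative):
+//
+//   __ li(at, Operand(ExternalReference::cpu_features()));
+//   __ lw(at, MemOperand(at));
+//   __ And(at, at, Operand(1u << FPU));
+//   __ Branch(&no_fpu, eq, at, Operand(zero_reg));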
+
+
// Get the CPU features enabled by the build. For cross compilation the
// preprocessor symbols CAN_USE_FPU_INSTRUCTIONS
// can be defined to enable FPU instructions when building the
@@ -73,6 +79,33 @@ static uint64_t CpuFeaturesImpliedByCompiler() {
}
+const char* DoubleRegister::AllocationIndexToString(int index) {
+ if (CpuFeatures::IsSupported(FPU)) {
+ ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
+ const char* const names[] = {
+ "f0",
+ "f2",
+ "f4",
+ "f6",
+ "f8",
+ "f10",
+ "f12",
+ "f14",
+ "f16",
+ "f18",
+ "f20",
+ "f22",
+ "f24",
+ "f26"
+ };
+ return names[index];
+ } else {
+ ASSERT(index == 0);
+ return "sfpd0";
+ }
+}
+
+
void CpuFeatures::Probe() {
unsigned standard_features = (OS::CpuFeaturesImpliedByPlatform() |
CpuFeaturesImpliedByCompiler());
@@ -221,7 +254,7 @@ Operand::Operand(Handle<Object> handle) {
} else {
// No relocation needed.
imm32_ = reinterpret_cast<intptr_t>(obj);
- rmode_ = RelocInfo::NONE;
+ rmode_ = RelocInfo::NONE32;
}
}
@@ -267,44 +300,11 @@ const Instr kLwSwInstrArgumentMask = ~kLwSwInstrTypeMask;
const Instr kLwSwOffsetMask = kImm16Mask;
-// Spare buffer.
-static const int kMinimalBufferSize = 4 * KB;
-
-
-Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size)
- : AssemblerBase(arg_isolate),
+Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
+ : AssemblerBase(isolate, buffer, buffer_size),
recorded_ast_id_(TypeFeedbackId::None()),
positions_recorder_(this) {
- if (buffer == NULL) {
- // Do our own buffer management.
- if (buffer_size <= kMinimalBufferSize) {
- buffer_size = kMinimalBufferSize;
-
- if (isolate()->assembler_spare_buffer() != NULL) {
- buffer = isolate()->assembler_spare_buffer();
- isolate()->set_assembler_spare_buffer(NULL);
- }
- }
- if (buffer == NULL) {
- buffer_ = NewArray<byte>(buffer_size);
- } else {
- buffer_ = static_cast<byte*>(buffer);
- }
- buffer_size_ = buffer_size;
- own_buffer_ = true;
-
- } else {
- // Use externally provided buffer instead.
- ASSERT(buffer_size > 0);
- buffer_ = static_cast<byte*>(buffer);
- buffer_size_ = buffer_size;
- own_buffer_ = false;
- }
-
- // Set up buffer pointers.
- ASSERT(buffer_ != NULL);
- pc_ = buffer_;
- reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
+ reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
last_trampoline_pool_end_ = 0;
no_trampoline_pool_before_ = 0;
@@ -323,18 +323,6 @@ Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size)
}
-Assembler::~Assembler() {
- if (own_buffer_) {
- if (isolate()->assembler_spare_buffer() == NULL &&
- buffer_size_ == kMinimalBufferSize) {
- isolate()->set_assembler_spare_buffer(buffer_);
- } else {
- DeleteArray(buffer_);
- }
- }
-}
-
-
void Assembler::GetCode(CodeDesc* desc) {
ASSERT(pc_ <= reloc_info_writer.pos()); // No overlap.
// Set up code descriptor.
@@ -601,7 +589,7 @@ bool Assembler::IsNop(Instr instr, unsigned int type) {
int32_t Assembler::GetBranchOffset(Instr instr) {
ASSERT(IsBranch(instr));
- return ((int16_t)(instr & kImm16Mask)) << 2;
+ return (static_cast<int16_t>(instr & kImm16Mask)) << 2;
}
@@ -734,7 +722,7 @@ void Assembler::target_at_put(int32_t pos, int32_t target_pos) {
Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize);
Instr instr_ori = instr_at(pos + 1 * Assembler::kInstrSize);
ASSERT(IsOri(instr_ori));
- uint32_t imm = (uint32_t)buffer_ + target_pos;
+ uint32_t imm = reinterpret_cast<uint32_t>(buffer_) + target_pos;
ASSERT((imm & 3) == 0);
instr_lui &= ~kImm16Mask;
@@ -745,7 +733,7 @@ void Assembler::target_at_put(int32_t pos, int32_t target_pos) {
instr_at_put(pos + 1 * Assembler::kInstrSize,
instr_ori | (imm & kImm16Mask));
} else {
- uint32_t imm28 = (uint32_t)buffer_ + target_pos;
+ uint32_t imm28 = reinterpret_cast<uint32_t>(buffer_) + target_pos;
imm28 &= kImm28Mask;
ASSERT((imm28 & 3) == 0);
@@ -850,7 +838,7 @@ bool Assembler::is_near(Label* L) {
// space. There is no guarantee that the relocated location can be similarly
// encoded.
bool Assembler::MustUseReg(RelocInfo::Mode rmode) {
- return rmode != RelocInfo::NONE;
+ return !RelocInfo::IsNone(rmode);
}
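+
+// RelocInfo::NONE has been split into NONE32 and NONE64, so "no relocation"
+// is now a predicate instead of one enum value. A plausible shape of that
+// predicate (sketch, not verbatim v8 source):
+//
+//   static inline bool IsNone(Mode mode) {
+//     return mode == NONE32 || mode == NONE64;
+//   }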
void Assembler::GenInstrRegister(Opcode opcode,
@@ -894,6 +882,20 @@ void Assembler::GenInstrRegister(Opcode opcode,
void Assembler::GenInstrRegister(Opcode opcode,
+ FPURegister fr,
+ FPURegister ft,
+ FPURegister fs,
+ FPURegister fd,
+ SecondaryField func) {
+ ASSERT(fd.is_valid() && fr.is_valid() && fs.is_valid() && ft.is_valid());
+ ASSERT(CpuFeatures::IsEnabled(FPU));
+ Instr instr = opcode | (fr.code() << kFrShift) | (ft.code() << kFtShift)
+ | (fs.code() << kFsShift) | (fd.code() << kFdShift) | func;
+ emit(instr);
+}
+
+
+void Assembler::GenInstrRegister(Opcode opcode,
SecondaryField fmt,
Register rt,
FPURegister fs,
@@ -997,7 +999,7 @@ uint32_t Assembler::jump_address(Label* L) {
}
}
- uint32_t imm = (uint32_t)buffer_ + target_pos;
+ uint32_t imm = reinterpret_cast<uint32_t>(buffer_) + target_pos;
ASSERT((imm & 3) == 0);
return imm;
@@ -1132,7 +1134,8 @@ void Assembler::j(int32_t target) {
#if DEBUG
// Get pc of delay slot.
uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);
- bool in_range = ((uint32_t)(ipc^target) >> (kImm26Bits+kImmFieldShift)) == 0;
+ bool in_range = ((ipc ^ static_cast<uint32_t>(target)) >>
+ (kImm26Bits + kImmFieldShift)) == 0;
ASSERT(in_range && ((target & 3) == 0));
#endif
GenInstrJump(J, target >> 2);
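+
+// The in_range check asks whether the delay-slot pc and the target agree in
+// all bits above the 28-bit jump field, i.e. lie in the same 256 MB region.
+// Worked example (addresses illustrative):
+//
+//   uint32_t ipc    = 0x12345678;           // pc of the delay slot
+//   uint32_t target = 0x12FFFFFC;           // proposed jump target
+//   bool ok = ((ipc ^ target) >> 28) == 0;  // top four bits match: true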
@@ -1153,7 +1156,8 @@ void Assembler::jal(int32_t target) {
#ifdef DEBUG
// Get pc of delay slot.
uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);
- bool in_range = ((uint32_t)(ipc^target) >> (kImm26Bits+kImmFieldShift)) == 0;
+ bool in_range = ((ipc ^ static_cast<uint32_t>(target)) >>
+ (kImm26Bits + kImmFieldShift)) == 0;
ASSERT(in_range && ((target & 3) == 0));
#endif
positions_recorder()->WriteRecordedPositions();
@@ -1172,8 +1176,8 @@ void Assembler::jalr(Register rs, Register rd) {
void Assembler::j_or_jr(int32_t target, Register rs) {
// Get pc of delay slot.
uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);
- bool in_range = ((uint32_t)(ipc^target) >> (kImm26Bits+kImmFieldShift)) == 0;
-
+ bool in_range = ((ipc ^ static_cast<uint32_t>(target)) >>
+ (kImm26Bits + kImmFieldShift)) == 0;
if (in_range) {
j(target);
} else {
@@ -1185,8 +1189,8 @@ void Assembler::j_or_jr(int32_t target, Register rs) {
void Assembler::jal_or_jalr(int32_t target, Register rs) {
// Get pc of delay slot.
uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);
- bool in_range = ((uint32_t)(ipc^target) >> (kImm26Bits+kImmFieldShift)) == 0;
-
+ bool in_range = ((ipc ^ static_cast<uint32_t>(target)) >>
+ (kImm26Bits + kImmFieldShift)) == 0;
if (in_range) {
jal(target);
} else {
@@ -1696,6 +1700,12 @@ void Assembler::mul_d(FPURegister fd, FPURegister fs, FPURegister ft) {
}
+void Assembler::madd_d(FPURegister fd, FPURegister fr, FPURegister fs,
+ FPURegister ft) {
+ GenInstrRegister(COP1X, fr, ft, fs, fd, MADD_D);
+}
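+
+// madd.d is the MIPS fused multiply-add, fd = fr + fs * ft. A sketch of a
+// use (register choice illustrative):
+//
+//   __ madd_d(f0, f2, f4, f6);  // f0 = f2 + f4 * f6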
+
+
void Assembler::div_d(FPURegister fd, FPURegister fs, FPURegister ft) {
GenInstrRegister(COP1, D, ft, fs, fd, DIV_D);
}
@@ -1945,7 +1955,7 @@ int Assembler::RelocateInternalReference(byte* pc, intptr_t pc_delta) {
return 2; // Number of instructions patched.
} else {
uint32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2;
- if ((int32_t)imm28 == kEndOfJumpChain) {
+ if (static_cast<int32_t>(imm28) == kEndOfJumpChain) {
return 0; // Number of instructions patched.
}
imm28 += pc_delta;
@@ -2035,7 +2045,7 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
|| RelocInfo::IsPosition(rmode));
// These modes do not need an entry in the constant pool.
}
- if (rinfo.rmode() != RelocInfo::NONE) {
+ if (!RelocInfo::IsNone(rinfo.rmode())) {
// Don't record external references unless the heap will be serialized.
if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
#ifdef DEBUG
@@ -2195,9 +2205,10 @@ void Assembler::set_target_address_at(Address pc, Address target) {
Instr instr3 = instr_at(pc + 2 * kInstrSize);
uint32_t ipc = reinterpret_cast<uint32_t>(pc + 3 * kInstrSize);
- bool in_range =
- ((uint32_t)(ipc ^ itarget) >> (kImm26Bits + kImmFieldShift)) == 0;
- uint32_t target_field = (uint32_t)(itarget & kJumpAddrMask) >> kImmFieldShift;
+ bool in_range = ((ipc ^ static_cast<uint32_t>(itarget)) >>
+ (kImm26Bits + kImmFieldShift)) == 0;
+ uint32_t target_field =
+ static_cast<uint32_t>(itarget & kJumpAddrMask) >> kImmFieldShift;
bool patched_jump = false;
#ifndef ALLOW_JAL_IN_BOUNDARY_REGION
diff --git a/src/3rdparty/v8/src/mips/assembler-mips.h b/src/3rdparty/v8/src/mips/assembler-mips.h
index fd2ff0d..d108edc 100644
--- a/src/3rdparty/v8/src/mips/assembler-mips.h
+++ b/src/3rdparty/v8/src/mips/assembler-mips.h
@@ -72,20 +72,23 @@ namespace internal {
// Core register.
struct Register {
static const int kNumRegisters = v8::internal::kNumRegisters;
- static const int kNumAllocatableRegisters = 14; // v0 through t7.
+ static const int kMaxNumAllocatableRegisters = 14; // v0 through t7.
static const int kSizeInBytes = 4;
+ static const int kGPRsPerNonFPUDouble = 2;
+
+ inline static int NumAllocatableRegisters();
static int ToAllocationIndex(Register reg) {
return reg.code() - 2; // zero_reg and 'at' are skipped.
}
static Register FromAllocationIndex(int index) {
- ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+ ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
return from_code(index + 2); // zero_reg and 'at' are skipped.
}
static const char* AllocationIndexToString(int index) {
- ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+ ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
const char* const names[] = {
"v0",
"v1",
@@ -186,7 +189,7 @@ Register ToRegister(int num);
// Coprocessor register.
struct FPURegister {
- static const int kNumRegisters = v8::internal::kNumFPURegisters;
+ static const int kMaxNumRegisters = v8::internal::kNumFPURegisters;
// TODO(plind): Warning, inconsistent numbering here. kNumFPURegisters refers
// to number of 32-bit FPU regs, but kNumAllocatableRegisters refers to
@@ -197,44 +200,25 @@ struct FPURegister {
// f28: 0.0
// f30: scratch register.
static const int kNumReservedRegisters = 2;
- static const int kNumAllocatableRegisters = kNumRegisters / 2 -
+ static const int kMaxNumAllocatableRegisters = kMaxNumRegisters / 2 -
kNumReservedRegisters;
-
+ inline static int NumRegisters();
+ inline static int NumAllocatableRegisters();
inline static int ToAllocationIndex(FPURegister reg);
+ static const char* AllocationIndexToString(int index);
static FPURegister FromAllocationIndex(int index) {
- ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+ ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
return from_code(index * 2);
}
- static const char* AllocationIndexToString(int index) {
- ASSERT(index >= 0 && index < kNumAllocatableRegisters);
- const char* const names[] = {
- "f0",
- "f2",
- "f4",
- "f6",
- "f8",
- "f10",
- "f12",
- "f14",
- "f16",
- "f18",
- "f20",
- "f22",
- "f24",
- "f26"
- };
- return names[index];
- }
-
static FPURegister from_code(int code) {
FPURegister r = { code };
return r;
}
- bool is_valid() const { return 0 <= code_ && code_ < kNumFPURegisters ; }
+ bool is_valid() const { return 0 <= code_ && code_ < kMaxNumRegisters; }
bool is(FPURegister creg) const { return code_ == creg.code_; }
FPURegister low() const {
// Find low reg of a Double-reg pair, which is the reg itself.
@@ -316,6 +300,9 @@ const FPURegister f29 = { 29 };
const FPURegister f30 = { 30 };
const FPURegister f31 = { 31 };
+const Register sfpd_lo = { kRegister_t6_Code };
+const Register sfpd_hi = { kRegister_t7_Code };
+
// Register aliases.
// cp is assumed to be a callee saved register.
// Defined using #define instead of "static const Register&" because Clang
@@ -361,7 +348,7 @@ class Operand BASE_EMBEDDED {
public:
// Immediate.
INLINE(explicit Operand(int32_t immediate,
- RelocInfo::Mode rmode = RelocInfo::NONE));
+ RelocInfo::Mode rmode = RelocInfo::NONE32));
INLINE(explicit Operand(const ExternalReference& f));
INLINE(explicit Operand(const char* s));
INLINE(explicit Operand(Object** opp));
@@ -503,6 +490,7 @@ class CpuFeatures : public AllStatic {
static unsigned supported_;
static unsigned found_by_runtime_probing_;
+ friend class ExternalReference;
DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
};
@@ -523,7 +511,7 @@ class Assembler : public AssemblerBase {
// is too small, a fatal error occurs. No deallocation of the buffer is done
// upon destruction of the assembler.
Assembler(Isolate* isolate, void* buffer, int buffer_size);
- ~Assembler();
+ virtual ~Assembler() { }
// GetCode emits any pending (non-emitted) code and fills the descriptor
// desc. GetCode() is idempotent; it returns the same result if no other
@@ -663,7 +651,9 @@ class Assembler : public AssemblerBase {
PROPERTY_ACCESS_INLINED_CONTEXT_DONT_DELETE,
// Helper values.
LAST_CODE_MARKER,
- FIRST_IC_MARKER = PROPERTY_ACCESS_INLINED
+ FIRST_IC_MARKER = PROPERTY_ACCESS_INLINED,
+ // Code aging
+ CODE_AGE_MARKER_NOP = 6
};
// Type == 0 is the default non-marking nop. For mips this is a
@@ -816,6 +806,7 @@ class Assembler : public AssemblerBase {
void add_d(FPURegister fd, FPURegister fs, FPURegister ft);
void sub_d(FPURegister fd, FPURegister fs, FPURegister ft);
void mul_d(FPURegister fd, FPURegister fs, FPURegister ft);
+ void madd_d(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft);
void div_d(FPURegister fd, FPURegister fs, FPURegister ft);
void abs_d(FPURegister fd, FPURegister fs);
void mov_d(FPURegister fd, FPURegister fs);
@@ -941,8 +932,6 @@ class Assembler : public AssemblerBase {
void db(uint8_t data);
void dd(uint32_t data);
- int32_t pc_offset() const { return pc_ - buffer_; }
-
PositionsRecorder* positions_recorder() { return &positions_recorder_; }
// Postpone the generation of the trampoline pool for the specified number of
@@ -1085,13 +1074,6 @@ class Assembler : public AssemblerBase {
}
private:
- // Code buffer:
- // The buffer into which code and relocation info are generated.
- byte* buffer_;
- int buffer_size_;
- // True if the assembler owns the buffer, false if buffer is external.
- bool own_buffer_;
-
// Buffer size and constant pool distance are checked together at regular
// intervals of kBufferCheckInterval emitted bytes.
static const int kBufferCheckInterval = 1*KB/2;
@@ -1102,7 +1084,6 @@ class Assembler : public AssemblerBase {
// not have to check for overflow. The same is true for writes of large
// relocation info entries.
static const int kGap = 32;
- byte* pc_; // The program counter - moves forward.
// Repeated checking whether the trampoline pool should be emitted is rather
@@ -1167,6 +1148,13 @@ class Assembler : public AssemblerBase {
SecondaryField func = NULLSF);
void GenInstrRegister(Opcode opcode,
+ FPURegister fr,
+ FPURegister ft,
+ FPURegister fs,
+ FPURegister fd,
+ SecondaryField func = NULLSF);
+
+ void GenInstrRegister(Opcode opcode,
SecondaryField fmt,
Register rt,
FPURegister fs,
diff --git a/src/3rdparty/v8/src/mips/builtins-mips.cc b/src/3rdparty/v8/src/mips/builtins-mips.cc
index 0342e65..58c213b 100644
--- a/src/3rdparty/v8/src/mips/builtins-mips.cc
+++ b/src/3rdparty/v8/src/mips/builtins-mips.cc
@@ -555,34 +555,62 @@ void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : number of arguments
// -- a1 : constructor function
+ // -- a2 : type info cell
// -- ra : return address
// -- sp[...]: constructor arguments
// -----------------------------------
- Label generic_constructor;
if (FLAG_debug_code) {
// The array construct code is only set for the builtin and internal
// Array functions which always have a map.
// Initial map for the builtin Array function should be a map.
- __ lw(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
- __ And(t0, a2, Operand(kSmiTagMask));
+ __ lw(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
+ __ And(t0, a3, Operand(kSmiTagMask));
__ Assert(ne, "Unexpected initial map for Array function (3)",
t0, Operand(zero_reg));
- __ GetObjectType(a2, a3, t0);
+ __ GetObjectType(a1, a3, t0);
__ Assert(eq, "Unexpected initial map for Array function (4)",
t0, Operand(MAP_TYPE));
+
+ // We should either have undefined in a2 or a valid JSGlobalPropertyCell.
+ Label okay_here;
+ Handle<Object> undefined_sentinel(
+ masm->isolate()->heap()->undefined_value(), masm->isolate());
+ Handle<Map> global_property_cell_map(
+ masm->isolate()->heap()->global_property_cell_map());
+ __ Branch(&okay_here, eq, a2, Operand(undefined_sentinel));
+ __ lw(a3, FieldMemOperand(a2, 0));
+ __ Assert(eq, "Expected property cell in register a3",
+ a3, Operand(global_property_cell_map));
+ __ bind(&okay_here);
}
- // Run the native code for the Array function called as a constructor.
- ArrayNativeCode(masm, &generic_constructor);
+ if (FLAG_optimize_constructed_arrays) {
+ Label not_zero_case, not_one_case;
+ __ Branch(&not_zero_case, ne, a0, Operand(zero_reg));
+ ArrayNoArgumentConstructorStub no_argument_stub;
+ __ TailCallStub(&no_argument_stub);
- // Jump to the generic construct code in case the specialized code cannot
- // handle the construction.
- __ bind(&generic_constructor);
+ __ bind(&not_zero_case);
+ __ Branch(&not_one_case, gt, a0, Operand(1));
+ ArraySingleArgumentConstructorStub single_argument_stub;
+ __ TailCallStub(&single_argument_stub);
- Handle<Code> generic_construct_stub =
- masm->isolate()->builtins()->JSConstructStubGeneric();
- __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
+ __ bind(&not_one_case);
+ ArrayNArgumentsConstructorStub n_argument_stub;
+ __ TailCallStub(&n_argument_stub);
+ } else {
+ Label generic_constructor;
+ // Run the native code for the Array function called as a constructor.
+ ArrayNativeCode(masm, &generic_constructor);
+
+ // Jump to the generic construct code in case the specialized code cannot
+ // handle the construction.
+ __ bind(&generic_constructor);
+ Handle<Code> generic_construct_stub =
+ masm->isolate()->builtins()->JSConstructStubGeneric();
+ __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
+ }
}
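+
+// With FLAG_optimize_constructed_arrays the builtin dispatches purely on the
+// argument count in a0:
+//
+//   argc == 0  ->  ArrayNoArgumentConstructorStub
+//   argc == 1  ->  ArraySingleArgumentConstructorStub
+//   argc  > 1  ->  ArrayNArgumentsConstructorStub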
@@ -698,7 +726,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
// Load the empty string into a2, remove the receiver from the
// stack, and jump back to the case where the argument is a string.
__ bind(&no_arguments);
- __ LoadRoot(argument, Heap::kEmptyStringRootIndex);
+ __ LoadRoot(argument, Heap::kempty_stringRootIndex);
__ Drop(1);
__ Branch(&argument_is_string);
@@ -1072,9 +1100,13 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// If the type of the result (stored in its map) is less than
// FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
- __ GetObjectType(v0, a3, a3);
+ __ GetObjectType(v0, a1, a3);
__ Branch(&exit, greater_equal, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
+ // Symbols are "objects".
+ __ lbu(a3, FieldMemOperand(a1, Map::kInstanceTypeOffset));
+ __ Branch(&exit, eq, a3, Operand(SYMBOL_TYPE));
+
// Throw away the result of the constructor invocation and use the
// on-stack receiver as the result.
__ bind(&use_receiver);
@@ -1171,6 +1203,10 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Invoke the code and pass argc as a0.
__ mov(a0, a3);
if (is_construct) {
+ // No type feedback cell is available.
+ Handle<Object> undefined_sentinel(
+ masm->isolate()->heap()->undefined_value(), masm->isolate());
+ __ li(a2, Operand(undefined_sentinel));
CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
__ CallStub(&stub);
} else {
@@ -1255,6 +1291,66 @@ void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
}
+static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
+ // For now, we are relying on the fact that make_code_young doesn't do any
+ // garbage collection, which allows us to save/restore the registers without
+ // worrying about which of them contain pointers. We also don't build an
+ // internal frame to make the code faster, since we shouldn't have to do stack
+ // crawls in MakeCodeYoung. This seems a bit fragile.
+
+ __ mov(a0, ra);
+ // Adjust a0 to point to the head of the PlatformCodeAge sequence.
+ __ Subu(a0, a0,
+ Operand((kNoCodeAgeSequenceLength - 1) * Assembler::kInstrSize));
+ // Restore the original return address of the function.
+ __ mov(ra, at);
+
+ // The following registers must be saved and restored when calling through to
+ // the runtime:
+ // a0 - contains return address (beginning of patch sequence)
+ // a1 - function object
+ RegList saved_regs =
+ (a0.bit() | a1.bit() | ra.bit() | fp.bit()) & ~sp.bit();
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ MultiPush(saved_regs);
+ __ PrepareCallCFunction(1, 0, a1);
+ __ CallCFunction(
+ ExternalReference::get_make_code_young_function(masm->isolate()), 1);
+ __ MultiPop(saved_regs);
+ __ Jump(a0);
+}
+
+#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C) \
+void Builtins::Generate_Make##C##CodeYoungAgainEvenMarking( \
+ MacroAssembler* masm) { \
+ GenerateMakeCodeYoungAgainCommon(masm); \
+} \
+void Builtins::Generate_Make##C##CodeYoungAgainOddMarking( \
+ MacroAssembler* masm) { \
+ GenerateMakeCodeYoungAgainCommon(masm); \
+}
+CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
+#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
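+
+// For each age C in CODE_AGE_LIST the macro stamps out an even- and an
+// odd-marking builtin sharing one body. Expanded for an age named, say,
+// Quadragenarian, the first one would read:
+//
+//   void Builtins::Generate_MakeQuadragenarianCodeYoungAgainEvenMarking(
+//       MacroAssembler* masm) {
+//     GenerateMakeCodeYoungAgainCommon(masm);
+//   }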
+
+
+void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Preserve registers across notification, this is important for compiled
+ // stubs that tail call the runtime on deopts passing their parameters in
+ // registers.
+ __ MultiPush(kJSCallerSaved | kCalleeSaved);
+ // Pass the function and deoptimization type to the runtime system.
+ __ CallRuntime(Runtime::kNotifyStubFailure, 0);
+ __ MultiPop(kJSCallerSaved | kCalleeSaved);
+ }
+
+ __ Addu(sp, sp, Operand(kPointerSize)); // Ignore state
+ __ Jump(ra); // Jump to miss handler
+}
+
+
static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
Deoptimizer::BailoutType type) {
{
@@ -1371,7 +1467,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// a0: actual number of arguments
// a1: function
Label shift_arguments;
- __ li(t0, Operand(0, RelocInfo::NONE)); // Indicate regular JS_FUNCTION.
+ __ li(t0, Operand(0, RelocInfo::NONE32)); // Indicate regular JS_FUNCTION.
{ Label convert_to_object, use_global_receiver, patch_receiver;
// Change context eagerly in case we need the global receiver.
__ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
@@ -1425,7 +1521,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ sll(at, a0, kPointerSizeLog2);
__ addu(at, sp, at);
__ lw(a1, MemOperand(at));
- __ li(t0, Operand(0, RelocInfo::NONE));
+ __ li(t0, Operand(0, RelocInfo::NONE32));
__ Branch(&patch_receiver);
// Use the global receiver object from the called function as the
@@ -1448,11 +1544,11 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// 3b. Check for function proxy.
__ bind(&slow);
- __ li(t0, Operand(1, RelocInfo::NONE)); // Indicate function proxy.
+ __ li(t0, Operand(1, RelocInfo::NONE32)); // Indicate function proxy.
__ Branch(&shift_arguments, eq, a2, Operand(JS_FUNCTION_PROXY_TYPE));
__ bind(&non_function);
- __ li(t0, Operand(2, RelocInfo::NONE)); // Indicate non-function.
+ __ li(t0, Operand(2, RelocInfo::NONE32)); // Indicate non-function.
// 3c. Patch the first argument when calling a non-function. The
// CALL_NON_FUNCTION builtin expects the non-function callee as
@@ -1683,7 +1779,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
__ bind(&call_proxy);
__ push(a1); // Add function proxy as last argument.
__ Addu(a0, a0, Operand(1));
- __ li(a2, Operand(0, RelocInfo::NONE));
+ __ li(a2, Operand(0, RelocInfo::NONE32));
__ SetCallKind(t1, CALL_AS_METHOD);
__ GetBuiltinEntry(a3, Builtins::CALL_FUNCTION_PROXY);
__ Call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
diff --git a/src/3rdparty/v8/src/mips/code-stubs-mips.cc b/src/3rdparty/v8/src/mips/code-stubs-mips.cc
index b1fe4d5..6abccaf 100644
--- a/src/3rdparty/v8/src/mips/code-stubs-mips.cc
+++ b/src/3rdparty/v8/src/mips/code-stubs-mips.cc
@@ -33,17 +33,90 @@
#include "code-stubs.h"
#include "codegen.h"
#include "regexp-macro-assembler.h"
+#include "stub-cache.h"
namespace v8 {
namespace internal {
+void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { a3, a2, a1, a0 };
+ descriptor->register_param_count_ = 4;
+ descriptor->register_params_ = registers;
+ descriptor->stack_parameter_count_ = NULL;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kCreateObjectLiteralShallow)->entry;
+}
+
+
+void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { a1, a0 };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure);
+}
+
+
+void TransitionElementsKindStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { a0, a1 };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ Address entry =
+ Runtime::FunctionForId(Runtime::kTransitionElementsKind)->entry;
+ descriptor->deoptimization_handler_ = FUNCTION_ADDR(entry);
+}
+
+
+static void InitializeArrayConstructorDescriptor(Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ // Register state:
+ // a1 -- constructor function
+ // a2 -- type info cell with elements kind
+ // a0 -- number of arguments to the constructor function
+ static Register registers[] = { a1, a2 };
+ descriptor->register_param_count_ = 2;
+ // The stack parameter count covers the constructor pointer and one argument.
+ descriptor->stack_parameter_count_ = &a0;
+ descriptor->register_params_ = registers;
+ descriptor->extra_expression_stack_count_ = 1;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(ArrayConstructor_StubFailure);
+}
+
+
+void ArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ InitializeArrayConstructorDescriptor(isolate, descriptor);
+}
+
+
+void ArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ InitializeArrayConstructorDescriptor(isolate, descriptor);
+}
+
+
+void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ InitializeArrayConstructorDescriptor(isolate, descriptor);
+}
+
+
#define __ ACCESS_MASM(masm)
static void EmitIdenticalObjectComparison(MacroAssembler* masm,
Label* slow,
- Condition cc,
- bool never_nan_nan);
+ Condition cc);
static void EmitSmiNonsmiComparison(MacroAssembler* masm,
Register lhs,
Register rhs,
@@ -252,8 +325,10 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
__ sw(a2, MemOperand(v0, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
// Copy the qml global object from the surrounding context.
- __ lw(a1, MemOperand(cp, Context::SlotOffset(Context::QML_GLOBAL_OBJECT_INDEX)));
- __ sw(a1, MemOperand(v0, Context::SlotOffset(Context::QML_GLOBAL_OBJECT_INDEX)));
+ __ lw(a1,
+ MemOperand(cp, Context::SlotOffset(Context::QML_GLOBAL_OBJECT_INDEX)));
+ __ sw(a1,
+ MemOperand(v0, Context::SlotOffset(Context::QML_GLOBAL_OBJECT_INDEX)));
// Initialize the rest of the slots to undefined.
@@ -342,6 +417,7 @@ static void GenerateFastCloneShallowArrayCommon(
MacroAssembler* masm,
int length,
FastCloneShallowArrayStub::Mode mode,
+ AllocationSiteMode allocation_site_mode,
Label* fail) {
// Registers on entry:
// a3: boilerplate literal array.
@@ -354,7 +430,13 @@ static void GenerateFastCloneShallowArrayCommon(
? FixedDoubleArray::SizeFor(length)
: FixedArray::SizeFor(length);
}
- int size = JSArray::kSize + elements_size;
+
+ int size = JSArray::kSize;
+ int allocation_info_start = size;
+ if (allocation_site_mode == TRACK_ALLOCATION_SITE) {
+ size += AllocationSiteInfo::kSize;
+ }
+ size += elements_size;
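+
+ // Resulting layout when allocation sites are tracked (sketch; offsets are
+ // relative to the start of the allocation):
+ //
+ //   [0, JSArray::kSize)                                  JSArray header
+ //   [allocation_info_start, +AllocationSiteInfo::kSize)  site info
+ //   [..., +elements_size)                                elements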
// Allocate both the JS array and the elements array in one big
// allocation. This avoids multiple limit checks.
@@ -365,6 +447,13 @@ static void GenerateFastCloneShallowArrayCommon(
fail,
TAG_OBJECT);
+ if (allocation_site_mode == TRACK_ALLOCATION_SITE) {
+ __ li(a2, Operand(Handle<Map>(masm->isolate()->heap()->
+ allocation_site_info_map())));
+ __ sw(a2, FieldMemOperand(v0, allocation_info_start));
+ __ sw(a3, FieldMemOperand(v0, allocation_info_start + kPointerSize));
+ }
+
// Copy the JS array part.
for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
if ((i != JSArray::kElementsOffset) || (length == 0)) {
@@ -377,7 +466,11 @@ static void GenerateFastCloneShallowArrayCommon(
// Get hold of the elements array of the boilerplate and setup the
// elements pointer in the resulting object.
__ lw(a3, FieldMemOperand(a3, JSArray::kElementsOffset));
- __ Addu(a2, v0, Operand(JSArray::kSize));
+ if (allocation_site_mode == TRACK_ALLOCATION_SITE) {
+ __ Addu(a2, v0, Operand(JSArray::kSize + AllocationSiteInfo::kSize));
+ } else {
+ __ Addu(a2, v0, Operand(JSArray::kSize));
+ }
__ sw(a2, FieldMemOperand(v0, JSArray::kElementsOffset));
// Copy the elements array.
@@ -412,16 +505,18 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
__ lw(v0, FieldMemOperand(v0, HeapObject::kMapOffset));
__ LoadRoot(t1, Heap::kFixedCOWArrayMapRootIndex);
__ Branch(&check_fast_elements, ne, v0, Operand(t1));
- GenerateFastCloneShallowArrayCommon(masm, 0,
- COPY_ON_WRITE_ELEMENTS, &slow_case);
+ GenerateFastCloneShallowArrayCommon(masm, 0, COPY_ON_WRITE_ELEMENTS,
+ allocation_site_mode_,
+ &slow_case);
// Return and remove the on-stack parameters.
__ DropAndRet(3);
__ bind(&check_fast_elements);
__ LoadRoot(t1, Heap::kFixedArrayMapRootIndex);
__ Branch(&double_elements, ne, v0, Operand(t1));
- GenerateFastCloneShallowArrayCommon(masm, length_,
- CLONE_ELEMENTS, &slow_case);
+ GenerateFastCloneShallowArrayCommon(masm, length_, CLONE_ELEMENTS,
+ allocation_site_mode_,
+ &slow_case);
// Return and remove the on-stack parameters.
__ DropAndRet(3);
@@ -452,7 +547,9 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
__ pop(a3);
}
- GenerateFastCloneShallowArrayCommon(masm, length_, mode, &slow_case);
+ GenerateFastCloneShallowArrayCommon(masm, length_, mode,
+ allocation_site_mode_,
+ &slow_case);
// Return and remove the on-stack parameters.
__ DropAndRet(3);
@@ -462,55 +559,12 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
}
-void FastCloneShallowObjectStub::Generate(MacroAssembler* masm) {
- // Stack layout on entry:
- //
- // [sp]: object literal flags.
- // [sp + kPointerSize]: constant properties.
- // [sp + (2 * kPointerSize)]: literal index.
- // [sp + (3 * kPointerSize)]: literals array.
-
- // Load boilerplate object into a3 and check if we need to create a
- // boilerplate.
- Label slow_case;
- __ lw(a3, MemOperand(sp, 3 * kPointerSize));
- __ lw(a0, MemOperand(sp, 2 * kPointerSize));
- __ Addu(a3, a3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ sll(t0, a0, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(a3, t0, a3);
- __ lw(a3, MemOperand(a3));
- __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
- __ Branch(&slow_case, eq, a3, Operand(t0));
-
- // Check that the boilerplate contains only fast properties and we can
- // statically determine the instance size.
- int size = JSObject::kHeaderSize + length_ * kPointerSize;
- __ lw(a0, FieldMemOperand(a3, HeapObject::kMapOffset));
- __ lbu(a0, FieldMemOperand(a0, Map::kInstanceSizeOffset));
- __ Branch(&slow_case, ne, a0, Operand(size >> kPointerSizeLog2));
-
- // Allocate the JS object and copy header together with all in-object
- // properties from the boilerplate.
- __ AllocateInNewSpace(size, v0, a1, a2, &slow_case, TAG_OBJECT);
- for (int i = 0; i < size; i += kPointerSize) {
- __ lw(a1, FieldMemOperand(a3, i));
- __ sw(a1, FieldMemOperand(v0, i));
- }
-
- // Return and remove the on-stack parameters.
- __ DropAndRet(4);
-
- __ bind(&slow_case);
- __ TailCallRuntime(Runtime::kCreateObjectLiteralShallow, 4, 1);
-}
-
-
// Takes a Smi and converts to an IEEE 64 bit floating point value in two
// registers. The format is 1 sign bit, 11 exponent bits (biased 1023) and
// 52 fraction bits (20 in the first word, 32 in the second). Zeros is a
// scratch register. Destroys the source register. No GC occurs during this
// stub so you don't have to set up the frame.
-class ConvertToDoubleStub : public CodeStub {
+class ConvertToDoubleStub : public PlatformCodeStub {
public:
ConvertToDoubleStub(Register result_reg_1,
Register result_reg_2,
@@ -626,34 +680,16 @@ void FloatingPointHelper::LoadSmis(MacroAssembler* masm,
__ mov(scratch1, a0);
ConvertToDoubleStub stub1(a3, a2, scratch1, scratch2);
__ push(ra);
- __ Call(stub1.GetCode());
+ __ Call(stub1.GetCode(masm->isolate()));
// Write Smi from a1 to a1 and a0 in double format.
__ mov(scratch1, a1);
ConvertToDoubleStub stub2(a1, a0, scratch1, scratch2);
- __ Call(stub2.GetCode());
+ __ Call(stub2.GetCode(masm->isolate()));
__ pop(ra);
}
}
-void FloatingPointHelper::LoadOperands(
- MacroAssembler* masm,
- FloatingPointHelper::Destination destination,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Label* slow) {
-
- // Load right operand (a0) to f12 or a2/a3.
- LoadNumber(masm, destination,
- a0, f14, a2, a3, heap_number_map, scratch1, scratch2, slow);
-
- // Load left operand (a1) to f14 or a0/a1.
- LoadNumber(masm, destination,
- a1, f12, a0, a1, heap_number_map, scratch1, scratch2, slow);
-}
-
-
void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
Destination destination,
Register object,
@@ -711,7 +747,7 @@ void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
__ mov(scratch1, object);
ConvertToDoubleStub stub(dst2, dst1, scratch1, scratch2);
__ push(ra);
- __ Call(stub.GetCode());
+ __ Call(stub.GetCode(masm->isolate()));
__ pop(ra);
}
@@ -762,13 +798,13 @@ void FloatingPointHelper::ConvertIntToDouble(MacroAssembler* masm,
Register int_scratch,
Destination destination,
FPURegister double_dst,
- Register dst1,
- Register dst2,
+ Register dst_mantissa,
+ Register dst_exponent,
Register scratch2,
FPURegister single_scratch) {
ASSERT(!int_scratch.is(scratch2));
- ASSERT(!int_scratch.is(dst1));
- ASSERT(!int_scratch.is(dst2));
+ ASSERT(!int_scratch.is(dst_mantissa));
+ ASSERT(!int_scratch.is(dst_exponent));
Label done;
@@ -777,64 +813,65 @@ void FloatingPointHelper::ConvertIntToDouble(MacroAssembler* masm,
__ mtc1(int_scratch, single_scratch);
__ cvt_d_w(double_dst, single_scratch);
if (destination == kCoreRegisters) {
- __ Move(dst1, dst2, double_dst);
+ __ Move(dst_mantissa, dst_exponent, double_dst);
}
} else {
Label fewer_than_20_useful_bits;
// Expected output:
- // | dst2 | dst1 |
+ // | dst_exponent | dst_mantissa |
// | s | exp | mantissa |
// Check for zero.
- __ mov(dst2, int_scratch);
- __ mov(dst1, int_scratch);
+ __ mov(dst_exponent, int_scratch);
+ __ mov(dst_mantissa, int_scratch);
__ Branch(&done, eq, int_scratch, Operand(zero_reg));
// Preload the sign of the value.
- __ And(dst2, int_scratch, Operand(HeapNumber::kSignMask));
+ __ And(dst_exponent, int_scratch, Operand(HeapNumber::kSignMask));
// Get the absolute value of the object (as an unsigned integer).
Label skip_sub;
- __ Branch(&skip_sub, ge, dst2, Operand(zero_reg));
+ __ Branch(&skip_sub, ge, dst_exponent, Operand(zero_reg));
__ Subu(int_scratch, zero_reg, int_scratch);
__ bind(&skip_sub);
// Get mantissa[51:20].
// Get the position of the first set bit.
- __ Clz(dst1, int_scratch);
+ __ Clz(dst_mantissa, int_scratch);
__ li(scratch2, 31);
- __ Subu(dst1, scratch2, dst1);
+ __ Subu(dst_mantissa, scratch2, dst_mantissa);
// Set the exponent.
- __ Addu(scratch2, dst1, Operand(HeapNumber::kExponentBias));
- __ Ins(dst2, scratch2,
+ __ Addu(scratch2, dst_mantissa, Operand(HeapNumber::kExponentBias));
+ __ Ins(dst_exponent, scratch2,
HeapNumber::kExponentShift, HeapNumber::kExponentBits);
// Clear the first non null bit.
__ li(scratch2, Operand(1));
- __ sllv(scratch2, scratch2, dst1);
+ __ sllv(scratch2, scratch2, dst_mantissa);
__ li(at, -1);
__ Xor(scratch2, scratch2, at);
__ And(int_scratch, int_scratch, scratch2);
// Get the number of bits to set in the lower part of the mantissa.
- __ Subu(scratch2, dst1, Operand(HeapNumber::kMantissaBitsInTopWord));
+ __ Subu(scratch2, dst_mantissa,
+ Operand(HeapNumber::kMantissaBitsInTopWord));
__ Branch(&fewer_than_20_useful_bits, lt, scratch2, Operand(zero_reg));
// Set the higher 20 bits of the mantissa.
__ srlv(at, int_scratch, scratch2);
- __ or_(dst2, dst2, at);
+ __ or_(dst_exponent, dst_exponent, at);
__ li(at, 32);
__ subu(scratch2, at, scratch2);
- __ sllv(dst1, int_scratch, scratch2);
+ __ sllv(dst_mantissa, int_scratch, scratch2);
__ Branch(&done);
__ bind(&fewer_than_20_useful_bits);
__ li(at, HeapNumber::kMantissaBitsInTopWord);
- __ subu(scratch2, at, dst1);
+ __ subu(scratch2, at, dst_mantissa);
__ sllv(scratch2, int_scratch, scratch2);
- __ Or(dst2, dst2, scratch2);
- // Set dst1 to 0.
- __ mov(dst1, zero_reg);
+ __ Or(dst_exponent, dst_exponent, scratch2);
+ // Set dst_mantissa to 0.
+ __ mov(dst_mantissa, zero_reg);
}
__ bind(&done);
}
@@ -844,8 +881,9 @@ void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
Register object,
Destination destination,
DoubleRegister double_dst,
- Register dst1,
- Register dst2,
+ DoubleRegister double_scratch,
+ Register dst_mantissa,
+ Register dst_exponent,
Register heap_number_map,
Register scratch1,
Register scratch2,
@@ -861,8 +899,8 @@ void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
__ JumpIfNotSmi(object, &obj_is_not_smi);
__ SmiUntag(scratch1, object);
- ConvertIntToDouble(masm, scratch1, destination, double_dst, dst1, dst2,
- scratch2, single_scratch);
+ ConvertIntToDouble(masm, scratch1, destination, double_dst, dst_mantissa,
+ dst_exponent, scratch2, single_scratch);
__ Branch(&done);
__ bind(&obj_is_not_smi);
@@ -879,9 +917,10 @@ void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
Register except_flag = scratch2;
__ EmitFPUTruncate(kRoundToZero,
- single_scratch,
- double_dst,
scratch1,
+ double_dst,
+ at,
+ double_scratch,
except_flag,
kCheckForInexactConversion);
@@ -889,27 +928,51 @@ void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
__ Branch(not_int32, ne, except_flag, Operand(zero_reg));
if (destination == kCoreRegisters) {
- __ Move(dst1, dst2, double_dst);
+ __ Move(dst_mantissa, dst_exponent, double_dst);
}
} else {
ASSERT(!scratch1.is(object) && !scratch2.is(object));
// Load the double value in the destination registers.
- __ lw(dst2, FieldMemOperand(object, HeapNumber::kExponentOffset));
- __ lw(dst1, FieldMemOperand(object, HeapNumber::kMantissaOffset));
+ bool save_registers = object.is(dst_mantissa) || object.is(dst_exponent);
+ if (save_registers) {
+ // Save both output registers, because the other one probably holds
+ // an important value too.
+ __ Push(dst_exponent, dst_mantissa);
+ }
+ __ lw(dst_exponent, FieldMemOperand(object, HeapNumber::kExponentOffset));
+ __ lw(dst_mantissa, FieldMemOperand(object, HeapNumber::kMantissaOffset));
// Check for 0 and -0.
- __ And(scratch1, dst1, Operand(~HeapNumber::kSignMask));
- __ Or(scratch1, scratch1, Operand(dst2));
- __ Branch(&done, eq, scratch1, Operand(zero_reg));
+ Label zero;
+ __ And(scratch1, dst_exponent, Operand(~HeapNumber::kSignMask));
+ __ Or(scratch1, scratch1, Operand(dst_mantissa));
+ __ Branch(&zero, eq, scratch1, Operand(zero_reg));
// Check that the value can be exactly represented by a 32-bit integer.
// Jump to not_int32 if that's not the case.
- DoubleIs32BitInteger(masm, dst1, dst2, scratch1, scratch2, not_int32);
+ Label restore_input_and_miss;
+ DoubleIs32BitInteger(masm, dst_exponent, dst_mantissa, scratch1, scratch2,
+ &restore_input_and_miss);
+
+ // dst_* were trashed. Reload the double value.
+ if (save_registers) {
+ __ Pop(dst_exponent, dst_mantissa);
+ }
+ __ lw(dst_exponent, FieldMemOperand(object, HeapNumber::kExponentOffset));
+ __ lw(dst_mantissa, FieldMemOperand(object, HeapNumber::kMantissaOffset));
+ __ Branch(&done);
- // dst1 and dst2 were trashed. Reload the double value.
- __ lw(dst2, FieldMemOperand(object, HeapNumber::kExponentOffset));
- __ lw(dst1, FieldMemOperand(object, HeapNumber::kMantissaOffset));
+ __ bind(&restore_input_and_miss);
+ if (save_registers) {
+ __ Pop(dst_exponent, dst_mantissa);
+ }
+ __ Branch(not_int32);
+
+ __ bind(&zero);
+ if (save_registers) {
+ __ Drop(2);
+ }
}
__ bind(&done);
@@ -923,7 +986,8 @@ void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm,
Register scratch1,
Register scratch2,
Register scratch3,
- DoubleRegister double_scratch,
+ DoubleRegister double_scratch0,
+ DoubleRegister double_scratch1,
Label* not_int32) {
ASSERT(!dst.is(object));
ASSERT(!scratch1.is(object) && !scratch2.is(object) && !scratch3.is(object));
@@ -931,36 +995,34 @@ void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm,
!scratch1.is(scratch3) &&
!scratch2.is(scratch3));
- Label done;
+ Label done, maybe_undefined;
__ UntagAndJumpIfSmi(dst, object, &done);
__ AssertRootValue(heap_number_map,
Heap::kHeapNumberMapRootIndex,
"HeapNumberMap register clobbered.");
- __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);
+
+ __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, &maybe_undefined);
// Object is a heap number.
// Convert the floating point value to a 32-bit integer.
if (CpuFeatures::IsSupported(FPU)) {
CpuFeatures::Scope scope(FPU);
// Load the double value.
- __ ldc1(double_scratch, FieldMemOperand(object, HeapNumber::kValueOffset));
+ __ ldc1(double_scratch0, FieldMemOperand(object, HeapNumber::kValueOffset));
- FPURegister single_scratch = double_scratch.low();
Register except_flag = scratch2;
__ EmitFPUTruncate(kRoundToZero,
- single_scratch,
- double_scratch,
+ dst,
+ double_scratch0,
scratch1,
+ double_scratch1,
except_flag,
kCheckForInexactConversion);
// Jump to not_int32 if the operation did not succeed.
__ Branch(not_int32, ne, except_flag, Operand(zero_reg));
- // Get the result in the destination register.
- __ mfc1(dst, single_scratch);
-
} else {
// Load the double value in the destination registers.
__ lw(scratch2, FieldMemOperand(object, HeapNumber::kExponentOffset));
@@ -992,20 +1054,28 @@ void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm,
__ Subu(dst, zero_reg, dst);
__ bind(&skip_sub);
}
+ __ Branch(&done);
+
+ __ bind(&maybe_undefined);
+ __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+ __ Branch(not_int32, ne, object, Operand(at));
+ // |undefined| is truncated to 0.
+ __ li(dst, Operand(Smi::FromInt(0)));
+ // Fall through.
__ bind(&done);
}
void FloatingPointHelper::DoubleIs32BitInteger(MacroAssembler* masm,
- Register src1,
- Register src2,
+ Register src_exponent,
+ Register src_mantissa,
Register dst,
Register scratch,
Label* not_int32) {
// Get exponent alone in scratch.
__ Ext(scratch,
- src1,
+ src_exponent,
HeapNumber::kExponentShift,
HeapNumber::kExponentBits);
@@ -1025,11 +1095,11 @@ void FloatingPointHelper::DoubleIs32BitInteger(MacroAssembler* masm,
// Another way to put it is that if (exponent - signbit) > 30 then the
// number cannot be represented as an int32.
Register tmp = dst;
- __ srl(at, src1, 31);
+ __ srl(at, src_exponent, 31);
__ subu(tmp, scratch, at);
__ Branch(not_int32, gt, tmp, Operand(30));
// - Bits [21:0] in the mantissa are not null.
- __ And(tmp, src2, 0x3fffff);
+ __ And(tmp, src_mantissa, 0x3fffff);
__ Branch(not_int32, ne, tmp, Operand(zero_reg));
// Otherwise the exponent needs to be big enough to shift left all the
@@ -1040,20 +1110,20 @@ void FloatingPointHelper::DoubleIs32BitInteger(MacroAssembler* masm,
// Get the 32 higher bits of the mantissa in dst.
__ Ext(dst,
- src2,
+ src_mantissa,
HeapNumber::kMantissaBitsInTopWord,
32 - HeapNumber::kMantissaBitsInTopWord);
- __ sll(at, src1, HeapNumber::kNonMantissaBitsInTopWord);
+ __ sll(at, src_exponent, HeapNumber::kNonMantissaBitsInTopWord);
__ or_(dst, dst, at);
// Create the mask and test the lower bits (of the higher bits).
__ li(at, 32);
__ subu(scratch, at, scratch);
- __ li(src2, 1);
- __ sllv(src1, src2, scratch);
- __ Subu(src1, src1, Operand(1));
- __ And(src1, dst, src1);
- __ Branch(not_int32, ne, src1, Operand(zero_reg));
+ __ li(src_mantissa, 1);
+ __ sllv(src_exponent, src_mantissa, scratch);
+ __ Subu(src_exponent, src_exponent, Operand(1));
+ __ And(src_exponent, dst, src_exponent);
+ __ Branch(not_int32, ne, src_exponent, Operand(zero_reg));
}
@@ -1128,11 +1198,12 @@ bool WriteInt32ToHeapNumberStub::IsPregenerated() {
}
-void WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime() {
+void WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(
+ Isolate* isolate) {
WriteInt32ToHeapNumberStub stub1(a1, v0, a2, a3);
WriteInt32ToHeapNumberStub stub2(a2, v0, a3, a0);
- stub1.GetCode()->set_is_pregenerated(true);
- stub2.GetCode()->set_is_pregenerated(true);
+ stub1.GetCode(isolate)->set_is_pregenerated(true);
+ stub2.GetCode(isolate)->set_is_pregenerated(true);
}
@@ -1192,48 +1263,43 @@ void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
// for "identity and not NaN".
static void EmitIdenticalObjectComparison(MacroAssembler* masm,
Label* slow,
- Condition cc,
- bool never_nan_nan) {
+ Condition cc) {
Label not_identical;
Label heap_number, return_equal;
Register exp_mask_reg = t5;
__ Branch(&not_identical, ne, a0, Operand(a1));
- // The two objects are identical. If we know that one of them isn't NaN then
- // we now know they test equal.
- if (cc != eq || !never_nan_nan) {
- __ li(exp_mask_reg, Operand(HeapNumber::kExponentMask));
-
- // Test for NaN. Sadly, we can't just compare to factory->nan_value(),
- // so we do the second best thing - test it ourselves.
- // They are both equal and they are not both Smis so both of them are not
- // Smis. If it's not a heap number, then return equal.
- if (cc == less || cc == greater) {
- __ GetObjectType(a0, t4, t4);
- __ Branch(slow, greater, t4, Operand(FIRST_SPEC_OBJECT_TYPE));
- } else {
- __ GetObjectType(a0, t4, t4);
- __ Branch(&heap_number, eq, t4, Operand(HEAP_NUMBER_TYPE));
- // Comparing JS objects with <=, >= is complicated.
- if (cc != eq) {
- __ Branch(slow, greater, t4, Operand(FIRST_SPEC_OBJECT_TYPE));
- // Normally here we fall through to return_equal, but undefined is
- // special: (undefined == undefined) == true, but
- // (undefined <= undefined) == false! See ECMAScript 11.8.5.
- if (cc == less_equal || cc == greater_equal) {
- __ Branch(&return_equal, ne, t4, Operand(ODDBALL_TYPE));
- __ LoadRoot(t2, Heap::kUndefinedValueRootIndex);
- __ Branch(&return_equal, ne, a0, Operand(t2));
- if (cc == le) {
- // undefined <= undefined should fail.
- __ li(v0, Operand(GREATER));
- } else {
- // undefined >= undefined should fail.
- __ li(v0, Operand(LESS));
- }
- __ Ret();
+ __ li(exp_mask_reg, Operand(HeapNumber::kExponentMask));
+
+ // Test for NaN. Sadly, we can't just compare to factory->nan_value(),
+ // so we do the second best thing - test it ourselves.
+ // They are both equal and they are not both Smis, so neither of them is a
+ // Smi. If it's not a heap number, then return equal.
+ if (cc == less || cc == greater) {
+ __ GetObjectType(a0, t4, t4);
+ __ Branch(slow, greater, t4, Operand(FIRST_SPEC_OBJECT_TYPE));
+ } else {
+ __ GetObjectType(a0, t4, t4);
+ __ Branch(&heap_number, eq, t4, Operand(HEAP_NUMBER_TYPE));
+ // Comparing JS objects with <=, >= is complicated.
+ if (cc != eq) {
+ __ Branch(slow, greater, t4, Operand(FIRST_SPEC_OBJECT_TYPE));
+ // Normally here we fall through to return_equal, but undefined is
+ // special: (undefined == undefined) == true, but
+ // (undefined <= undefined) == false! See ECMAScript 11.8.5.
+ if (cc == less_equal || cc == greater_equal) {
+ __ Branch(&return_equal, ne, t4, Operand(ODDBALL_TYPE));
+ __ LoadRoot(t2, Heap::kUndefinedValueRootIndex);
+ __ Branch(&return_equal, ne, a0, Operand(t2));
+ if (cc == le) {
+ // undefined <= undefined should fail.
+ __ li(v0, Operand(GREATER));
+ } else {
+ // undefined >= undefined should fail.
+ __ li(v0, Operand(LESS));
}
+ __ Ret();
}
}
}
@@ -1249,46 +1315,44 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm,
}
__ Ret();
- if (cc != eq || !never_nan_nan) {
- // For less and greater we don't have to check for NaN since the result of
- // x < x is false regardless. For the others here is some code to check
- // for NaN.
- if (cc != lt && cc != gt) {
- __ bind(&heap_number);
- // It is a heap number, so return non-equal if it's NaN and equal if it's
- // not NaN.
-
- // The representation of NaN values has all exponent bits (52..62) set,
- // and not all mantissa bits (0..51) clear.
- // Read top bits of double representation (second word of value).
- __ lw(t2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
- // Test that exponent bits are all set.
- __ And(t3, t2, Operand(exp_mask_reg));
- // If all bits not set (ne cond), then not a NaN, objects are equal.
- __ Branch(&return_equal, ne, t3, Operand(exp_mask_reg));
-
- // Shift out flag and all exponent bits, retaining only mantissa.
- __ sll(t2, t2, HeapNumber::kNonMantissaBitsInTopWord);
- // Or with all low-bits of mantissa.
- __ lw(t3, FieldMemOperand(a0, HeapNumber::kMantissaOffset));
- __ Or(v0, t3, Operand(t2));
- // For equal we already have the right value in v0: Return zero (equal)
- // if all bits in mantissa are zero (it's an Infinity) and non-zero if
- // not (it's a NaN). For <= and >= we need to load v0 with the failing
- // value if it's a NaN.
- if (cc != eq) {
- // All-zero means Infinity means equal.
- __ Ret(eq, v0, Operand(zero_reg));
- if (cc == le) {
- __ li(v0, Operand(GREATER)); // NaN <= NaN should fail.
- } else {
- __ li(v0, Operand(LESS)); // NaN >= NaN should fail.
- }
+ // For less and greater we don't have to check for NaN since the result of
+ // x < x is false regardless. For the others, here is some code to check
+ // for NaN.
+ if (cc != lt && cc != gt) {
+ __ bind(&heap_number);
+ // It is a heap number, so return non-equal if it's NaN and equal if it's
+ // not NaN.
+
+ // The representation of NaN values has all exponent bits (52..62) set,
+ // and not all mantissa bits (0..51) clear.
+ // Read top bits of double representation (second word of value).
+ __ lw(t2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
+ // Test that exponent bits are all set.
+ __ And(t3, t2, Operand(exp_mask_reg));
+ // If all bits not set (ne cond), then not a NaN, objects are equal.
+ __ Branch(&return_equal, ne, t3, Operand(exp_mask_reg));
+
+ // Shift out flag and all exponent bits, retaining only mantissa.
+ __ sll(t2, t2, HeapNumber::kNonMantissaBitsInTopWord);
+ // Or with all low-bits of mantissa.
+ __ lw(t3, FieldMemOperand(a0, HeapNumber::kMantissaOffset));
+ __ Or(v0, t3, Operand(t2));
+ // For equal we already have the right value in v0: Return zero (equal)
+ // if all bits in mantissa are zero (it's an Infinity) and non-zero if
+ // not (it's a NaN). For <= and >= we need to load v0 with the failing
+ // value if it's a NaN.
+ if (cc != eq) {
+ // All-zero means Infinity means equal.
+ __ Ret(eq, v0, Operand(zero_reg));
+ if (cc == le) {
+ __ li(v0, Operand(GREATER)); // NaN <= NaN should fail.
+ } else {
+ __ li(v0, Operand(LESS)); // NaN >= NaN should fail.
}
- __ Ret();
}
- // No fall through here.
+ __ Ret();
}
+ // No fall through here.
__ bind(&not_identical);
}
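
// Editor's note: an illustrative sketch (not in the patch) of the NaN test
// the code above performs on a heap number's raw words: a NaN has all
// exponent bits (52..62) set and a non-zero mantissa; the same exponent
// with an all-zero mantissa is an Infinity, which compares equal to itself.
#include <cstdint>
#include <cstring>

static bool IsNaNBits(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof bits);
  const uint64_t kExponentMask = uint64_t{0x7ff} << 52;
  const uint64_t kMantissaMask = (uint64_t{1} << 52) - 1;
  return (bits & kExponentMask) == kExponentMask &&  // Exponent all ones...
         (bits & kMantissaMask) != 0;                // ...and mantissa non-zero.
}
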
@@ -1336,7 +1400,7 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
__ mov(t6, rhs);
ConvertToDoubleStub stub1(a1, a0, t6, t5);
__ push(ra);
- __ Call(stub1.GetCode());
+ __ Call(stub1.GetCode(masm->isolate()));
__ pop(ra);
}
@@ -1371,7 +1435,7 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
__ mov(t6, lhs);
ConvertToDoubleStub stub2(a3, a2, t6, t5);
__ push(ra);
- __ Call(stub2.GetCode());
+ __ Call(stub2.GetCode(masm->isolate()));
__ pop(ra);
// Load rhs to a double in a1, a0.
if (rhs.is(a0)) {
@@ -1562,12 +1626,13 @@ static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
// Check for oddballs: true, false, null, undefined.
__ Branch(&return_not_equal, eq, a3, Operand(ODDBALL_TYPE));
- // Now that we have the types we might as well check for symbol-symbol.
- // Ensure that no non-strings have the symbol bit set.
- STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsSymbolMask);
- STATIC_ASSERT(kSymbolTag != 0);
+ // Now that we have the types we might as well check for
+ // internalized-internalized.
+ // Ensure that no non-strings have the internalized bit set.
+ STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsInternalizedMask);
+ STATIC_ASSERT(kInternalizedTag != 0);
__ And(t2, a2, Operand(a3));
- __ And(t0, t2, Operand(kIsSymbolMask));
+ __ And(t0, t2, Operand(kIsInternalizedMask));
__ Branch(&return_not_equal, ne, t0, Operand(zero_reg));
}
@@ -1605,30 +1670,30 @@ static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
}
-// Fast negative check for symbol-to-symbol equality.
-static void EmitCheckForSymbolsOrObjects(MacroAssembler* masm,
- Register lhs,
- Register rhs,
- Label* possible_strings,
- Label* not_both_strings) {
+// Fast negative check for internalized-to-internalized equality.
+static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
+ Register lhs,
+ Register rhs,
+ Label* possible_strings,
+ Label* not_both_strings) {
ASSERT((lhs.is(a0) && rhs.is(a1)) ||
(lhs.is(a1) && rhs.is(a0)));
// a2 is object type of lhs.
- // Ensure that no non-strings have the symbol bit set.
+ // Ensure that no non-strings have the internalized bit set.
Label object_test;
- STATIC_ASSERT(kSymbolTag != 0);
+ STATIC_ASSERT(kInternalizedTag != 0);
__ And(at, a2, Operand(kIsNotStringMask));
__ Branch(&object_test, ne, at, Operand(zero_reg));
- __ And(at, a2, Operand(kIsSymbolMask));
+ __ And(at, a2, Operand(kIsInternalizedMask));
__ Branch(possible_strings, eq, at, Operand(zero_reg));
__ GetObjectType(rhs, a3, a3);
__ Branch(not_both_strings, ge, a3, Operand(FIRST_NONSTRING_TYPE));
- __ And(at, a3, Operand(kIsSymbolMask));
+ __ And(at, a3, Operand(kIsInternalizedMask));
__ Branch(possible_strings, eq, at, Operand(zero_reg));
- // Both are symbols. We already checked they weren't the same pointer
- // so they are not equal.
+ // Both are internalized strings. We already checked they weren't the same
+ // pointer so they are not equal.
__ Ret(USE_DELAY_SLOT);
__ li(v0, Operand(1)); // Non-zero indicates not equal.
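
// Editor's note: a hedged sketch (not part of the patch) of why this fast
// negative check is sound: internalized strings are unique per content, so
// two distinct pointers can never hold equal internalized strings. The
// intern table below is a hypothetical stand-in.
#include <string>
#include <unordered_map>

struct InternTable {
  std::unordered_map<std::string, const std::string*> table;
  const std::string* Intern(const std::string& s) {
    auto it = table.find(s);
    if (it != table.end()) return it->second;        // Reuse the unique copy.
    const std::string* unique = new std::string(s);  // Leaked for brevity.
    return table.emplace(s, unique).first->second;
  }
};

// With interning, string equality degenerates to pointer identity.
static bool InternedEqual(const std::string* a, const std::string* b) {
  return a == b;
}
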
@@ -1761,30 +1826,48 @@ void NumberToStringStub::Generate(MacroAssembler* masm) {
}
-// On entry lhs_ (lhs) and rhs_ (rhs) are the things to be compared.
-// On exit, v0 is 0, positive, or negative (smi) to indicate the result
-// of the comparison.
-void CompareStub::Generate(MacroAssembler* masm) {
- Label slow; // Call builtin.
- Label not_smis, both_loaded_as_doubles;
+static void ICCompareStub_CheckInputType(MacroAssembler* masm,
+ Register input,
+ Register scratch,
+ CompareIC::State expected,
+ Label* fail) {
+ Label ok;
+ if (expected == CompareIC::SMI) {
+ __ JumpIfNotSmi(input, fail);
+ } else if (expected == CompareIC::NUMBER) {
+ __ JumpIfSmi(input, &ok);
+ __ CheckMap(input, scratch, Heap::kHeapNumberMapRootIndex, fail,
+ DONT_DO_SMI_CHECK);
+ }
+ // We could be strict about internalized/string here, but as long as
+ // hydrogen doesn't care, the stub doesn't have to care either.
+ __ bind(&ok);
+}
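
// Editor's note: a minimal sketch (not in the patch) of the inline-cache
// contract behind ICCompareStub_CheckInputType: a stub specialized for a
// recorded operand state bails out to |miss| when an operand stops
// matching, and the miss handler installs a more general stub. The names
// below are hypothetical stand-ins.
enum class CompareState { SMI, NUMBER, GENERIC };

static bool MatchesState(CompareState expected, bool is_smi, bool is_heap_number) {
  switch (expected) {
    case CompareState::SMI: return is_smi;
    case CompareState::NUMBER: return is_smi || is_heap_number;  // Smis are numbers.
    case CompareState::GENERIC: return true;  // No check is emitted.
  }
  return false;
}

// On a miss the recorded state only ever widens, so re-patching terminates.
static CompareState WidenOnMiss(CompareState s) {
  return s == CompareState::SMI ? CompareState::NUMBER : CompareState::GENERIC;
}
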
- if (include_smi_compare_) {
- Label not_two_smis, smi_done;
- __ Or(a2, a1, a0);
- __ JumpIfNotSmi(a2, &not_two_smis);
- __ sra(a1, a1, 1);
- __ sra(a0, a0, 1);
- __ Ret(USE_DELAY_SLOT);
- __ subu(v0, a1, a0);
- __ bind(&not_two_smis);
- } else if (FLAG_debug_code) {
- __ Or(a2, a1, a0);
- __ And(a2, a2, kSmiTagMask);
- __ Assert(ne, "CompareStub: unexpected smi operands.",
- a2, Operand(zero_reg));
- }
+// On entry a1 and a2 are the values to be compared.
+// On exit a0 is 0, positive or negative to indicate the result of
+// the comparison.
+void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
+ Register lhs = a1;
+ Register rhs = a0;
+ Condition cc = GetCondition();
+ Label miss;
+ ICCompareStub_CheckInputType(masm, lhs, a2, left_, &miss);
+ ICCompareStub_CheckInputType(masm, rhs, a3, right_, &miss);
+
+ Label slow; // Call builtin.
+ Label not_smis, both_loaded_as_doubles;
+
+ Label not_two_smis, smi_done;
+ __ Or(a2, a1, a0);
+ __ JumpIfNotSmi(a2, &not_two_smis);
+ __ sra(a1, a1, 1);
+ __ sra(a0, a0, 1);
+ __ Ret(USE_DELAY_SLOT);
+ __ subu(v0, a1, a0);
+ __ bind(&not_two_smis);
// NOTICE! This code is only reached after a smi-fast-case check, so
// it is certain that at least one operand isn't a smi.
@@ -1811,11 +1894,14 @@ void CompareStub::Generate(MacroAssembler* masm) {
// Check if the UseUserComparison flag is set by using the map of t0 for lhs
__ lbu(t0, FieldMemOperand(t0, Map::kBitField2Offset));
__ And(t0, t0, Operand(1 << Map::kUseUserObjectComparison));
- __ Branch(&user_compare, eq, t0, Operand(1 << Map::kUseUserObjectComparison));
+ __ Branch(&user_compare,
+ eq,
+ t0,
+ Operand(1 << Map::kUseUserObjectComparison));
- // Check if the UseUserComparison flag is _not_ set by using the map of t1 for
- // rhs and then jump to the miss label.
+ // Check if the UseUserComparison flag is _not_ set by using the map of t1
+ // for rhs and then jump to the miss label.
__ lbu(t1, FieldMemOperand(t1, Map::kBitField2Offset));
__ And(t1, t1, Operand(1 << Map::kUseUserObjectComparison));
__ Branch(&miss, ne, t1, Operand(1 << Map::kUseUserObjectComparison));
@@ -1831,13 +1917,13 @@ void CompareStub::Generate(MacroAssembler* masm) {
// Handle the case where the objects are identical. Either returns the answer
// or goes to slow. Only falls through if the objects were not identical.
- EmitIdenticalObjectComparison(masm, &slow, cc_, never_nan_nan_);
+ EmitIdenticalObjectComparison(masm, &slow, cc);
// If either is a Smi (we know that not both are), then they can only
// be strictly equal if the other is a HeapNumber.
STATIC_ASSERT(kSmiTag == 0);
ASSERT_EQ(0, Smi::FromInt(0));
- __ And(t2, lhs_, Operand(rhs_));
+ __ And(t2, lhs, Operand(rhs));
__ JumpIfNotSmi(t2, &not_smis, t0);
// One operand is a smi. EmitSmiNonsmiComparison generates code that can:
// 1) Return the answer.
@@ -1847,8 +1933,8 @@ void CompareStub::Generate(MacroAssembler* masm) {
// In cases 3 and 4 we have found out we were dealing with a number-number
// comparison and the numbers have been loaded into f12 and f14 as doubles,
// or in GP registers (a0, a1, a2, a3) depending on the presence of the FPU.
- EmitSmiNonsmiComparison(masm, lhs_, rhs_,
- &both_loaded_as_doubles, &slow, strict_);
+ EmitSmiNonsmiComparison(masm, lhs, rhs,
+ &both_loaded_as_doubles, &slow, strict());
__ bind(&both_loaded_as_doubles);
// f12, f14 are the double representations of the left hand side
@@ -1884,7 +1970,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
__ bind(&nan);
// NaN comparisons always fail.
// Load whatever we need in v0 to make the comparison fail.
- if (cc_ == lt || cc_ == le) {
+ if (cc == lt || cc == le) {
__ li(v0, Operand(GREATER));
} else {
__ li(v0, Operand(LESS));
@@ -1893,61 +1979,64 @@ void CompareStub::Generate(MacroAssembler* masm) {
} else {
// Checks for NaN in the doubles we have loaded. Can return the answer or
// fall through if neither is a NaN. Also binds rhs_not_nan.
- EmitNanCheck(masm, cc_);
+ EmitNanCheck(masm, cc);
// Compares two doubles that are not NaNs. Returns the answer.
// Never falls through.
- EmitTwoNonNanDoubleComparison(masm, cc_);
+ EmitTwoNonNanDoubleComparison(masm, cc);
}
__ bind(&not_smis);
// At this point we know we are dealing with two different objects,
// and neither of them is a Smi. The objects are in lhs_ and rhs_.
- if (strict_) {
+ if (strict()) {
// This returns non-equal for some object types, or falls through if it
// was not lucky.
- EmitStrictTwoHeapObjectCompare(masm, lhs_, rhs_);
+ EmitStrictTwoHeapObjectCompare(masm, lhs, rhs);
}
- Label check_for_symbols;
+ Label check_for_internalized_strings;
Label flat_string_check;
// Check for heap-number-heap-number comparison. Can jump to slow case,
// or load both doubles and jump to the code that handles
- // that case. If the inputs are not doubles then jumps to check_for_symbols.
+ // that case. If the inputs are not doubles then jumps to
+ // check_for_internalized_strings.
// In this case a2 will contain the type of lhs_.
EmitCheckForTwoHeapNumbers(masm,
- lhs_,
- rhs_,
+ lhs,
+ rhs,
&both_loaded_as_doubles,
- &check_for_symbols,
+ &check_for_internalized_strings,
&flat_string_check);
- __ bind(&check_for_symbols);
- if (cc_ == eq && !strict_) {
- // Returns an answer for two symbols or two detectable objects.
+ __ bind(&check_for_internalized_strings);
+ if (cc == eq && !strict()) {
+ // Returns an answer for two internalized strings or two
+ // detectable objects.
// Otherwise jumps to string case or not both strings case.
// Assumes that a2 is the type of lhs_ on entry.
- EmitCheckForSymbolsOrObjects(masm, lhs_, rhs_, &flat_string_check, &slow);
+ EmitCheckForInternalizedStringsOrObjects(
+ masm, lhs, rhs, &flat_string_check, &slow);
}
// Check for both being sequential ASCII strings, and inline if that is the
// case.
__ bind(&flat_string_check);
- __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs_, rhs_, a2, a3, &slow);
+ __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs, rhs, a2, a3, &slow);
__ IncrementCounter(isolate->counters()->string_compare_native(), 1, a2, a3);
- if (cc_ == eq) {
+ if (cc == eq) {
StringCompareStub::GenerateFlatAsciiStringEquals(masm,
- lhs_,
- rhs_,
+ lhs,
+ rhs,
a2,
a3,
t0);
} else {
StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
- lhs_,
- rhs_,
+ lhs,
+ rhs,
a2,
a3,
t0,
@@ -1958,18 +2047,18 @@ void CompareStub::Generate(MacroAssembler* masm) {
__ bind(&slow);
// Prepare for call to builtin. Push object pointers, a0 (lhs) first,
// a1 (rhs) second.
- __ Push(lhs_, rhs_);
+ __ Push(lhs, rhs);
// Figure out which native to call and setup the arguments.
Builtins::JavaScript native;
- if (cc_ == eq) {
- native = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
+ if (cc == eq) {
+ native = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
} else {
native = Builtins::COMPARE;
int ncr; // NaN compare result.
- if (cc_ == lt || cc_ == le) {
+ if (cc == lt || cc == le) {
ncr = GREATER;
} else {
- ASSERT(cc_ == gt || cc_ == ge); // Remaining cases.
+ ASSERT(cc == gt || cc == ge); // Remaining cases.
ncr = LESS;
}
__ li(a0, Operand(Smi::FromInt(ncr)));
@@ -1979,6 +2068,9 @@ void CompareStub::Generate(MacroAssembler* masm) {
// Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
__ InvokeBuiltin(native, JUMP_FUNCTION);
+
+ __ bind(&miss);
+ GenerateMiss(masm);
}
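
// Editor's note: an illustrative sketch (not in the patch) of the NaN
// handling in the stub above: every ordered comparison against NaN must
// yield false, so the stub materializes a result whose sign makes the
// requested condition fail (GREATER for < and <=, LESS for > and >=).
enum CompareResultSketch { LESS_R = -1, EQUAL_R = 0, GREATER_R = 1 };
enum CondSketch { kLt, kLe, kGt, kGe, kEq };

static CompareResultSketch NaNCompareResult(CondSketch cc) {
  // lt/le fail when the result is GREATER; gt/ge fail when it is LESS.
  return (cc == kLt || cc == kLe) ? GREATER_R : LESS_R;
}
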
@@ -2147,8 +2239,8 @@ void UnaryOpStub::Generate(MacroAssembler* masm) {
case UnaryOpIC::SMI:
GenerateSmiStub(masm);
break;
- case UnaryOpIC::HEAP_NUMBER:
- GenerateHeapNumberStub(masm);
+ case UnaryOpIC::NUMBER:
+ GenerateNumberStub(masm);
break;
case UnaryOpIC::GENERIC:
GenerateGenericStub(masm);
@@ -2228,13 +2320,13 @@ void UnaryOpStub::GenerateSmiCodeBitNot(MacroAssembler* masm,
// TODO(svenpanne): Use virtual functions instead of switch.
-void UnaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
+void UnaryOpStub::GenerateNumberStub(MacroAssembler* masm) {
switch (op_) {
case Token::SUB:
- GenerateHeapNumberStubSub(masm);
+ GenerateNumberStubSub(masm);
break;
case Token::BIT_NOT:
- GenerateHeapNumberStubBitNot(masm);
+ GenerateNumberStubBitNot(masm);
break;
default:
UNREACHABLE();
@@ -2242,7 +2334,7 @@ void UnaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
}
-void UnaryOpStub::GenerateHeapNumberStubSub(MacroAssembler* masm) {
+void UnaryOpStub::GenerateNumberStubSub(MacroAssembler* masm) {
Label non_smi, slow, call_builtin;
GenerateSmiCodeSub(masm, &non_smi, &call_builtin);
__ bind(&non_smi);
@@ -2254,7 +2346,7 @@ void UnaryOpStub::GenerateHeapNumberStubSub(MacroAssembler* masm) {
}
-void UnaryOpStub::GenerateHeapNumberStubBitNot(MacroAssembler* masm) {
+void UnaryOpStub::GenerateNumberStubBitNot(MacroAssembler* masm) {
Label non_smi, slow;
GenerateSmiCodeBitNot(masm, &non_smi);
__ bind(&non_smi);
@@ -2357,7 +2449,7 @@ void UnaryOpStub::GenerateHeapNumberCodeBitNot(
// WriteInt32ToHeapNumberStub does not trigger GC, so we do not
// have to set up a frame.
WriteInt32ToHeapNumberStub stub(a1, v0, a2, a3);
- __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+ __ Jump(stub.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
}
__ bind(&impossible);
@@ -2419,20 +2511,23 @@ void UnaryOpStub::GenerateGenericCodeFallback(
}
+void BinaryOpStub::Initialize() {
+ platform_specific_bit_ = CpuFeatures::IsSupported(FPU);
+}
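
// Editor's note: a small hedged sketch (not in the patch) of why the FPU
// bit goes into the stub's key: stubs are cached and looked up by key, so
// code generated with FPU instructions must never be returned by a lookup
// on a CPU without an FPU, and vice versa. The field layout below is
// hypothetical, for illustration only.
#include <cstdint>

static uint32_t StubKeySketch(uint32_t op, uint32_t mode, bool fpu_supported) {
  // op in bits [0,7), mode in bits [7,9), platform bit at 9 (illustrative).
  return op | (mode << 7) | (static_cast<uint32_t>(fpu_supported) << 9);
}
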
+
+
void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
Label get_result;
__ Push(a1, a0);
__ li(a2, Operand(Smi::FromInt(MinorKey())));
- __ li(a1, Operand(Smi::FromInt(op_)));
- __ li(a0, Operand(Smi::FromInt(operands_type_)));
- __ Push(a2, a1, a0);
+ __ push(a2);
__ TailCallExternalReference(
ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
masm->isolate()),
- 5,
+ 3,
1);
}
@@ -2443,59 +2538,8 @@ void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(
}
-void BinaryOpStub::Generate(MacroAssembler* masm) {
- // Explicitly allow generation of nested stubs. It is safe here because
- // generation code does not use any raw pointers.
- AllowStubCallsScope allow_stub_calls(masm, true);
- switch (operands_type_) {
- case BinaryOpIC::UNINITIALIZED:
- GenerateTypeTransition(masm);
- break;
- case BinaryOpIC::SMI:
- GenerateSmiStub(masm);
- break;
- case BinaryOpIC::INT32:
- GenerateInt32Stub(masm);
- break;
- case BinaryOpIC::HEAP_NUMBER:
- GenerateHeapNumberStub(masm);
- break;
- case BinaryOpIC::ODDBALL:
- GenerateOddballStub(masm);
- break;
- case BinaryOpIC::BOTH_STRING:
- GenerateBothStringStub(masm);
- break;
- case BinaryOpIC::STRING:
- GenerateStringStub(masm);
- break;
- case BinaryOpIC::GENERIC:
- GenerateGeneric(masm);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void BinaryOpStub::PrintName(StringStream* stream) {
- const char* op_name = Token::Name(op_);
- const char* overwrite_name;
- switch (mode_) {
- case NO_OVERWRITE: overwrite_name = "Alloc"; break;
- case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
- case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
- default: overwrite_name = "UnknownOverwrite"; break;
- }
- stream->Add("BinaryOpStub_%s_%s_%s",
- op_name,
- overwrite_name,
- BinaryOpIC::GetName(operands_type_));
-}
-
-
-
-void BinaryOpStub::GenerateSmiSmiOperation(MacroAssembler* masm) {
+void BinaryOpStub_GenerateSmiSmiOperation(MacroAssembler* masm,
+ Token::Value op) {
Register left = a1;
Register right = a0;
@@ -2506,7 +2550,7 @@ void BinaryOpStub::GenerateSmiSmiOperation(MacroAssembler* masm) {
STATIC_ASSERT(kSmiTag == 0);
Label not_smi_result;
- switch (op_) {
+ switch (op) {
case Token::ADD:
__ AdduAndCheckForOverflow(v0, left, right, scratch1);
__ RetOnNoOverflow(scratch1);
@@ -2649,10 +2693,24 @@ void BinaryOpStub::GenerateSmiSmiOperation(MacroAssembler* masm) {
}
-void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
- bool smi_operands,
- Label* not_numbers,
- Label* gc_required) {
+void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
+ Register result,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required,
+ OverwriteMode mode);
+
+
+void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm,
+ BinaryOpIC::TypeInfo left_type,
+ BinaryOpIC::TypeInfo right_type,
+ bool smi_operands,
+ Label* not_numbers,
+ Label* gc_required,
+ Label* miss,
+ Token::Value op,
+ OverwriteMode mode) {
Register left = a1;
Register right = a0;
Register scratch1 = t3;
@@ -2664,11 +2722,17 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
__ AssertSmi(left);
__ AssertSmi(right);
}
+ if (left_type == BinaryOpIC::SMI) {
+ __ JumpIfNotSmi(left, miss);
+ }
+ if (right_type == BinaryOpIC::SMI) {
+ __ JumpIfNotSmi(right, miss);
+ }
Register heap_number_map = t2;
__ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- switch (op_) {
+ switch (op) {
case Token::ADD:
case Token::SUB:
case Token::MUL:
@@ -2678,25 +2742,42 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
// depending on whether FPU is available or not.
FloatingPointHelper::Destination destination =
CpuFeatures::IsSupported(FPU) &&
- op_ != Token::MOD ?
+ op != Token::MOD ?
FloatingPointHelper::kFPURegisters :
FloatingPointHelper::kCoreRegisters;
// Allocate new heap number for result.
Register result = s0;
- GenerateHeapResultAllocation(
- masm, result, heap_number_map, scratch1, scratch2, gc_required);
+ BinaryOpStub_GenerateHeapResultAllocation(
+ masm, result, heap_number_map, scratch1, scratch2, gc_required, mode);
// Load the operands.
if (smi_operands) {
FloatingPointHelper::LoadSmis(masm, destination, scratch1, scratch2);
} else {
- FloatingPointHelper::LoadOperands(masm,
- destination,
- heap_number_map,
- scratch1,
- scratch2,
- not_numbers);
+ // Load right operand to f14 or a2/a3.
+ if (right_type == BinaryOpIC::INT32) {
+ FloatingPointHelper::LoadNumberAsInt32Double(
+ masm, right, destination, f14, f16, a2, a3, heap_number_map,
+ scratch1, scratch2, f2, miss);
+ } else {
+ Label* fail = (right_type == BinaryOpIC::NUMBER) ? miss : not_numbers;
+ FloatingPointHelper::LoadNumber(
+ masm, destination, right, f14, a2, a3, heap_number_map,
+ scratch1, scratch2, fail);
+ }
+ // Load left operand to f12 or a0/a1. This keeps a0/a1 intact if it
+ // jumps to |miss|.
+ if (left_type == BinaryOpIC::INT32) {
+ FloatingPointHelper::LoadNumberAsInt32Double(
+ masm, left, destination, f12, f16, a0, a1, heap_number_map,
+ scratch1, scratch2, f2, miss);
+ } else {
+ Label* fail = (left_type == BinaryOpIC::NUMBER) ? miss : not_numbers;
+ FloatingPointHelper::LoadNumber(
+ masm, destination, left, f12, a0, a1, heap_number_map,
+ scratch1, scratch2, fail);
+ }
}
// Calculate the result.
@@ -2705,7 +2786,7 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
// f12: Left value.
// f14: Right value.
CpuFeatures::Scope scope(FPU);
- switch (op_) {
+ switch (op) {
case Token::ADD:
__ add_d(f10, f12, f14);
break;
@@ -2731,7 +2812,7 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
} else {
// Call the C function to handle the double operation.
FloatingPointHelper::CallCCodeForDoubleOperation(masm,
- op_,
+ op,
result,
scratch1);
if (FLAG_debug_code) {
@@ -2771,7 +2852,7 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
not_numbers);
}
Label result_not_a_smi;
- switch (op_) {
+ switch (op) {
case Token::BIT_OR:
__ Or(a2, a3, Operand(a2));
break;
@@ -2821,8 +2902,9 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
__ AllocateHeapNumber(
result, scratch1, scratch2, heap_number_map, gc_required);
} else {
- GenerateHeapResultAllocation(
- masm, result, heap_number_map, scratch1, scratch2, gc_required);
+ BinaryOpStub_GenerateHeapResultAllocation(
+ masm, result, heap_number_map, scratch1, scratch2, gc_required,
+ mode);
}
// a2: Answer as signed int32.
@@ -2837,7 +2919,7 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
// mentioned above SHR needs to always produce a positive result.
CpuFeatures::Scope scope(FPU);
__ mtc1(a2, f0);
- if (op_ == Token::SHR) {
+ if (op == Token::SHR) {
__ Cvt_d_uw(f0, f0, f22);
} else {
__ cvt_d_w(f0, f0);
@@ -2864,12 +2946,14 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
// Generate the smi code. If the operation on smis is successful this return is
// generated. If the result is not a smi and heap number allocation is not
// requested, the code falls through. If number allocation is requested but a
-// heap number cannot be allocated the code jumps to the lable gc_required.
-void BinaryOpStub::GenerateSmiCode(
+// heap number cannot be allocated the code jumps to the label gc_required.
+void BinaryOpStub_GenerateSmiCode(
MacroAssembler* masm,
Label* use_runtime,
Label* gc_required,
- SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
+ Token::Value op,
+ BinaryOpStub::SmiCodeGenerateHeapNumberResults allow_heapnumber_results,
+ OverwriteMode mode) {
Label not_smis;
Register left = a1;
@@ -2882,12 +2966,14 @@ void BinaryOpStub::GenerateSmiCode(
__ JumpIfNotSmi(scratch1, &not_smis);
// If the smi-smi operation results in a smi return is generated.
- GenerateSmiSmiOperation(masm);
+ BinaryOpStub_GenerateSmiSmiOperation(masm, op);
// If heap number results are possible generate the result in an allocated
// heap number.
- if (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) {
- GenerateFPOperation(masm, true, use_runtime, gc_required);
+ if (allow_heapnumber_results == BinaryOpStub::ALLOW_HEAPNUMBER_RESULTS) {
+ BinaryOpStub_GenerateFPOperation(
+ masm, BinaryOpIC::UNINITIALIZED, BinaryOpIC::UNINITIALIZED, true,
+ use_runtime, gc_required, &not_smis, op, mode);
}
__ bind(&not_smis);
}
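
// Editor's note: a hedged sketch (not part of the patch) of the smi-smi
// fast path. With kSmiTag == 0 and a one-bit tag, a smi is the value
// shifted left once, so tagged addition is ordinary addition plus an
// overflow check, which is what AdduAndCheckForOverflow provides above.
#include <cstdint>

static bool IsSmi(int32_t tagged) { return (tagged & 1) == 0; }
static int32_t SmiValue(int32_t tagged) { return tagged >> 1; }

// Returns false where the stub would fall through to &not_smis or the
// heap-number path (non-smi input or an overflowing result).
static bool SmiAddSketch(int32_t left, int32_t right, int32_t* result) {
  if (!IsSmi(left) || !IsSmi(right)) return false;
  int64_t sum = static_cast<int64_t>(left) + right;  // Tagged values add directly.
  if (sum < INT32_MIN || sum > INT32_MAX) return false;  // Overflow: not a smi.
  *result = static_cast<int32_t>(sum);
  return true;
}
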
@@ -2899,14 +2985,14 @@ void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
if (result_type_ == BinaryOpIC::UNINITIALIZED ||
result_type_ == BinaryOpIC::SMI) {
// Only allow smi results.
- GenerateSmiCode(masm, &call_runtime, NULL, NO_HEAPNUMBER_RESULTS);
+ BinaryOpStub_GenerateSmiCode(
+ masm, &call_runtime, NULL, op_, NO_HEAPNUMBER_RESULTS, mode_);
} else {
// Allow heap number result and don't make a transition if a heap number
// cannot be allocated.
- GenerateSmiCode(masm,
- &call_runtime,
- &call_runtime,
- ALLOW_HEAPNUMBER_RESULTS);
+ BinaryOpStub_GenerateSmiCode(
+ masm, &call_runtime, &call_runtime, op_, ALLOW_HEAPNUMBER_RESULTS,
+ mode_);
}
// Code falls through if the result is not returned as either a smi or heap
@@ -2914,22 +3000,14 @@ void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
GenerateTypeTransition(masm);
__ bind(&call_runtime);
+ GenerateRegisterArgsPush(masm);
GenerateCallRuntime(masm);
}
-void BinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
- ASSERT(operands_type_ == BinaryOpIC::STRING);
- // Try to add arguments as strings, otherwise, transition to the generic
- // BinaryOpIC type.
- GenerateAddStrings(masm);
- GenerateTypeTransition(masm);
-}
-
-
void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
Label call_runtime;
- ASSERT(operands_type_ == BinaryOpIC::BOTH_STRING);
+ ASSERT(left_type_ == BinaryOpIC::STRING && right_type_ == BinaryOpIC::STRING);
ASSERT(op_ == Token::ADD);
// If both arguments are strings, call the string add stub.
// Otherwise, do a transition.
@@ -2958,7 +3036,7 @@ void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
- ASSERT(operands_type_ == BinaryOpIC::INT32);
+ ASSERT(Max(left_type_, right_type_) == BinaryOpIC::INT32);
Register left = a1;
Register right = a0;
@@ -2981,7 +3059,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
Label skip;
__ Or(scratch1, left, right);
__ JumpIfNotSmi(scratch1, &skip);
- GenerateSmiSmiOperation(masm);
+ BinaryOpStub_GenerateSmiSmiOperation(masm, op_);
// Fall through if the result is not a smi.
__ bind(&skip);
@@ -2991,6 +3069,15 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
case Token::MUL:
case Token::DIV:
case Token::MOD: {
+ // It could be that only SMIs have been seen at either the left
+ // or the right operand. For precise type feedback, patch the IC
+ // again if this changes.
+ if (left_type_ == BinaryOpIC::SMI) {
+ __ JumpIfNotSmi(left, &transition);
+ }
+ if (right_type_ == BinaryOpIC::SMI) {
+ __ JumpIfNotSmi(right, &transition);
+ }
// Load both operands and check that they are 32-bit integer.
// Jump to type transition if they are not. The registers a0 and a1 (right
// and left) are preserved for the runtime call.
@@ -3003,6 +3090,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
right,
destination,
f14,
+ f16,
a2,
a3,
heap_number_map,
@@ -3014,6 +3102,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
left,
destination,
f12,
+ f16,
t0,
t1,
heap_number_map,
@@ -3050,9 +3139,10 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
Register except_flag = scratch2;
__ EmitFPUTruncate(kRoundToZero,
- single_scratch,
- f10,
scratch1,
+ f10,
+ at,
+ f16,
except_flag);
if (result_type_ <= BinaryOpIC::INT32) {
@@ -3061,7 +3151,6 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
}
// Check if the result fits in a smi.
- __ mfc1(scratch1, single_scratch);
__ Addu(scratch2, scratch1, Operand(0x40000000));
// If not try to return a heap number.
__ Branch(&return_heap_number, lt, scratch2, Operand(zero_reg));
@@ -3083,16 +3172,17 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
__ bind(&return_heap_number);
// Return a heap number, or fall through to type transition or runtime
// call if we can't.
- if (result_type_ >= ((op_ == Token::DIV) ? BinaryOpIC::HEAP_NUMBER
+ if (result_type_ >= ((op_ == Token::DIV) ? BinaryOpIC::NUMBER
: BinaryOpIC::INT32)) {
// We are using FPU registers so s0 is available.
heap_number_result = s0;
- GenerateHeapResultAllocation(masm,
- heap_number_result,
- heap_number_map,
- scratch1,
- scratch2,
- &call_runtime);
+ BinaryOpStub_GenerateHeapResultAllocation(masm,
+ heap_number_result,
+ heap_number_map,
+ scratch1,
+ scratch2,
+ &call_runtime,
+ mode_);
__ mov(v0, heap_number_result);
__ sdc1(f10, FieldMemOperand(v0, HeapNumber::kValueOffset));
__ Ret();
@@ -3110,12 +3200,13 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
// Allocate a heap number to store the result.
heap_number_result = s0;
- GenerateHeapResultAllocation(masm,
- heap_number_result,
- heap_number_map,
- scratch1,
- scratch2,
- &pop_and_call_runtime);
+ BinaryOpStub_GenerateHeapResultAllocation(masm,
+ heap_number_result,
+ heap_number_map,
+ scratch1,
+ scratch2,
+ &pop_and_call_runtime,
+ mode_);
// Load the left value from the value saved on the stack.
__ Pop(a1, a0);
@@ -3154,6 +3245,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
scratch2,
scratch3,
f0,
+ f2,
&transition);
FloatingPointHelper::LoadNumberAsInt32(masm,
right,
@@ -3163,6 +3255,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
scratch2,
scratch3,
f0,
+ f2,
&transition);
// The ECMA-262 standard specifies that, for shift operations, only the
@@ -3224,12 +3317,13 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
__ bind(&return_heap_number);
heap_number_result = t1;
- GenerateHeapResultAllocation(masm,
- heap_number_result,
- heap_number_map,
- scratch1,
- scratch2,
- &call_runtime);
+ BinaryOpStub_GenerateHeapResultAllocation(masm,
+ heap_number_result,
+ heap_number_map,
+ scratch1,
+ scratch2,
+ &call_runtime,
+ mode_);
if (CpuFeatures::IsSupported(FPU)) {
CpuFeatures::Scope scope(FPU);
@@ -3273,6 +3367,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
}
__ bind(&call_runtime);
+ GenerateRegisterArgsPush(masm);
GenerateCallRuntime(masm);
}
@@ -3306,25 +3401,37 @@ void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
}
__ bind(&done);
- GenerateHeapNumberStub(masm);
+ GenerateNumberStub(masm);
}
-void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
- Label call_runtime;
- GenerateFPOperation(masm, false, &call_runtime, &call_runtime);
+void BinaryOpStub::GenerateNumberStub(MacroAssembler* masm) {
+ Label call_runtime, transition;
+ BinaryOpStub_GenerateFPOperation(
+ masm, left_type_, right_type_, false,
+ &transition, &call_runtime, &transition, op_, mode_);
+
+ __ bind(&transition);
+ GenerateTypeTransition(masm);
__ bind(&call_runtime);
+ GenerateRegisterArgsPush(masm);
GenerateCallRuntime(masm);
}
void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
- Label call_runtime, call_string_add_or_runtime;
+ Label call_runtime, call_string_add_or_runtime, transition;
- GenerateSmiCode(masm, &call_runtime, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
+ BinaryOpStub_GenerateSmiCode(
+ masm, &call_runtime, &call_runtime, op_, ALLOW_HEAPNUMBER_RESULTS, mode_);
- GenerateFPOperation(masm, false, &call_string_add_or_runtime, &call_runtime);
+ BinaryOpStub_GenerateFPOperation(
+ masm, left_type_, right_type_, false,
+ &call_string_add_or_runtime, &call_runtime, &transition, op_, mode_);
+
+ __ bind(&transition);
+ GenerateTypeTransition(masm);
__ bind(&call_string_add_or_runtime);
if (op_ == Token::ADD) {
@@ -3332,6 +3439,7 @@ void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
}
__ bind(&call_runtime);
+ GenerateRegisterArgsPush(masm);
GenerateCallRuntime(masm);
}
@@ -3367,63 +3475,20 @@ void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
}
-void BinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) {
- GenerateRegisterArgsPush(masm);
- switch (op_) {
- case Token::ADD:
- __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
- break;
- case Token::SUB:
- __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
- break;
- case Token::MUL:
- __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
- break;
- case Token::DIV:
- __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
- break;
- case Token::MOD:
- __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
- break;
- case Token::BIT_OR:
- __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
- break;
- case Token::BIT_AND:
- __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
- break;
- case Token::BIT_XOR:
- __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
- break;
- case Token::SAR:
- __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
- break;
- case Token::SHR:
- __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
- break;
- case Token::SHL:
- __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void BinaryOpStub::GenerateHeapResultAllocation(
- MacroAssembler* masm,
- Register result,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
-
+void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
+ Register result,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required,
+ OverwriteMode mode) {
// Code below will scratch result if allocation fails. To keep both arguments
// intact for the runtime call result cannot be one of these.
ASSERT(!result.is(a0) && !result.is(a1));
- if (mode_ == OVERWRITE_LEFT || mode_ == OVERWRITE_RIGHT) {
+ if (mode == OVERWRITE_LEFT || mode == OVERWRITE_RIGHT) {
Label skip_allocation, allocated;
- Register overwritable_operand = mode_ == OVERWRITE_LEFT ? a1 : a0;
+ Register overwritable_operand = mode == OVERWRITE_LEFT ? a1 : a0;
// If the overwritable operand is already an object, we skip the
// allocation of a heap number.
__ JumpIfNotSmi(overwritable_operand, &skip_allocation);
@@ -3436,7 +3501,7 @@ void BinaryOpStub::GenerateHeapResultAllocation(
__ mov(result, overwritable_operand);
__ bind(&allocated);
} else {
- ASSERT(mode_ == NO_OVERWRITE);
+ ASSERT(mode == NO_OVERWRITE);
__ AllocateHeapNumber(
result, scratch1, scratch2, heap_number_map, gc_required);
}
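
// Editor's note: a hedged sketch (not part of the patch) of the overwrite
// optimization above: when an operand is a heap number that the IC says may
// be overwritten, its storage is reused for the result instead of
// allocating a fresh heap number. Types below are simplified stand-ins.
enum OverwriteModeSketch { NO_OVERWRITE_S, OVERWRITE_LEFT_S, OVERWRITE_RIGHT_S };

struct HeapNumberBox { double value; bool is_smi; };

static HeapNumberBox* AllocateResultSketch(HeapNumberBox* left,
                                           HeapNumberBox* right,
                                           OverwriteModeSketch mode,
                                           HeapNumberBox* (*allocate)()) {
  if (mode == OVERWRITE_LEFT_S && !left->is_smi) return left;    // Reuse left.
  if (mode == OVERWRITE_RIGHT_S && !right->is_smi) return right; // Reuse right.
  return allocate();  // May fail; the stub then jumps to gc_required.
}
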
@@ -3757,9 +3822,10 @@ void MathPowStub::Generate(MacroAssembler* masm) {
Label int_exponent_convert;
// Detect integer exponents stored as double.
__ EmitFPUTruncate(kRoundToMinusInf,
- single_scratch,
- double_exponent,
scratch,
+ double_exponent,
+ at,
+ double_scratch,
scratch2,
kCheckForInexactConversion);
// scratch2 == 0 means there was no conversion error.
@@ -3817,7 +3883,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ push(ra);
{
AllowExternalCallThatCantCauseGC scope(masm);
- __ PrepareCallCFunction(0, 2, scratch);
+ __ PrepareCallCFunction(0, 2, scratch2);
__ SetCallCDoubleArguments(double_base, double_exponent);
__ CallCFunction(
ExternalReference::power_double_double_function(masm->isolate()),
@@ -3828,7 +3894,6 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ jmp(&done);
__ bind(&int_exponent_convert);
- __ mfc1(scratch, single_scratch);
}
// Calculate power with integer exponent.
@@ -3929,31 +3994,59 @@ bool CEntryStub::IsPregenerated() {
}
-void CodeStub::GenerateStubsAheadOfTime() {
- CEntryStub::GenerateAheadOfTime();
- WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime();
- StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime();
- RecordWriteStub::GenerateFixedRegStubsAheadOfTime();
+void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
+ CEntryStub::GenerateAheadOfTime(isolate);
+ WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(isolate);
+ StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
+ RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate);
}
-void CodeStub::GenerateFPStubs() {
- CEntryStub save_doubles(1, kSaveFPRegs);
- Handle<Code> code = save_doubles.GetCode();
- code->set_is_pregenerated(true);
- StoreBufferOverflowStub stub(kSaveFPRegs);
- stub.GetCode()->set_is_pregenerated(true);
- code->GetIsolate()->set_fp_stubs_generated(true);
+void CodeStub::GenerateFPStubs(Isolate* isolate) {
+ SaveFPRegsMode mode = CpuFeatures::IsSupported(FPU)
+ ? kSaveFPRegs
+ : kDontSaveFPRegs;
+ CEntryStub save_doubles(1, mode);
+ StoreBufferOverflowStub stub(mode);
+ // These stubs might already be in the snapshot; detect that and don't
+ // regenerate them, since regenerating would leave the code stub
+ // initialization state inconsistent.
+ Code* save_doubles_code = NULL;
+ Code* store_buffer_overflow_code = NULL;
+ if (!save_doubles.FindCodeInCache(&save_doubles_code, ISOLATE)) {
+ if (CpuFeatures::IsSupported(FPU)) {
+ CpuFeatures::Scope scope2(FPU);
+ save_doubles_code = *save_doubles.GetCode(isolate);
+ store_buffer_overflow_code = *stub.GetCode(isolate);
+ } else {
+ save_doubles_code = *save_doubles.GetCode(isolate);
+ store_buffer_overflow_code = *stub.GetCode(isolate);
+ }
+ save_doubles_code->set_is_pregenerated(true);
+ store_buffer_overflow_code->set_is_pregenerated(true);
+ }
+ ISOLATE->set_fp_stubs_generated(true);
}
-void CEntryStub::GenerateAheadOfTime() {
+void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
CEntryStub stub(1, kDontSaveFPRegs);
- Handle<Code> code = stub.GetCode();
+ Handle<Code> code = stub.GetCode(isolate);
code->set_is_pregenerated(true);
}
+static void JumpIfOOM(MacroAssembler* masm,
+ Register value,
+ Register scratch,
+ Label* oom_label) {
+ STATIC_ASSERT(Failure::OUT_OF_MEMORY_EXCEPTION == 3);
+ STATIC_ASSERT(kFailureTag == 3);
+ __ andi(scratch, value, 0xf);
+ __ Branch(oom_label, eq, scratch, Operand(0xf));
+}
+
+
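// Editor's note: an illustrative sketch (not in the patch) of the encoding
// JumpIfOOM tests, following the STATIC_ASSERTs above: failures carry
// kFailureTag (0b11) in their low two bits and the failure type in the next
// two, so an out-of-memory failure (type 3) has 0b1111 == 0xf in its low
// nibble.
#include <cstdint>

static const uint32_t kFailureTagSketch = 3;      // Low 2 bits.
static const uint32_t kFailureTagSizeSketch = 2;
static const uint32_t kOutOfMemoryType = 3;       // Failure::OUT_OF_MEMORY_EXCEPTION.

static bool IsOutOfMemorySketch(uint32_t value) {
  const uint32_t oom_bits =
      (kOutOfMemoryType << kFailureTagSizeSketch) | kFailureTagSketch;
  return (value & 0xf) == oom_bits;  // Same test as the andi + Branch above.
}
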
void CEntryStub::GenerateCore(MacroAssembler* masm,
Label* throw_normal_exception,
Label* throw_termination_exception,
@@ -4060,14 +4153,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
__ Branch(&retry, eq, t0, Operand(zero_reg));
// Special handling of out of memory exceptions.
- Failure* out_of_memory = Failure::OutOfMemoryException();
- __ Branch(USE_DELAY_SLOT,
- throw_out_of_memory_exception,
- eq,
- v0,
- Operand(reinterpret_cast<int32_t>(out_of_memory)));
- // If we throw the OOM exception, the value of a3 doesn't matter.
- // Any instruction can be in the delay slot that's not a jump.
+ JumpIfOOM(masm, v0, t0, throw_out_of_memory_exception);
// Retrieve the pending exception and clear the variable.
__ LoadRoot(a3, Heap::kTheHoleValueRootIndex);
@@ -4154,13 +4240,16 @@ void CEntryStub::Generate(MacroAssembler* masm) {
Isolate* isolate = masm->isolate();
ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress,
isolate);
- __ li(a0, Operand(false, RelocInfo::NONE));
+ __ li(a0, Operand(false, RelocInfo::NONE32));
__ li(a2, Operand(external_caught));
__ sw(a0, MemOperand(a2));
// Set pending exception and v0 to out of memory exception.
- Failure* out_of_memory = Failure::OutOfMemoryException();
+ Label already_have_failure;
+ JumpIfOOM(masm, v0, t0, &already_have_failure);
+ Failure* out_of_memory = Failure::OutOfMemoryException(0x1);
__ li(v0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
+ __ bind(&already_have_failure);
__ li(a2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
isolate)));
__ sw(v0, MemOperand(a2));
@@ -4531,12 +4620,177 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
}
+void ArrayLengthStub::Generate(MacroAssembler* masm) {
+ Label miss;
+ Register receiver;
+ if (kind() == Code::KEYED_LOAD_IC) {
+ // ----------- S t a t e -------------
+ // -- ra : return address
+ // -- a0 : key
+ // -- a1 : receiver
+ // -----------------------------------
+ __ Branch(&miss, ne, a0,
+ Operand(masm->isolate()->factory()->length_string()));
+ receiver = a1;
+ } else {
+ ASSERT(kind() == Code::LOAD_IC);
+ // ----------- S t a t e -------------
+ // -- a2 : name
+ // -- ra : return address
+ // -- a0 : receiver
+ // -- sp[0] : receiver
+ // -----------------------------------
+ receiver = a0;
+ }
+
+ StubCompiler::GenerateLoadArrayLength(masm, receiver, a3, &miss);
+ __ bind(&miss);
+ StubCompiler::GenerateLoadMiss(masm, kind());
+}
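+
+
// Editor's note: a hedged sketch (not part of the patch) of the pattern
// shared by ArrayLengthStub, FunctionPrototypeStub and StringLengthStub: a
// keyed load must first verify the key is the expected property name, while
// a named load already knows it; both then share one fast path.
#include <string>

enum class KindSketch { LOAD_IC, KEYED_LOAD_IC };

static bool LoadSpecialProperty(KindSketch kind,
                                const std::string& key,
                                const std::string& expected_name,
                                bool (*fast_path)()) {
  if (kind == KindSketch::KEYED_LOAD_IC && key != expected_name) {
    return false;  // Miss: fall back to the generic IC.
  }
  return fast_path();  // May itself miss for unsupported receivers.
}
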
+
+
+void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
+ Label miss;
+ Register receiver;
+ if (kind() == Code::KEYED_LOAD_IC) {
+ // ----------- S t a t e -------------
+ // -- ra : return address
+ // -- a0 : key
+ // -- a1 : receiver
+ // -----------------------------------
+ __ Branch(&miss, ne, a0,
+ Operand(masm->isolate()->factory()->prototype_string()));
+ receiver = a1;
+ } else {
+ ASSERT(kind() == Code::LOAD_IC);
+ // ----------- S t a t e -------------
+ // -- a2 : name
+ // -- ra : return address
+ // -- a0 : receiver
+ // -- sp[0] : receiver
+ // -----------------------------------
+ receiver = a0;
+ }
+
+ StubCompiler::GenerateLoadFunctionPrototype(masm, receiver, a3, t0, &miss);
+ __ bind(&miss);
+ StubCompiler::GenerateLoadMiss(masm, kind());
+}
+
+
+void StringLengthStub::Generate(MacroAssembler* masm) {
+ Label miss;
+ Register receiver;
+ if (kind() == Code::KEYED_LOAD_IC) {
+ // ----------- S t a t e -------------
+ // -- ra : return address
+ // -- a0 : key
+ // -- a1 : receiver
+ // -----------------------------------
+ __ Branch(&miss, ne, a0,
+ Operand(masm->isolate()->factory()->length_string()));
+ receiver = a1;
+ } else {
+ ASSERT(kind() == Code::LOAD_IC);
+ // ----------- S t a t e -------------
+ // -- a2 : name
+ // -- ra : return address
+ // -- a0 : receiver
+ // -- sp[0] : receiver
+ // -----------------------------------
+ receiver = a0;
+ }
+
+ StubCompiler::GenerateLoadStringLength(masm, receiver, a3, t0, &miss,
+ support_wrapper_);
+
+ __ bind(&miss);
+ StubCompiler::GenerateLoadMiss(masm, kind());
+}
+
+
+void StoreArrayLengthStub::Generate(MacroAssembler* masm) {
+ // This accepts as a receiver anything JSArray::SetElementsLength accepts
+ // (currently anything except for external arrays which means anything with
+ // elements of FixedArray type). Value must be a number, but only smis are
+ // accepted as the most common case.
+ Label miss;
+
+ Register receiver;
+ Register value;
+ if (kind() == Code::KEYED_STORE_IC) {
+ // ----------- S t a t e -------------
+ // -- ra : return address
+ // -- a0 : value
+ // -- a1 : key
+ // -- a2 : receiver
+ // -----------------------------------
+ __ Branch(&miss, ne, a1,
+ Operand(masm->isolate()->factory()->length_string()));
+ receiver = a2;
+ value = a0;
+ } else {
+ ASSERT(kind() == Code::STORE_IC);
+ // ----------- S t a t e -------------
+ // -- ra : return address
+ // -- a0 : value
+ // -- a1 : receiver
+ // -- a2 : key
+ // -----------------------------------
+ receiver = a1;
+ value = a0;
+ }
+ Register scratch = a3;
+
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(receiver, &miss);
+
+ // Check that the object is a JS array.
+ __ GetObjectType(receiver, scratch, scratch);
+ __ Branch(&miss, ne, scratch, Operand(JS_ARRAY_TYPE));
+
+ // Check that elements are FixedArray.
+ // We rely on StoreIC_ArrayLength below to deal with all types of
+ // fast elements (including COW).
+ __ lw(scratch, FieldMemOperand(receiver, JSArray::kElementsOffset));
+ __ GetObjectType(scratch, scratch, scratch);
+ __ Branch(&miss, ne, scratch, Operand(FIXED_ARRAY_TYPE));
+
+ // Check that the array has fast properties, otherwise the length
+ // property might have been redefined.
+ __ lw(scratch, FieldMemOperand(receiver, JSArray::kPropertiesOffset));
+ __ lw(scratch, FieldMemOperand(scratch, FixedArray::kMapOffset));
+ __ LoadRoot(at, Heap::kHashTableMapRootIndex);
+ __ Branch(&miss, eq, scratch, Operand(at));
+
+ // Check that value is a smi.
+ __ JumpIfNotSmi(value, &miss);
+
+ // Prepare tail call to StoreIC_ArrayLength.
+ __ Push(receiver, value);
+
+ ExternalReference ref =
+ ExternalReference(IC_Utility(IC::kStoreIC_ArrayLength), masm->isolate());
+ __ TailCallExternalReference(ref, 2, 1);
+
+ __ bind(&miss);
+
+ StubCompiler::GenerateStoreMiss(masm, kind());
+}
+
+
Register InstanceofStub::left() { return a0; }
Register InstanceofStub::right() { return a1; }
+void LoadFieldStub::Generate(MacroAssembler* masm) {
+ StubCompiler::DoGenerateFastPropertyLoad(masm, v0, reg_, inobject_, index_);
+ __ Ret();
+}
+
+
void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
// The displacement is the offset of the last parameter (if any)
// relative to the frame pointer.
@@ -4959,8 +5213,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
Isolate* isolate = masm->isolate();
- Label runtime, invoke_regexp;
-
+ Label runtime;
// Allocation of registers for this function. These are in callee save
// registers and will be preserved by the call to the native RegExp code, as
// this code is called using the normal C calling convention. When calling
@@ -5012,149 +5265,111 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Check that the number of captures fit in the static offsets vector buffer.
__ lw(a2,
FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
- // Calculate number of capture registers (number_of_captures + 1) * 2. This
- // uses the asumption that smis are 2 * their untagged value.
+ // Check (number_of_captures + 1) * 2 <= offsets vector size
+ // Or number_of_captures * 2 <= offsets vector size - 2
+ // Multiplying by 2 comes for free since a2 is smi-tagged.
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
- __ Addu(a2, a2, Operand(2)); // a2 was a smi.
- // Check that the static offsets vector buffer is large enough.
+ STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2);
__ Branch(
- &runtime, hi, a2, Operand(Isolate::kJSRegexpStaticOffsetsVectorSize));
-
- // a2: Number of capture registers
- // regexp_data: RegExp data (FixedArray)
- // Check that the second argument is a string.
- __ lw(subject, MemOperand(sp, kSubjectOffset));
- __ JumpIfSmi(subject, &runtime);
- __ GetObjectType(subject, a0, a0);
- __ And(a0, a0, Operand(kIsNotStringMask));
- STATIC_ASSERT(kStringTag == 0);
- __ Branch(&runtime, ne, a0, Operand(zero_reg));
-
- // Get the length of the string to r3.
- __ lw(a3, FieldMemOperand(subject, String::kLengthOffset));
-
- // a2: Number of capture registers
- // a3: Length of subject string as a smi
- // subject: Subject string
- // regexp_data: RegExp data (FixedArray)
- // Check that the third argument is a positive smi less than the subject
- // string length. A negative value will be greater (unsigned comparison).
- __ lw(a0, MemOperand(sp, kPreviousIndexOffset));
- __ JumpIfNotSmi(a0, &runtime);
- __ Branch(&runtime, ls, a3, Operand(a0));
-
- // a2: Number of capture registers
- // subject: Subject string
- // regexp_data: RegExp data (FixedArray)
- // Check that the fourth object is a JSArray object.
- __ lw(a0, MemOperand(sp, kLastMatchInfoOffset));
- __ JumpIfSmi(a0, &runtime);
- __ GetObjectType(a0, a1, a1);
- __ Branch(&runtime, ne, a1, Operand(JS_ARRAY_TYPE));
- // Check that the JSArray is in fast case.
- __ lw(last_match_info_elements,
- FieldMemOperand(a0, JSArray::kElementsOffset));
- __ lw(a0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
- __ Branch(&runtime, ne, a0, Operand(
- isolate->factory()->fixed_array_map()));
- // Check that the last match info has space for the capture registers and the
- // additional information.
- __ lw(a0,
- FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
- __ Addu(a2, a2, Operand(RegExpImpl::kLastMatchOverhead));
- __ sra(at, a0, kSmiTagSize); // Untag length for comparison.
- __ Branch(&runtime, gt, a2, Operand(at));
+ &runtime, hi, a2, Operand(Isolate::kJSRegexpStaticOffsetsVectorSize - 2));
// Reset offset for possibly sliced string.
__ mov(t0, zero_reg);
- // subject: Subject string
- // regexp_data: RegExp data (FixedArray)
- // Check the representation and encoding of the subject string.
- Label seq_string;
+ __ lw(subject, MemOperand(sp, kSubjectOffset));
+ __ JumpIfSmi(subject, &runtime);
+ __ mov(a3, subject); // Make a copy of the original subject string.
__ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
__ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
- // First check for flat string. None of the following string type tests will
- // succeed if subject is not a string or a short external string.
+ // subject: subject string
+ // a3: subject string
+ // a0: subject string instance type
+ // regexp_data: RegExp data (FixedArray)
+ // Handle subject string according to its encoding and representation:
+ // (1) Sequential string? If yes, go to (5).
+ // (2) Anything but sequential or cons? If yes, go to (6).
+ // (3) Cons string. If the string is flat, replace subject with first string.
+ //     Otherwise bail out.
+ // (4) Is subject external? If yes, go to (7).
+ // (5) Sequential string. Load regexp code according to encoding.
+ // (E) Carry on.
+ /// [...]
+
+ // Deferred code at the end of the stub:
+ // (6) Not a long external string? If yes, go to (8).
+ // (7) External string. Make it, offset-wise, look like a sequential string.
+ // Go to (5).
+ // (8) Short external string or not a string? If yes, bail out to runtime.
+ // (9) Sliced string. Replace subject with parent. Go to (4).
+
+ Label seq_string /* 5 */, external_string /* 7 */,
+ check_underlying /* 4 */, not_seq_nor_cons /* 6 */,
+ not_long_external /* 8 */;
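
// Editor's note: a hedged sketch (not part of the patch) of the dispatch
// the numbered steps above implement over V8's string representations. The
// enum and fields are simplified stand-ins, and flattened cons strings are
// assumed to have a flat first part, as V8's flattening invariant ensures.
enum class RepSketch { kSeq, kCons, kSliced, kExternal, kShortExternal };

struct StrSketch {
  RepSketch rep;
  StrSketch* first;      // Cons: first part (second must be empty to go fast).
  StrSketch* parent;     // Sliced: backing string.
  bool second_is_empty;  // Cons: whether the string is already flat.
};

// Returns the string whose characters the regexp code will scan, or nullptr
// where the stub would bail out to the runtime.
static StrSketch* FlattenForRegExp(StrSketch* subject) {
  if (subject->rep == RepSketch::kCons) {               // (3)
    if (!subject->second_is_empty) return nullptr;      // Not flat: bail out.
    subject = subject->first;
  } else if (subject->rep == RepSketch::kSliced) {      // (9)
    subject = subject->parent;
  }
  if (subject->rep == RepSketch::kShortExternal) return nullptr;  // (8)
  return subject;  // (5)/(7): sequential, or external made to look like one.
}
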
+
+ // (1) Sequential string? If yes, go to (5).
__ And(a1,
a0,
Operand(kIsNotStringMask |
kStringRepresentationMask |
kShortExternalStringMask));
STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
- __ Branch(&seq_string, eq, a1, Operand(zero_reg));
+ __ Branch(&seq_string, eq, a1, Operand(zero_reg)); // Go to (5).
- // subject: Subject string
- // a0: instance type if Subject string
- // regexp_data: RegExp data (FixedArray)
- // a1: whether subject is a string and if yes, its string representation
- // Check for flat cons string or sliced string.
- // A flat cons string is a cons string where the second part is the empty
- // string. In that case the subject string is just the first part of the cons
- // string. Also in this case the first part of the cons string is known to be
- // a sequential string or an external string.
- // In the case of a sliced string its offset has to be taken into account.
- Label cons_string, external_string, check_encoding;
+ // (2) Anything but sequential or cons? If yes, go to (6).
STATIC_ASSERT(kConsStringTag < kExternalStringTag);
STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
- __ Branch(&cons_string, lt, a1, Operand(kExternalStringTag));
- __ Branch(&external_string, eq, a1, Operand(kExternalStringTag));
+ // Go to (6).
+ __ Branch(&not_seq_nor_cons, ge, a1, Operand(kExternalStringTag));
- // Catch non-string subject or short external string.
- STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag !=0);
- __ And(at, a1, Operand(kIsNotStringMask | kShortExternalStringMask));
- __ Branch(&runtime, ne, at, Operand(zero_reg));
-
- // String is sliced.
- __ lw(t0, FieldMemOperand(subject, SlicedString::kOffsetOffset));
- __ sra(t0, t0, kSmiTagSize);
- __ lw(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
- // t5: offset of sliced string, smi-tagged.
- __ jmp(&check_encoding);
- // String is a cons string, check whether it is flat.
- __ bind(&cons_string);
+ // (3) Cons string. Check that it's flat.
+ // Replace subject with first string and reload instance type.
__ lw(a0, FieldMemOperand(subject, ConsString::kSecondOffset));
- __ LoadRoot(a1, Heap::kEmptyStringRootIndex);
+ __ LoadRoot(a1, Heap::kempty_stringRootIndex);
__ Branch(&runtime, ne, a0, Operand(a1));
__ lw(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
- // Is first part of cons or parent of slice a flat string?
- __ bind(&check_encoding);
+
+ // (4) Is subject external? If yes, go to (7).
+ __ bind(&check_underlying);
__ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
__ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
STATIC_ASSERT(kSeqStringTag == 0);
__ And(at, a0, Operand(kStringRepresentationMask));
- __ Branch(&external_string, ne, at, Operand(zero_reg));
+ // The underlying external string is never a short external string.
+ STATIC_CHECK(ExternalString::kMaxShortLength < ConsString::kMinLength);
+ STATIC_CHECK(ExternalString::kMaxShortLength < SlicedString::kMinLength);
+ __ Branch(&external_string, ne, at, Operand(zero_reg)); // Go to (7).
+ // (5) Sequential string. Load regexp code according to encoding.
__ bind(&seq_string);
- // subject: Subject string
- // regexp_data: RegExp data (FixedArray)
- // a0: Instance type of subject string
+ // subject: sequential subject string (or look-alike, external string)
+ // a3: original subject string
+ // Load previous index and check range before a3 is overwritten. We have to
+ // use a3 instead of subject here because subject might have been only made
+ // to look like a sequential string when it actually is an external string.
+ __ lw(a1, MemOperand(sp, kPreviousIndexOffset));
+ __ JumpIfNotSmi(a1, &runtime);
+ __ lw(a3, FieldMemOperand(a3, String::kLengthOffset));
+ __ Branch(&runtime, ls, a3, Operand(a1));
+ __ sra(a1, a1, kSmiTagSize); // Untag the Smi.
+
STATIC_ASSERT(kStringEncodingMask == 4);
STATIC_ASSERT(kOneByteStringTag == 4);
STATIC_ASSERT(kTwoByteStringTag == 0);
- // Find the code object based on the assumptions above.
__ And(a0, a0, Operand(kStringEncodingMask)); // Non-zero for ASCII.
__ lw(t9, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset));
__ sra(a3, a0, 2); // a3 is 1 for ASCII, 0 for UC16 (used below).
__ lw(t1, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset));
__ Movz(t9, t1, a0); // If UC16 (a0 is 0), replace t9 w/kDataUC16CodeOffset.
+ // (E) Carry on. String handling is done.
+ // t9: irregexp code
// Check that the irregexp code has been generated for the actual string
// encoding. If it has, the field contains a code object, otherwise it contains
// a smi (code flushing support).
__ JumpIfSmi(t9, &runtime);
- // a3: encoding of subject string (1 if ASCII, 0 if two_byte);
- // t9: code
- // subject: Subject string
- // regexp_data: RegExp data (FixedArray)
- // Load used arguments before starting to push arguments for call to native
- // RegExp code to avoid handling changing stack height.
- __ lw(a1, MemOperand(sp, kPreviousIndexOffset));
- __ sra(a1, a1, kSmiTagSize); // Untag the Smi.
-
// a1: previous index
// a3: encoding of subject string (1 if ASCII, 0 if two_byte);
// t9: code
@@ -5249,9 +5464,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// subject: subject string (callee saved)
// regexp_data: RegExp data (callee saved)
// last_match_info_elements: Last match info elements (callee saved)
-
// Check the result.
-
Label success;
__ Branch(&success, eq, v0, Operand(1));
// We expect exactly one result since we force the called regexp to behave
@@ -5292,10 +5505,29 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ lw(a1,
FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
// Calculate number of capture registers (number_of_captures + 1) * 2.
+ // Multiplying by 2 comes for free since a1 is smi-tagged.
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
__ Addu(a1, a1, Operand(2)); // a1 was a smi.
+ __ lw(a0, MemOperand(sp, kLastMatchInfoOffset));
+ __ JumpIfSmi(a0, &runtime);
+ __ GetObjectType(a0, a2, a2);
+ __ Branch(&runtime, ne, a2, Operand(JS_ARRAY_TYPE));
+ // Check that the JSArray is in fast case.
+ __ lw(last_match_info_elements,
+ FieldMemOperand(a0, JSArray::kElementsOffset));
+ __ lw(a0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
+ __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
+ __ Branch(&runtime, ne, a0, Operand(at));
+ // Check that the last match info has space for the capture registers and the
+ // additional information.
+ __ lw(a0,
+ FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
+ __ Addu(a2, a1, Operand(RegExpImpl::kLastMatchOverhead));
+ __ sra(at, a0, kSmiTagSize);
+ __ Branch(&runtime, gt, a2, Operand(at));
+
// a1: number of capture registers
// subject: subject string
// Store the capture count.
@@ -5309,10 +5541,11 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ mov(a2, subject);
__ RecordWriteField(last_match_info_elements,
RegExpImpl::kLastSubjectOffset,
- a2,
+ subject,
t3,
kRAHasNotBeenSaved,
kDontSaveFPRegs);
+ __ mov(subject, a2);
__ sw(subject,
FieldMemOperand(last_match_info_elements,
RegExpImpl::kLastInputOffset));
@@ -5354,8 +5587,17 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ lw(v0, MemOperand(sp, kLastMatchInfoOffset));
__ DropAndRet(4);
- // External string. Short external strings have already been ruled out.
- // a0: scratch
+ // Do the runtime call to execute the regexp.
+ __ bind(&runtime);
+ __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+
+ // Deferred code for string handling.
+ // (6) Not a long external string? If yes, go to (8).
+ __ bind(&not_seq_nor_cons);
+ // Go to (8).
+ __ Branch(&not_long_external, gt, a1, Operand(kExternalStringTag));
+
+ // (7) External string. Make it, offset-wise, look like a sequential string.
__ bind(&external_string);
__ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
__ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
@@ -5371,15 +5613,24 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ lw(subject,
FieldMemOperand(subject, ExternalString::kResourceDataOffset));
// Move the pointer so that offset-wise, it looks like a sequential string.
- STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
__ Subu(subject,
subject,
SeqTwoByteString::kHeaderSize - kHeapObjectTag);
- __ jmp(&seq_string);
+ __ jmp(&seq_string); // Go to (5).
- // Do the runtime call to execute the regexp.
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+ // (8) Short external string or not a string? If yes, bail out to runtime.
+ __ bind(&not_long_external);
+ STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag != 0);
+ __ And(at, a1, Operand(kIsNotStringMask | kShortExternalStringMask));
+ __ Branch(&runtime, ne, at, Operand(zero_reg));
+
+ // (9) Sliced string. Replace subject with parent. Go to (4).
+ // Load offset into t0 and replace subject string with parent.
+ __ lw(t0, FieldMemOperand(subject, SlicedString::kOffsetOffset));
+ __ sra(t0, t0, kSmiTagSize);
+ __ lw(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
+ __ jmp(&check_underlying); // Go to (4).
#endif // V8_INTERPRETED_REGEXP
}
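// A standalone C++ sketch of the tag ordering the STATIC_ASSERTs above rely
// on. The enum values mirror the asserted ordering but are assumptions of
// this sketch, not necessarily V8's exact constants: sequential and cons are
// the only representations below kExternalStringTag, so a single unsigned
// comparison separates the fast path, steps (3)-(5), from the deferred
// cases, steps (6)-(9).
#include <cassert>

enum RepresentationTag {
  kSeqTag      = 0,  // sequential: handled directly at (5)
  kConsTag     = 1,  // cons: flattened at (3)
  kExternalTag = 2,  // external: made to look sequential at (7)
  kSlicedTag   = 3   // sliced: unwrapped to its parent at (9)
};

// Mirrors "__ Branch(&not_seq_nor_cons, ge, a1, Operand(kExternalStringTag))".
static bool NeedsDeferredHandling(RepresentationTag tag) {
  return tag >= kExternalTag;
}

int main() {
  assert(!NeedsDeferredHandling(kSeqTag));
  assert(!NeedsDeferredHandling(kConsTag));
  assert(NeedsDeferredHandling(kExternalTag));
  assert(NeedsDeferredHandling(kSlicedTag));
  return 0;
}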
@@ -5471,12 +5722,13 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
}
-static void GenerateRecordCallTarget(MacroAssembler* masm) {
+static void GenerateRecordCallTargetNoArray(MacroAssembler* masm) {
// Cache the called function in a global property cell. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
// megamorphic.
// a1 : the function to call
// a2 : cache cell for call target
+ ASSERT(!FLAG_optimize_constructed_arrays);
Label done;
ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()),
@@ -5513,6 +5765,78 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
}
+static void GenerateRecordCallTarget(MacroAssembler* masm) {
+ // Cache the called function in a global property cell. Cache states
+ // are uninitialized, monomorphic (indicated by a JSFunction), and
+ // megamorphic.
+ // a1 : the function to call
+ // a2 : cache cell for call target
+ ASSERT(FLAG_optimize_constructed_arrays);
+ Label initialize, done, miss, megamorphic, not_array_function;
+
+ ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()),
+ masm->isolate()->heap()->undefined_value());
+ ASSERT_EQ(*TypeFeedbackCells::UninitializedSentinel(masm->isolate()),
+ masm->isolate()->heap()->the_hole_value());
+
+ // Load the cache state into a3.
+ __ lw(a3, FieldMemOperand(a2, JSGlobalPropertyCell::kValueOffset));
+
+ // A monomorphic cache hit or an already megamorphic state: invoke the
+ // function without changing the state.
+ __ Branch(&done, eq, a3, Operand(a1));
+ __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+ __ Branch(&done, eq, a3, Operand(at));
+
+ // Special handling of the Array() function, which caches not only the
+ // monomorphic Array function but also the initial ElementsKind, using
+ // special sentinels.
+ Handle<Object> terminal_kind_sentinel =
+ TypeFeedbackCells::MonomorphicArraySentinel(masm->isolate(),
+ LAST_FAST_ELEMENTS_KIND);
+ __ Branch(&miss, ne, a3, Operand(terminal_kind_sentinel));
+ // Make sure the function is the Array() function.
+ __ LoadArrayFunction(a3);
+ __ Branch(&megamorphic, ne, a1, Operand(a3));
+ __ jmp(&done);
+
+ __ bind(&miss);
+
+ // A monomorphic miss (i.e., here the cache is not uninitialized) goes
+ // megamorphic.
+ __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+ __ Branch(&initialize, eq, a3, Operand(at));
+ // MegamorphicSentinel is an immortal immovable object (undefined) so no
+ // write-barrier is needed.
+ __ bind(&megamorphic);
+ __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+ __ sw(at, FieldMemOperand(a2, JSGlobalPropertyCell::kValueOffset));
+
+ // An uninitialized cache is patched with the function, or with a sentinel
+ // indicating the ElementsKind if the function is the Array constructor.
+ __ bind(&initialize);
+ // Make sure the function is the Array() function.
+ __ LoadArrayFunction(a3);
+ __ Branch(&not_array_function, ne, a1, Operand(a3));
+
+ // The target function is the Array constructor; install a sentinel value in
+ // the constructor's type info cell that will track the initial ElementsKind
+ // that should be used for the array when it is constructed.
+ Handle<Object> initial_kind_sentinel =
+ TypeFeedbackCells::MonomorphicArraySentinel(masm->isolate(),
+ GetInitialFastElementsKind());
+ __ li(a3, Operand(initial_kind_sentinel));
+ __ sw(a3, FieldMemOperand(a2, JSGlobalPropertyCell::kValueOffset));
+ __ Branch(&done);
+
+ __ bind(&not_array_function);
+ __ sw(a1, FieldMemOperand(a2, JSGlobalPropertyCell::kValueOffset));
+ // No need for a write barrier here - cells are rescanned.
+
+ __ bind(&done);
+}
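// A standalone sketch of the cache-state machine the two generators above
// implement. The enum stands in for the sentinel objects the real stub
// stores (the-hole == uninitialized, undefined == megamorphic); the Array()
// special case, which stores an ElementsKind sentinel instead of the
// function, is omitted for brevity.
struct TypeFeedbackCell {
  enum State { UNINITIALIZED, MONOMORPHIC, MEGAMORPHIC };
  State state;
  void* function;  // meaningful only in the MONOMORPHIC state
};

static void RecordCallTarget(TypeFeedbackCell* cell, void* callee) {
  switch (cell->state) {
    case TypeFeedbackCell::UNINITIALIZED:  // First call seen: go monomorphic.
      cell->state = TypeFeedbackCell::MONOMORPHIC;
      cell->function = callee;
      break;
    case TypeFeedbackCell::MONOMORPHIC:    // New target: degrade for good.
      if (cell->function != callee) cell->state = TypeFeedbackCell::MEGAMORPHIC;
      break;
    case TypeFeedbackCell::MEGAMORPHIC:    // Terminal state, never reset.
      break;
  }
}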
+
+
void CallFunctionStub::Generate(MacroAssembler* masm) {
// a1 : the function to call
// a2 : cache cell for call target
@@ -5545,7 +5869,11 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
__ Branch(&slow, ne, a3, Operand(JS_FUNCTION_TYPE));
if (RecordCallTarget()) {
- GenerateRecordCallTarget(masm);
+ if (FLAG_optimize_constructed_arrays) {
+ GenerateRecordCallTarget(masm);
+ } else {
+ GenerateRecordCallTargetNoArray(masm);
+ }
}
// Fast-case: Invoke the function now.
@@ -5583,8 +5911,8 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
// Check for function proxy.
__ Branch(&non_function, ne, a3, Operand(JS_FUNCTION_PROXY_TYPE));
__ push(a1); // Put proxy as additional argument.
- __ li(a0, Operand(argc_ + 1, RelocInfo::NONE));
- __ li(a2, Operand(0, RelocInfo::NONE));
+ __ li(a0, Operand(argc_ + 1, RelocInfo::NONE32));
+ __ li(a2, Operand(0, RelocInfo::NONE32));
__ GetBuiltinEntry(a3, Builtins::CALL_FUNCTION_PROXY);
__ SetCallKind(t1, CALL_AS_METHOD);
{
@@ -5619,13 +5947,19 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
__ Branch(&slow, ne, a3, Operand(JS_FUNCTION_TYPE));
if (RecordCallTarget()) {
- GenerateRecordCallTarget(masm);
+ if (FLAG_optimize_constructed_arrays) {
+ GenerateRecordCallTarget(masm);
+ } else {
+ GenerateRecordCallTargetNoArray(masm);
+ }
}
// Jump to the function-specific construct stub.
- __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ lw(a2, FieldMemOperand(a2, SharedFunctionInfo::kConstructStubOffset));
- __ Addu(at, a2, Operand(Code::kHeaderSize - kHeapObjectTag));
+ Register jmp_reg = FLAG_optimize_constructed_arrays ? a3 : a2;
+ __ lw(jmp_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(jmp_reg, FieldMemOperand(jmp_reg,
+ SharedFunctionInfo::kConstructStubOffset));
+ __ Addu(at, jmp_reg, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(at);
// a0: number of arguments
@@ -5641,52 +5975,13 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
__ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
__ bind(&do_call);
// Set expected number of arguments to zero (not changing a0).
- __ li(a2, Operand(0, RelocInfo::NONE));
+ __ li(a2, Operand(0, RelocInfo::NONE32));
__ SetCallKind(t1, CALL_AS_METHOD);
__ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET);
}
-// Unfortunately you have to run without snapshots to see most of these
-// names in the profile since most compare stubs end up in the snapshot.
-void CompareStub::PrintName(StringStream* stream) {
- ASSERT((lhs_.is(a0) && rhs_.is(a1)) ||
- (lhs_.is(a1) && rhs_.is(a0)));
- const char* cc_name;
- switch (cc_) {
- case lt: cc_name = "LT"; break;
- case gt: cc_name = "GT"; break;
- case le: cc_name = "LE"; break;
- case ge: cc_name = "GE"; break;
- case eq: cc_name = "EQ"; break;
- case ne: cc_name = "NE"; break;
- default: cc_name = "UnknownCondition"; break;
- }
- bool is_equality = cc_ == eq || cc_ == ne;
- stream->Add("CompareStub_%s", cc_name);
- stream->Add(lhs_.is(a0) ? "_a0" : "_a1");
- stream->Add(rhs_.is(a0) ? "_a0" : "_a1");
- if (strict_ && is_equality) stream->Add("_STRICT");
- if (never_nan_nan_ && is_equality) stream->Add("_NO_NAN");
- if (!include_number_compare_) stream->Add("_NO_NUMBER");
- if (!include_smi_compare_) stream->Add("_NO_SMI");
-}
-
-
-int CompareStub::MinorKey() {
- // Encode the two parameters in a unique 16 bit value.
- ASSERT(static_cast<unsigned>(cc_) < (1 << 14));
- ASSERT((lhs_.is(a0) && rhs_.is(a1)) ||
- (lhs_.is(a1) && rhs_.is(a0)));
- return ConditionField::encode(static_cast<unsigned>(cc_))
- | RegisterField::encode(lhs_.is(a0))
- | StrictField::encode(strict_)
- | NeverNanNanField::encode(cc_ == eq ? never_nan_nan_ : false)
- | IncludeSmiCompareField::encode(include_smi_compare_);
-}
-
-
// StringCharCodeAtGenerator.
void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
Label flat_string;
@@ -5797,11 +6092,11 @@ void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiShiftSize == 0);
- ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1));
+ ASSERT(IsPowerOf2(String::kMaxOneByteCharCode + 1));
__ And(t0,
code_,
Operand(kSmiTagMask |
- ((~String::kMaxAsciiCharCode) << kSmiTagSize)));
+ ((~String::kMaxOneByteCharCode) << kSmiTagSize)));
__ Branch(&slow_case_, ne, t0, Operand(zero_reg));
__ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
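// A sketch of the combined check above: because kMaxOneByteCharCode + 1 is
// a power of two, a single AND verifies both that |code| is a smi (low tag
// bit clear) and that its untagged value fits in one byte. The constants
// follow V8's 32-bit smi encoding (tag 0, tag size 1) but are restated here
// as assumptions of the sketch.
#include <cstdint>

static bool IsOneByteCharCodeSmi(uint32_t code) {
  const uint32_t kSmiTagSize = 1;
  const uint32_t kSmiTagMask = 1;            // low bit must be 0 for a smi
  const uint32_t kMaxOneByteCharCode = 0xff;
  const uint32_t mask = kSmiTagMask | (~kMaxOneByteCharCode << kSmiTagSize);
  return (code & mask) == 0;  // non-zero would branch to slow_case_
}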
@@ -5834,23 +6129,6 @@ void StringCharFromCodeGenerator::GenerateSlow(
}
-// -------------------------------------------------------------------------
-// StringCharAtGenerator
-
-void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) {
- char_code_at_generator_.GenerateFast(masm);
- char_from_code_generator_.GenerateFast(masm);
-}
-
-
-void StringCharAtGenerator::GenerateSlow(
- MacroAssembler* masm,
- const RuntimeCallHelper& call_helper) {
- char_code_at_generator_.GenerateSlow(masm, call_helper);
- char_from_code_generator_.GenerateSlow(masm, call_helper);
-}
-
-
void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
Register dest,
Register src,
@@ -5996,7 +6274,7 @@ void StringHelper::GenerateCopyCharactersLong(MacroAssembler* masm,
}
-void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
+void StringHelper::GenerateTwoCharacterStringTableProbe(MacroAssembler* masm,
Register c1,
Register c2,
Register scratch1,
@@ -6009,7 +6287,7 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
Register scratch = scratch3;
// Make sure that both characters are not digits as such strings have a
- // different hash algorithm. Don't try to look for these in the symbol table.
+ // different hash algorithm. Don't try to look for these in the string table.
Label not_array_index;
__ Subu(scratch, c1, Operand(static_cast<int>('0')));
__ Branch(&not_array_index,
@@ -6044,43 +6322,43 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
// chars: two character string, char 1 in byte 0 and char 2 in byte 1.
// hash: hash of two character string.
- // Load symbol table.
- // Load address of first element of the symbol table.
- Register symbol_table = c2;
- __ LoadRoot(symbol_table, Heap::kSymbolTableRootIndex);
+ // Load string table.
+ // Load address of first element of the string table.
+ Register string_table = c2;
+ __ LoadRoot(string_table, Heap::kStringTableRootIndex);
Register undefined = scratch4;
__ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
- // Calculate capacity mask from the symbol table capacity.
+ // Calculate capacity mask from the string table capacity.
Register mask = scratch2;
- __ lw(mask, FieldMemOperand(symbol_table, SymbolTable::kCapacityOffset));
+ __ lw(mask, FieldMemOperand(string_table, StringTable::kCapacityOffset));
__ sra(mask, mask, 1);
__ Addu(mask, mask, -1);
- // Calculate untagged address of the first element of the symbol table.
- Register first_symbol_table_element = symbol_table;
- __ Addu(first_symbol_table_element, symbol_table,
- Operand(SymbolTable::kElementsStartOffset - kHeapObjectTag));
+ // Calculate untagged address of the first element of the string table.
+ Register first_string_table_element = string_table;
+ __ Addu(first_string_table_element, string_table,
+ Operand(StringTable::kElementsStartOffset - kHeapObjectTag));
// Registers.
// chars: two character string, char 1 in byte 0 and char 2 in byte 1.
// hash: hash of two character string
// mask: capacity mask
- // first_symbol_table_element: address of the first element of
- // the symbol table
+ // first_string_table_element: address of the first element of
+ // the string table
// undefined: the undefined object
// scratch: -
- // Perform a number of probes in the symbol table.
+ // Perform a number of probes in the string table.
const int kProbes = 4;
- Label found_in_symbol_table;
+ Label found_in_string_table;
Label next_probe[kProbes];
Register candidate = scratch5; // Scratch register contains candidate.
for (int i = 0; i < kProbes; i++) {
- // Calculate entry in symbol table.
+ // Calculate entry in string table.
if (i > 0) {
- __ Addu(candidate, hash, Operand(SymbolTable::GetProbeOffset(i)));
+ __ Addu(candidate, hash, Operand(StringTable::GetProbeOffset(i)));
} else {
__ mov(candidate, hash);
}
@@ -6088,9 +6366,9 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
__ And(candidate, candidate, Operand(mask));
// Load the entry from the string table.
- STATIC_ASSERT(SymbolTable::kEntrySize == 1);
+ STATIC_ASSERT(StringTable::kEntrySize == 1);
__ sll(scratch, candidate, kPointerSizeLog2);
- __ Addu(scratch, scratch, first_symbol_table_element);
+ __ Addu(scratch, scratch, first_string_table_element);
__ lw(candidate, MemOperand(scratch));
// If entry is undefined, no string with this hash can be found.
@@ -6102,7 +6380,7 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
// Must be the hole (deleted entry).
if (FLAG_debug_code) {
__ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
- __ Assert(eq, "oddball in symbol table is not undefined or the hole",
+ __ Assert(eq, "oddball in string table is not undefined or the hole",
scratch, Operand(candidate));
}
__ jmp(&next_probe[i]);
@@ -6120,8 +6398,8 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
// Check if the two characters match.
// Assumes that word load is little endian.
- __ lhu(scratch, FieldMemOperand(candidate, SeqAsciiString::kHeaderSize));
- __ Branch(&found_in_symbol_table, eq, chars, Operand(scratch));
+ __ lhu(scratch, FieldMemOperand(candidate, SeqOneByteString::kHeaderSize));
+ __ Branch(&found_in_string_table, eq, chars, Operand(scratch));
__ bind(&next_probe[i]);
}
@@ -6130,7 +6408,7 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
// Scratch register contains result when we fall through to here.
Register result = candidate;
- __ bind(&found_in_symbol_table);
+ __ bind(&found_in_string_table);
__ mov(v0, result);
}
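// A sketch of the probing loop above, with a flat array standing in for the
// string table. The triangular probe offset i*(i+1)/2 matches V8's
// HashTable::GetProbeOffset; the sentinel encodings (0 == undefined,
// 1 == the-hole) are assumptions of the sketch.
#include <cstdint>

static const int kProbes = 4;

static int FindTwoCharEntry(const uint32_t* table, uint32_t capacity_mask,
                            uint32_t hash, uint32_t chars) {
  for (int i = 0; i < kProbes; i++) {
    uint32_t index = (hash + i * (i + 1) / 2) & capacity_mask;
    uint32_t entry = table[index];
    if (entry == 0) return -1;  // undefined: no string with this hash exists
    if (entry == 1) continue;   // the-hole: deleted entry, keep probing
    if (entry == chars) return static_cast<int>(index);  // both chars match
  }
  return -1;  // give up after kProbes; the caller allocates a new string
}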
@@ -6231,6 +6509,9 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ Branch(&runtime, ne, t0, Operand(zero_reg));
+ Label single_char;
+ __ Branch(&single_char, eq, a2, Operand(1));
+
// Short-cut for the case of trivial substring.
Label return_v0;
// v0: original string
@@ -6260,7 +6541,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ Branch(&sliced_string, ne, t0, Operand(zero_reg));
// Cons string. Check whether it is flat, then fetch first part.
__ lw(t1, FieldMemOperand(v0, ConsString::kSecondOffset));
- __ LoadRoot(t0, Heap::kEmptyStringRootIndex);
+ __ LoadRoot(t0, Heap::kempty_stringRootIndex);
__ Branch(&runtime, ne, t1, Operand(t0));
__ lw(t1, FieldMemOperand(v0, ConsString::kFirstOffset));
// Update instance type.
@@ -6337,8 +6618,8 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ bind(&sequential_string);
// Locate first character of underlying subject string.
- STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
- __ Addu(t1, t1, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
+ __ Addu(t1, t1, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
__ bind(&allocate_result);
// Sequential ASCII string. Allocate the result.
@@ -6353,13 +6634,13 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ Addu(t1, t1, a3);
// Locate first character of result.
- __ Addu(a1, v0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ Addu(a1, v0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
// v0: result string
// a1: first character of result string
// a2: result string length
// t1: first character of substring to copy
- STATIC_ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
+ STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
StringHelper::GenerateCopyCharactersLong(
masm, a1, t1, a2, a3, t0, t2, t3, t4, COPY_ASCII | DEST_ALWAYS_ALIGNED);
__ jmp(&return_v0);
@@ -6391,6 +6672,18 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// Just jump to runtime to create the sub string.
__ bind(&runtime);
__ TailCallRuntime(Runtime::kSubString, 3, 1);
+
+ __ bind(&single_char);
+ // v0: original string
+ // a1: instance type
+ // a2: length
+ // a3: from index (untagged)
+ __ SmiTag(a3, a3);
+ StringCharAtGenerator generator(
+ v0, a3, a2, v0, &runtime, &runtime, &runtime, STRING_INDEX_IS_NUMBER);
+ generator.GenerateFast(masm);
+ __ DropAndRet(3);
+ generator.SkipSlow(masm, &runtime);
}
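// A sketch of the new length-one fast path above: one-character substrings
// are served from the single-character string cache (what
// StringCharAtGenerator consults) instead of allocating. The cache layout
// here is a hypothetical stand-in for illustration only.
#include <cstddef>
#include <string>

static std::string* single_char_cache[256];  // assumed, filled lazily

static std::string SubStringSketch(const std::string& s, int from, int to) {
  if (to - from == 1) {
    unsigned char c = static_cast<unsigned char>(s[from]);
    if (single_char_cache[c] != NULL)
      return *single_char_cache[c];  // cache hit: no allocation
  }
  return s.substr(from, to - from);  // general path / runtime fallback
}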
@@ -6491,7 +6784,7 @@ void StringCompareStub::GenerateAsciiCharsCompareLoop(
// doesn't need an additional compare.
__ SmiUntag(length);
__ Addu(scratch1, length,
- Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
__ Addu(left, left, Operand(scratch1));
__ Addu(right, right, Operand(scratch1));
__ Subu(length, zero_reg, length);
@@ -6631,8 +6924,8 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// Adding two lengths can't overflow.
STATIC_ASSERT(String::kMaxLength < String::kMaxLength * 2);
__ Addu(t2, a2, Operand(a3));
- // Use the symbol table when adding two one character strings, as it
- // helps later optimizations to return a symbol here.
+ // Use the string table when adding two one-character strings, as it
+ // helps later optimizations to return a string here.
__ Branch(&longer_than_two, ne, t2, Operand(2));
// Check that both strings are non-external ASCII strings.
@@ -6646,13 +6939,13 @@ void StringAddStub::Generate(MacroAssembler* masm) {
&call_runtime);
// Get the two characters forming the sub string.
- __ lbu(a2, FieldMemOperand(a0, SeqAsciiString::kHeaderSize));
- __ lbu(a3, FieldMemOperand(a1, SeqAsciiString::kHeaderSize));
+ __ lbu(a2, FieldMemOperand(a0, SeqOneByteString::kHeaderSize));
+ __ lbu(a3, FieldMemOperand(a1, SeqOneByteString::kHeaderSize));
- // Try to lookup two character string in symbol table. If it is not found
+ // Try to look up the two-character string in the string table. If it is not found
// just allocate a new one.
Label make_two_character_string;
- StringHelper::GenerateTwoCharacterSymbolTableProbe(
+ StringHelper::GenerateTwoCharacterStringTableProbe(
masm, a2, a3, t2, t3, t0, t1, t5, &make_two_character_string);
__ IncrementCounter(counters->string_add_native(), 1, a2, a3);
__ DropAndRet(2);
@@ -6665,7 +6958,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// in a little endian mode).
__ li(t2, Operand(2));
__ AllocateAsciiString(v0, t2, t0, t1, t5, &call_runtime);
- __ sh(a2, FieldMemOperand(v0, SeqAsciiString::kHeaderSize));
+ __ sh(a2, FieldMemOperand(v0, SeqOneByteString::kHeaderSize));
__ IncrementCounter(counters->string_add_native(), 1, a2, a3);
__ DropAndRet(2);
@@ -6712,12 +7005,11 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ And(at, t0, Operand(kAsciiDataHintMask));
__ and_(at, at, t1);
__ Branch(&ascii_data, ne, at, Operand(zero_reg));
-
- __ xor_(t0, t0, t1);
+ __ Xor(t0, t0, Operand(t1));
STATIC_ASSERT(kOneByteStringTag != 0 && kAsciiDataHintTag != 0);
__ And(t0, t0, Operand(kOneByteStringTag | kAsciiDataHintTag));
- __ Branch(
- &ascii_data, eq, t0, Operand(kOneByteStringTag | kAsciiDataHintTag));
+ __ Branch(&ascii_data, eq, t0,
+ Operand(kOneByteStringTag | kAsciiDataHintTag));
// Allocate a two byte cons string.
__ AllocateTwoByteConsString(v0, t2, t0, t1, &call_runtime);
@@ -6750,11 +7042,11 @@ void StringAddStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT(kSeqStringTag == 0);
__ And(t4, t0, Operand(kStringRepresentationMask));
- STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
+ STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
Label skip_first_add;
__ Branch(&skip_first_add, ne, t4, Operand(zero_reg));
__ Branch(USE_DELAY_SLOT, &first_prepared);
- __ addiu(t3, a0, SeqAsciiString::kHeaderSize - kHeapObjectTag);
+ __ addiu(t3, a0, SeqOneByteString::kHeaderSize - kHeapObjectTag);
__ bind(&skip_first_add);
// External string: rule out short external string and load string resource.
STATIC_ASSERT(kShortExternalStringTag != 0);
@@ -6765,11 +7057,11 @@ void StringAddStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT(kSeqStringTag == 0);
__ And(t4, t1, Operand(kStringRepresentationMask));
- STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
+ STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
Label skip_second_add;
__ Branch(&skip_second_add, ne, t4, Operand(zero_reg));
__ Branch(USE_DELAY_SLOT, &second_prepared);
- __ addiu(a1, a1, SeqAsciiString::kHeaderSize - kHeapObjectTag);
+ __ addiu(a1, a1, SeqOneByteString::kHeaderSize - kHeapObjectTag);
__ bind(&skip_second_add);
// External string: rule out short external string and load string resource.
STATIC_ASSERT(kShortExternalStringTag != 0);
@@ -6790,7 +7082,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ Branch(&non_ascii_string_add_flat_result, eq, t4, Operand(zero_reg));
__ AllocateAsciiString(v0, t2, t0, t1, t5, &call_runtime);
- __ Addu(t2, v0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ Addu(t2, v0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
// v0: result string.
// t3: first character of first string.
// a1: first character of second string
@@ -6878,7 +7170,7 @@ void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::SMIS);
+ ASSERT(state_ == CompareIC::SMI);
Label miss;
__ Or(a2, a1, a0);
__ JumpIfNotSmi(a2, &miss);
@@ -6899,19 +7191,19 @@ void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
}
-void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::HEAP_NUMBERS);
+void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
+ ASSERT(state_ == CompareIC::NUMBER);
Label generic_stub;
Label unordered, maybe_undefined1, maybe_undefined2;
Label miss;
- __ And(a2, a1, Operand(a0));
- __ JumpIfSmi(a2, &generic_stub);
- __ GetObjectType(a0, a2, a2);
- __ Branch(&maybe_undefined1, ne, a2, Operand(HEAP_NUMBER_TYPE));
- __ GetObjectType(a1, a2, a2);
- __ Branch(&maybe_undefined2, ne, a2, Operand(HEAP_NUMBER_TYPE));
+ if (left_ == CompareIC::SMI) {
+ __ JumpIfNotSmi(a1, &miss);
+ }
+ if (right_ == CompareIC::SMI) {
+ __ JumpIfNotSmi(a0, &miss);
+ }
// Inlining the double comparison and falling back to the general compare
// stub if NaN is involved or FPU is unsupported.
@@ -6919,10 +7211,33 @@ void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
CpuFeatures::Scope scope(FPU);
// Load left and right operand.
- __ Subu(a2, a1, Operand(kHeapObjectTag));
- __ ldc1(f0, MemOperand(a2, HeapNumber::kValueOffset));
+ Label done, left, left_smi, right_smi;
+ __ JumpIfSmi(a0, &right_smi);
+ __ CheckMap(a0, a2, Heap::kHeapNumberMapRootIndex, &maybe_undefined1,
+ DONT_DO_SMI_CHECK);
__ Subu(a2, a0, Operand(kHeapObjectTag));
__ ldc1(f2, MemOperand(a2, HeapNumber::kValueOffset));
+ __ Branch(&left);
+ __ bind(&right_smi);
+ __ SmiUntag(a2, a0); // Can't clobber a0 yet.
+ FPURegister single_scratch = f6;
+ __ mtc1(a2, single_scratch);
+ __ cvt_d_w(f2, single_scratch);
+
+ __ bind(&left);
+ __ JumpIfSmi(a1, &left_smi);
+ __ CheckMap(a1, a2, Heap::kHeapNumberMapRootIndex, &maybe_undefined2,
+ DONT_DO_SMI_CHECK);
+ __ Subu(a2, a1, Operand(kHeapObjectTag));
+ __ ldc1(f0, MemOperand(a2, HeapNumber::kValueOffset));
+ __ Branch(&done);
+ __ bind(&left_smi);
+ __ SmiUntag(a2, a1); // Can't clobber a1 yet.
+ single_scratch = f8;
+ __ mtc1(a2, single_scratch);
+ __ cvt_d_w(f0, single_scratch);
+
+ __ bind(&done);
// Return a result of -1, 0, or 1, or use CompareStub for NaNs.
Label fpu_eq, fpu_lt;
@@ -6946,15 +7261,16 @@ void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
}
__ bind(&unordered);
-
- CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS, a1, a0);
__ bind(&generic_stub);
- __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+ ICCompareStub stub(op_, CompareIC::GENERIC, CompareIC::GENERIC,
+ CompareIC::GENERIC);
+ __ Jump(stub.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
__ bind(&maybe_undefined1);
if (Token::IsOrderedRelationalCompareOp(op_)) {
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
__ Branch(&miss, ne, a0, Operand(at));
+ __ JumpIfSmi(a1, &unordered);
__ GetObjectType(a1, a2, a2);
__ Branch(&maybe_undefined2, ne, a2, Operand(HEAP_NUMBER_TYPE));
__ jmp(&unordered);
@@ -6971,8 +7287,8 @@ void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
}
-void ICCompareStub::GenerateSymbols(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::SYMBOLS);
+void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) {
+ ASSERT(state_ == CompareIC::INTERNALIZED_STRING);
Label miss;
// Registers containing left and right operands respectively.
@@ -6984,14 +7300,14 @@ void ICCompareStub::GenerateSymbols(MacroAssembler* masm) {
// Check that both operands are heap objects.
__ JumpIfEitherSmi(left, right, &miss);
- // Check that both operands are symbols.
+ // Check that both operands are internalized strings.
__ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
__ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
__ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
__ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
- STATIC_ASSERT(kSymbolTag != 0);
+ STATIC_ASSERT(kInternalizedTag != 0);
__ And(tmp1, tmp1, Operand(tmp2));
- __ And(tmp1, tmp1, kIsSymbolMask);
+ __ And(tmp1, tmp1, kIsInternalizedMask);
__ Branch(&miss, eq, tmp1, Operand(zero_reg));
// Make sure a0 is non-zero. At this point input operands are
// guaranteed to be non-zero.
@@ -6999,7 +7315,7 @@ void ICCompareStub::GenerateSymbols(MacroAssembler* masm) {
STATIC_ASSERT(EQUAL == 0);
STATIC_ASSERT(kSmiTag == 0);
__ mov(v0, right);
- // Symbols are compared by identity.
+ // Internalized strings are compared by identity.
__ Ret(ne, left, Operand(right));
__ li(v0, Operand(Smi::FromInt(EQUAL)));
__ Ret();
@@ -7009,8 +7325,62 @@ void ICCompareStub::GenerateSymbols(MacroAssembler* masm) {
}
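// A sketch of the identity-compare trick used above: the stub must return
// zero (Smi EQUAL) only when the operands are equal, so it reuses the right
// operand (a tagged pointer, guaranteed non-zero) as the "not equal" result
// and loads Smi(0) otherwise. Tagged values are modeled as intptr_t here.
#include <cstdint>

static intptr_t CompareByIdentity(intptr_t left, intptr_t right) {
  if (left != right) return right;  // any non-zero value signals "not equal"
  return 0;                         // Smi::FromInt(EQUAL) == 0
}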
+void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) {
+ ASSERT(state_ == CompareIC::UNIQUE_NAME);
+ ASSERT(GetCondition() == eq);
+ Label miss;
+
+ // Registers containing left and right operands respectively.
+ Register left = a1;
+ Register right = a0;
+ Register tmp1 = a2;
+ Register tmp2 = a3;
+
+ // Check that both operands are heap objects.
+ __ JumpIfEitherSmi(left, right, &miss);
+
+ // Check that both operands are unique names. This leaves the instance
+ // types loaded in tmp1 and tmp2.
+ STATIC_ASSERT(kInternalizedTag != 0);
+ __ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
+ __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
+ __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
+ __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
+
+ Label succeed1;
+ __ And(at, tmp1, Operand(kIsInternalizedMask));
+ __ Branch(&succeed1, ne, at, Operand(zero_reg));
+ __ Branch(&miss, ne, tmp1, Operand(SYMBOL_TYPE));
+ __ bind(&succeed1);
+
+ Label succeed2;
+ __ And(at, tmp2, Operand(kIsInternalizedMask));
+ __ Branch(&succeed2, ne, at, Operand(zero_reg));
+ __ Branch(&miss, ne, tmp2, Operand(SYMBOL_TYPE));
+ __ bind(&succeed2);
+
+ // Use a0 as result.
+ __ mov(v0, a0);
+
+ // Unique names are compared by identity.
+ Label done;
+ __ Branch(&done, ne, left, Operand(right));
+ // Make sure a0 is non-zero. At this point input operands are
+ // guaranteed to be non-zero.
+ ASSERT(right.is(a0));
+ STATIC_ASSERT(EQUAL == 0);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ li(v0, Operand(Smi::FromInt(EQUAL)));
+ __ bind(&done);
+ __ Ret();
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+}
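// A sketch of the two "unique name" tests above (succeed1/succeed2): a heap
// object is a unique name if its instance type either has the internalized
// bit set or is exactly SYMBOL_TYPE. The bit and type values below are
// illustrative assumptions, not V8's actual encodings.
#include <cstdint>

static bool IsUniqueName(uint32_t instance_type) {
  const uint32_t kIsInternalizedMask = 1u << 4;  // assumed flag bit
  const uint32_t SYMBOL_TYPE = 0x80;             // assumed type code
  if ((instance_type & kIsInternalizedMask) != 0) return true;
  return instance_type == SYMBOL_TYPE;  // otherwise it must be a Symbol
}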
+
+
void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::STRINGS);
+ ASSERT(state_ == CompareIC::STRING);
Label miss;
bool equality = Token::IsEqualityOp(op_);
@@ -7049,13 +7419,13 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
// Handle not identical strings.
- // Check that both strings are symbols. If they are, we're done
+ // Check that both strings are internalized strings. If they are, we're done
// because we already know they are not identical.
if (equality) {
ASSERT(GetCondition() == eq);
- STATIC_ASSERT(kSymbolTag != 0);
+ STATIC_ASSERT(kInternalizedTag != 0);
__ And(tmp3, tmp1, Operand(tmp2));
- __ And(tmp5, tmp3, Operand(kIsSymbolMask));
+ __ And(tmp5, tmp3, Operand(kIsInternalizedMask));
Label is_symbol;
__ Branch(&is_symbol, eq, tmp5, Operand(zero_reg));
// Make sure a0 is non-zero. At this point input operands are
@@ -7095,7 +7465,7 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::OBJECTS);
+ ASSERT(state_ == CompareIC::OBJECT);
Label miss;
__ And(a2, a1, Operand(a0));
__ JumpIfSmi(a2, &miss);
@@ -7219,10 +7589,9 @@ void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
// Push return address (accessible to GC through exit frame pc).
// This spot for ra was reserved in EnterExitFrame.
masm->sw(ra, MemOperand(sp, kCArgsSlotsSize));
- masm->li(ra,
- Operand(reinterpret_cast<intptr_t>(GetCode().location()),
- RelocInfo::CODE_TARGET),
- CONSTANT_SIZE);
+ intptr_t loc =
+ reinterpret_cast<intptr_t>(GetCode(masm->isolate()).location());
+ masm->li(ra, Operand(loc, RelocInfo::CODE_TARGET), CONSTANT_SIZE);
// Call the function.
masm->Jump(t9);
// Make sure the stored 'ra' points to this position.
@@ -7279,11 +7648,11 @@ void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
Label the_hole;
__ Branch(&the_hole, eq, entity_name, Operand(tmp));
- // Check if the entry name is not a symbol.
+ // Check if the entry name is not an internalized string.
__ lw(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
__ lbu(entity_name,
FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
- __ And(scratch0, entity_name, Operand(kIsSymbolMask));
+ __ And(scratch0, entity_name, Operand(kIsInternalizedMask));
__ Branch(miss, eq, scratch0, Operand(zero_reg));
__ bind(&the_hole);
@@ -7459,11 +7828,11 @@ void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
__ Branch(&in_dictionary, eq, entry_key, Operand(key));
if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
- // Check if the entry name is not a symbol.
+ // Check if the entry name is not an internalized string.
__ lw(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
__ lbu(entry_key,
FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
- __ And(result, entry_key, Operand(kIsSymbolMask));
+ __ And(result, entry_key, Operand(kIsInternalizedMask));
__ Branch(&maybe_in_dictionary, eq, result, Operand(zero_reg));
}
}
@@ -7497,7 +7866,6 @@ struct AheadOfTimeWriteBarrierStubList {
static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
// Used in RegExpExecStub.
{ REG(s2), REG(s0), REG(t3), EMIT_REMEMBERED_SET },
- { REG(s2), REG(a2), REG(t3), EMIT_REMEMBERED_SET },
// Used in CompileArrayPushCall.
// Also used in StoreIC::GenerateNormal via GenerateDictionaryStore.
// Also used in KeyedStoreIC::GenerateGeneric.
@@ -7553,13 +7921,14 @@ bool StoreBufferOverflowStub::IsPregenerated() {
}
-void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime() {
+void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
+ Isolate* isolate) {
StoreBufferOverflowStub stub1(kDontSaveFPRegs);
- stub1.GetCode()->set_is_pregenerated(true);
+ stub1.GetCode(isolate)->set_is_pregenerated(true);
}
-void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() {
+void RecordWriteStub::GenerateFixedRegStubsAheadOfTime(Isolate* isolate) {
for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
!entry->object.is(no_reg);
entry++) {
@@ -7568,7 +7937,7 @@ void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() {
entry->address,
entry->action,
kDontSaveFPRegs);
- stub.GetCode()->set_is_pregenerated(true);
+ stub.GetCode(isolate)->set_is_pregenerated(true);
}
}
@@ -7670,12 +8039,7 @@ void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
ASSERT(!address.is(a0));
__ Move(address, regs_.address());
__ Move(a0, regs_.object());
- if (mode == INCREMENTAL_COMPACTION) {
- __ Move(a1, address);
- } else {
- ASSERT(mode == INCREMENTAL);
- __ lw(a1, MemOperand(address, 0));
- }
+ __ Move(a1, address);
__ li(a2, Operand(ExternalReference::isolate_address()));
AllowExternalCallThatCantCauseGC scope(masm);
@@ -7837,7 +8201,7 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
// Array literal has ElementsKind of FAST_*_DOUBLE_ELEMENTS.
__ bind(&double_elements);
__ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
- __ StoreNumberToDoubleElements(a0, a3, a1,
+ __ StoreNumberToDoubleElements(a0, a3,
// Overwrites all regs after this.
t1, t2, t3, t5, a2,
&slow_elements);
@@ -7846,6 +8210,21 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
}
+void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
+ ASSERT(!Serializer::enabled());
+ bool save_fp_regs = CpuFeatures::IsSupported(FPU);
+ CEntryStub ces(1, save_fp_regs ? kSaveFPRegs : kDontSaveFPRegs);
+ __ Call(ces.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
+ int parameter_count_offset =
+ StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
+ __ lw(a1, MemOperand(fp, parameter_count_offset));
+ masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
+ __ sll(a1, a1, kPointerSizeLog2);
+ __ Addu(sp, sp, a1);
+ __ Ret();
+}
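// A sketch of the stack fixup above: after LeaveFrame, the caller's stack
// parameters are popped by scaling the saved count by the pointer size; the
// "sll a1, a1, kPointerSizeLog2" is a multiply by 4 on 32-bit MIPS.
#include <cstdint>

static uintptr_t PopStackParameters(uintptr_t sp, uint32_t parameter_count) {
  const int kPointerSizeLog2 = 2;  // 32-bit target assumed
  return sp + (static_cast<uintptr_t>(parameter_count) << kPointerSizeLog2);
}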
+
+
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
if (entry_hook_ != NULL) {
ProfileEntryHookStub stub;
diff --git a/src/3rdparty/v8/src/mips/code-stubs-mips.h b/src/3rdparty/v8/src/mips/code-stubs-mips.h
index e0954d8..cc7ac28 100644
--- a/src/3rdparty/v8/src/mips/code-stubs-mips.h
+++ b/src/3rdparty/v8/src/mips/code-stubs-mips.h
@@ -37,7 +37,7 @@ namespace internal {
// Compute a transcendental math function natively, or call the
// TranscendentalCache runtime function.
-class TranscendentalCacheStub: public CodeStub {
+class TranscendentalCacheStub: public PlatformCodeStub {
public:
enum ArgumentType {
TAGGED = 0 << TranscendentalCache::kTranscendentalTypeBits,
@@ -59,7 +59,7 @@ class TranscendentalCacheStub: public CodeStub {
};
-class StoreBufferOverflowStub: public CodeStub {
+class StoreBufferOverflowStub: public PlatformCodeStub {
public:
explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
: save_doubles_(save_fp) { }
@@ -67,7 +67,7 @@ class StoreBufferOverflowStub: public CodeStub {
void Generate(MacroAssembler* masm);
virtual bool IsPregenerated();
- static void GenerateFixedRegStubsAheadOfTime();
+ static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
virtual bool SometimesSetsUpAFrame() { return false; }
private:
@@ -78,7 +78,7 @@ class StoreBufferOverflowStub: public CodeStub {
};
-class UnaryOpStub: public CodeStub {
+class UnaryOpStub: public PlatformCodeStub {
public:
UnaryOpStub(Token::Value op,
UnaryOverwriteMode mode,
@@ -120,9 +120,9 @@ class UnaryOpStub: public CodeStub {
void GenerateSmiCodeSub(MacroAssembler* masm, Label* non_smi, Label* slow);
void GenerateSmiCodeBitNot(MacroAssembler* masm, Label* slow);
- void GenerateHeapNumberStub(MacroAssembler* masm);
- void GenerateHeapNumberStubSub(MacroAssembler* masm);
- void GenerateHeapNumberStubBitNot(MacroAssembler* masm);
+ void GenerateNumberStub(MacroAssembler* masm);
+ void GenerateNumberStubSub(MacroAssembler* masm);
+ void GenerateNumberStubBitNot(MacroAssembler* masm);
void GenerateHeapNumberCodeSub(MacroAssembler* masm, Label* slow);
void GenerateHeapNumberCodeBitNot(MacroAssembler* masm, Label* slow);
@@ -143,108 +143,6 @@ class UnaryOpStub: public CodeStub {
};
-class BinaryOpStub: public CodeStub {
- public:
- BinaryOpStub(Token::Value op, OverwriteMode mode)
- : op_(op),
- mode_(mode),
- operands_type_(BinaryOpIC::UNINITIALIZED),
- result_type_(BinaryOpIC::UNINITIALIZED) {
- use_fpu_ = CpuFeatures::IsSupported(FPU);
- ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
- }
-
- BinaryOpStub(
- int key,
- BinaryOpIC::TypeInfo operands_type,
- BinaryOpIC::TypeInfo result_type = BinaryOpIC::UNINITIALIZED)
- : op_(OpBits::decode(key)),
- mode_(ModeBits::decode(key)),
- use_fpu_(FPUBits::decode(key)),
- operands_type_(operands_type),
- result_type_(result_type) { }
-
- private:
- enum SmiCodeGenerateHeapNumberResults {
- ALLOW_HEAPNUMBER_RESULTS,
- NO_HEAPNUMBER_RESULTS
- };
-
- Token::Value op_;
- OverwriteMode mode_;
- bool use_fpu_;
-
- // Operand type information determined at runtime.
- BinaryOpIC::TypeInfo operands_type_;
- BinaryOpIC::TypeInfo result_type_;
-
- virtual void PrintName(StringStream* stream);
-
- // Minor key encoding in 16 bits RRRTTTVOOOOOOOMM.
- class ModeBits: public BitField<OverwriteMode, 0, 2> {};
- class OpBits: public BitField<Token::Value, 2, 7> {};
- class FPUBits: public BitField<bool, 9, 1> {};
- class OperandTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 10, 3> {};
- class ResultTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 13, 3> {};
-
- Major MajorKey() { return BinaryOp; }
- int MinorKey() {
- return OpBits::encode(op_)
- | ModeBits::encode(mode_)
- | FPUBits::encode(use_fpu_)
- | OperandTypeInfoBits::encode(operands_type_)
- | ResultTypeInfoBits::encode(result_type_);
- }
-
- void Generate(MacroAssembler* masm);
- void GenerateGeneric(MacroAssembler* masm);
- void GenerateSmiSmiOperation(MacroAssembler* masm);
- void GenerateFPOperation(MacroAssembler* masm,
- bool smi_operands,
- Label* not_numbers,
- Label* gc_required);
- void GenerateSmiCode(MacroAssembler* masm,
- Label* use_runtime,
- Label* gc_required,
- SmiCodeGenerateHeapNumberResults heapnumber_results);
- void GenerateLoadArguments(MacroAssembler* masm);
- void GenerateReturn(MacroAssembler* masm);
- void GenerateUninitializedStub(MacroAssembler* masm);
- void GenerateSmiStub(MacroAssembler* masm);
- void GenerateInt32Stub(MacroAssembler* masm);
- void GenerateHeapNumberStub(MacroAssembler* masm);
- void GenerateOddballStub(MacroAssembler* masm);
- void GenerateStringStub(MacroAssembler* masm);
- void GenerateBothStringStub(MacroAssembler* masm);
- void GenerateGenericStub(MacroAssembler* masm);
- void GenerateAddStrings(MacroAssembler* masm);
- void GenerateCallRuntime(MacroAssembler* masm);
-
- void GenerateHeapResultAllocation(MacroAssembler* masm,
- Register result,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Label* gc_required);
- void GenerateRegisterArgsPush(MacroAssembler* masm);
- void GenerateTypeTransition(MacroAssembler* masm);
- void GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm);
-
- virtual int GetCodeKind() { return Code::BINARY_OP_IC; }
-
- virtual InlineCacheState GetICState() {
- return BinaryOpIC::ToState(operands_type_);
- }
-
- virtual void FinishCode(Handle<Code> code) {
- code->set_binary_op_type(operands_type_);
- code->set_binary_op_result_type(result_type_);
- }
-
- friend class CodeGenerator;
-};
-
-
class StringHelper : public AllStatic {
public:
// Generate code for copying characters using a simple loop. This should only
@@ -275,14 +173,14 @@ class StringHelper : public AllStatic {
int flags);
- // Probe the symbol table for a two character string. If the string is
+ // Probe the string table for a two character string. If the string is
// not found by probing, a jump to the label not_found is performed. This jump
- // does not guarantee that the string is not in the symbol table. If the
+ // does not guarantee that the string is not in the string table. If the
// string is found, the code falls through with the string in register v0.
// Contents of both c1 and c2 registers are modified. At the exit c1 is
// guaranteed to contain a halfword with low and high bytes equal to
// initial contents of c1 and c2 respectively.
- static void GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
+ static void GenerateTwoCharacterStringTableProbe(MacroAssembler* masm,
Register c1,
Register c2,
Register scratch1,
@@ -322,7 +220,7 @@ enum StringAddFlags {
};
-class StringAddStub: public CodeStub {
+class StringAddStub: public PlatformCodeStub {
public:
explicit StringAddStub(StringAddFlags flags) : flags_(flags) {}
@@ -345,7 +243,7 @@ class StringAddStub: public CodeStub {
};
-class SubStringStub: public CodeStub {
+class SubStringStub: public PlatformCodeStub {
public:
SubStringStub() {}
@@ -357,7 +255,7 @@ class SubStringStub: public CodeStub {
};
-class StringCompareStub: public CodeStub {
+class StringCompareStub: public PlatformCodeStub {
public:
StringCompareStub() { }
@@ -398,7 +296,7 @@ class StringCompareStub: public CodeStub {
// This stub can convert a signed int32 to a heap number (double). It does
// not work for int32s that are in Smi range! No GC occurs during this stub
// so you don't have to set up the frame.
-class WriteInt32ToHeapNumberStub : public CodeStub {
+class WriteInt32ToHeapNumberStub : public PlatformCodeStub {
public:
WriteInt32ToHeapNumberStub(Register the_int,
Register the_heap_number,
@@ -415,7 +313,7 @@ class WriteInt32ToHeapNumberStub : public CodeStub {
}
bool IsPregenerated();
- static void GenerateFixedRegStubsAheadOfTime();
+ static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
private:
Register the_int_;
@@ -442,7 +340,7 @@ class WriteInt32ToHeapNumberStub : public CodeStub {
};
-class NumberToStringStub: public CodeStub {
+class NumberToStringStub: public PlatformCodeStub {
public:
NumberToStringStub() { }
@@ -468,7 +366,7 @@ class NumberToStringStub: public CodeStub {
};
-class RecordWriteStub: public CodeStub {
+class RecordWriteStub: public PlatformCodeStub {
public:
RecordWriteStub(Register object,
Register value,
@@ -492,7 +390,7 @@ class RecordWriteStub: public CodeStub {
};
virtual bool IsPregenerated();
- static void GenerateFixedRegStubsAheadOfTime();
+ static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
virtual bool SometimesSetsUpAFrame() { return false; }
static void PatchBranchIntoNop(MacroAssembler* masm, int pos) {
@@ -614,7 +512,7 @@ class RecordWriteStub: public CodeStub {
Register GetRegThatIsNotOneOf(Register r1,
Register r2,
Register r3) {
- for (int i = 0; i < Register::kNumAllocatableRegisters; i++) {
+ for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
Register candidate = Register::FromAllocationIndex(i);
if (candidate.is(r1)) continue;
if (candidate.is(r2)) continue;
@@ -673,7 +571,7 @@ class RecordWriteStub: public CodeStub {
// Enter C code from generated RegExp code in a way that allows
// the C code to fix the return address in case of a GC.
// Currently only needed on ARM and MIPS.
-class RegExpCEntryStub: public CodeStub {
+class RegExpCEntryStub: public PlatformCodeStub {
public:
RegExpCEntryStub() {}
virtual ~RegExpCEntryStub() {}
@@ -691,7 +589,7 @@ class RegExpCEntryStub: public CodeStub {
// keep the code which called into native pinned in the memory. Currently the
// simplest approach is to generate such a stub early enough so it can never
// be moved by GC.
-class DirectCEntryStub: public CodeStub {
+class DirectCEntryStub: public PlatformCodeStub {
public:
DirectCEntryStub() {}
void Generate(MacroAssembler* masm);
@@ -724,20 +622,6 @@ class FloatingPointHelper : public AllStatic {
Register scratch1,
Register scratch2);
- // Loads objects from a0 and a1 (right and left in binary operations) into
- // floating point registers. Depending on the destination the values ends up
- // either f14 and f12 or in a2/a3 and a0/a1 respectively. If the destination
- // is floating point registers FPU must be supported. If core registers are
- // requested when FPU is supported f12 and f14 will still be scratched. If
- // either a0 or a1 is not a number (not smi and not heap number object) the
- // not_number label is jumped to with a0 and a1 intact.
- static void LoadOperands(MacroAssembler* masm,
- FloatingPointHelper::Destination destination,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Label* not_number);
-
// Convert the smi or heap number in object to an int32 using the rules
// for ToInt32 as described in ECMAScript 9.5.: the value is truncated
// and brought into the range -2^31 .. +2^31 - 1.
@@ -773,6 +657,7 @@ class FloatingPointHelper : public AllStatic {
Register object,
Destination destination,
FPURegister double_dst,
+ FPURegister double_scratch,
Register dst1,
Register dst2,
Register heap_number_map,
@@ -794,7 +679,8 @@ class FloatingPointHelper : public AllStatic {
Register scratch1,
Register scratch2,
Register scratch3,
- FPURegister double_scratch,
+ FPURegister double_scratch0,
+ FPURegister double_scratch1,
Label* not_int32);
// Generate non FPU code to check if a double can be exactly represented by a
@@ -834,7 +720,12 @@ class FloatingPointHelper : public AllStatic {
Register heap_number_result,
Register scratch);
- private:
+ // Loads the number in |object| into floating point registers.
+ // Depending on |destination| the value ends up either in |dst| or
+ // in |dst1|/|dst2|. If |destination| is kFPURegisters, then FPU
+ // must be supported. If kCoreRegisters are requested and FPU is
+ // supported, |dst| will be scratched. If |object| is neither smi nor
+ // heap number, |not_number| is jumped to with |object| still intact.
static void LoadNumber(MacroAssembler* masm,
FloatingPointHelper::Destination destination,
Register object,
@@ -848,7 +739,7 @@ class FloatingPointHelper : public AllStatic {
};
-class StringDictionaryLookupStub: public CodeStub {
+class StringDictionaryLookupStub: public PlatformCodeStub {
public:
enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
diff --git a/src/3rdparty/v8/src/mips/codegen-mips.cc b/src/3rdparty/v8/src/mips/codegen-mips.cc
index 44e0359..bbb1a31 100644
--- a/src/3rdparty/v8/src/mips/codegen-mips.cc
+++ b/src/3rdparty/v8/src/mips/codegen-mips.cc
@@ -31,11 +31,11 @@
#include "codegen.h"
#include "macro-assembler.h"
+#include "simulator-mips.h"
namespace v8 {
namespace internal {
-#define __ ACCESS_MASM(masm)
UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) {
switch (type) {
@@ -49,6 +49,75 @@ UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) {
}
+#define __ masm.
+
+
+#if defined(USE_SIMULATOR)
+byte* fast_exp_mips_machine_code = NULL;
+double fast_exp_simulator(double x) {
+ return Simulator::current(Isolate::Current())->CallFP(
+ fast_exp_mips_machine_code, x, 0);
+}
+#endif
+
+
+UnaryMathFunction CreateExpFunction() {
+ if (!CpuFeatures::IsSupported(FPU)) return &exp;
+ if (!FLAG_fast_math) return &exp;
+ size_t actual_size;
+ byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
+ if (buffer == NULL) return &exp;
+ ExternalReference::InitializeMathExpData();
+
+ MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
+
+ {
+ CpuFeatures::Scope use_fpu(FPU);
+ DoubleRegister input = f12;
+ DoubleRegister result = f0;
+ DoubleRegister double_scratch1 = f4;
+ DoubleRegister double_scratch2 = f6;
+ Register temp1 = t0;
+ Register temp2 = t1;
+ Register temp3 = t2;
+
+ if (!IsMipsSoftFloatABI) {
+ // Input value is in f12 anyway, nothing to do.
+ } else {
+ __ Move(input, a0, a1);
+ }
+ __ Push(temp3, temp2, temp1);
+ MathExpGenerator::EmitMathExp(
+ &masm, input, result, double_scratch1, double_scratch2,
+ temp1, temp2, temp3);
+ __ Pop(temp3, temp2, temp1);
+ if (!IsMipsSoftFloatABI) {
+ // Result is already in f0, nothing to do.
+ } else {
+ __ Move(a0, a1, result);
+ }
+ __ Ret();
+ }
+
+ CodeDesc desc;
+ masm.GetCode(&desc);
+ ASSERT(!RelocInfo::RequiresRelocation(desc));
+
+ CPU::FlushICache(buffer, actual_size);
+ OS::ProtectCode(buffer, actual_size);
+
+#if !defined(USE_SIMULATOR)
+ return FUNCTION_CAST<UnaryMathFunction>(buffer);
+#else
+ fast_exp_mips_machine_code = buffer;
+ return &fast_exp_simulator;
+#endif
+}
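// A usage sketch for the factory above, assuming the existing
// UnaryMathFunction typedef (double (*)(double)): callers obtain the pointer
// once and call through it. Without FPU or --fast-math the plain libc exp is
// returned, and simulator builds get the fast_exp_simulator trampoline.
//
//   UnaryMathFunction fast_exp = CreateExpFunction();
//   double y = fast_exp(1.0);  // ~2.718282 with any of the implementations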
+
+
+#undef __
+
+
UnaryMathFunction CreateSqrtFunction() {
return &sqrt;
}
@@ -72,8 +141,11 @@ void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
// -------------------------------------------------------------------------
// Code generators
+#define __ ACCESS_MASM(masm)
+
void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
- MacroAssembler* masm) {
+ MacroAssembler* masm, AllocationSiteMode mode,
+ Label* allocation_site_info_found) {
// ----------- S t a t e -------------
// -- a0 : value
// -- a1 : key
@@ -82,6 +154,12 @@ void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
// -- a3 : target map, scratch for subsequent call
// -- t0 : scratch (elements)
// -----------------------------------
+ if (mode == TRACK_ALLOCATION_SITE) {
+ ASSERT(allocation_site_info_found != NULL);
+ masm->TestJSArrayForAllocationSiteInfo(a2, t0, eq,
+ allocation_site_info_found);
+ }
+
// Set transitioned map.
__ sw(a3, FieldMemOperand(a2, HeapObject::kMapOffset));
__ RecordWriteField(a2,
@@ -96,7 +174,7 @@ void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
void ElementsTransitionGenerator::GenerateSmiToDouble(
- MacroAssembler* masm, Label* fail) {
+ MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
// ----------- S t a t e -------------
// -- a0 : value
// -- a1 : key
@@ -110,6 +188,10 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
Register scratch = t6;
+ if (mode == TRACK_ALLOCATION_SITE) {
+ masm->TestJSArrayForAllocationSiteInfo(a2, t0, eq, fail);
+ }
+
// Check for empty arrays, which only require a map transition and no changes
// to the backing store.
__ lw(t0, FieldMemOperand(a2, JSObject::kElementsOffset));
@@ -176,7 +258,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
HeapObject::kMapOffset,
a3,
t5,
- kRAHasBeenSaved,
+ kRAHasNotBeenSaved,
kDontSaveFPRegs,
OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
@@ -239,7 +321,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
void ElementsTransitionGenerator::GenerateDoubleToObject(
- MacroAssembler* masm, Label* fail) {
+ MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
// ----------- S t a t e -------------
// -- a0 : value
// -- a1 : key
@@ -250,6 +332,10 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
// -----------------------------------
Label entry, loop, convert_hole, gc_required, only_change_map;
+ if (mode == TRACK_ALLOCATION_SITE) {
+ masm->TestJSArrayForAllocationSiteInfo(a2, t0, eq, fail);
+ }
+
// Check for empty arrays, which only require a map transition and no changes
// to the backing store.
__ lw(t0, FieldMemOperand(a2, JSObject::kElementsOffset));
@@ -389,7 +475,7 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
// the string.
__ bind(&cons_string);
__ lw(result, FieldMemOperand(string, ConsString::kSecondOffset));
- __ LoadRoot(at, Heap::kEmptyStringRootIndex);
+ __ LoadRoot(at, Heap::kempty_stringRootIndex);
__ Branch(call_runtime, ne, result, Operand(at));
// Get the first of the two strings and load its instance type.
__ lw(string, FieldMemOperand(string, ConsString::kFirstOffset));
@@ -408,7 +494,7 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
__ Branch(&external_string, ne, at, Operand(zero_reg));
// Prepare sequential strings
- STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
__ Addu(string,
string,
SeqTwoByteString::kHeaderSize - kHeapObjectTag);
@@ -446,6 +532,196 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
__ bind(&done);
}
+
+void SeqStringSetCharGenerator::Generate(MacroAssembler* masm,
+ String::Encoding encoding,
+ Register string,
+ Register index,
+ Register value) {
+ if (FLAG_debug_code) {
+ __ And(at, index, Operand(kSmiTagMask));
+ __ Check(eq, "Non-smi index", at, Operand(zero_reg));
+ __ And(at, value, Operand(kSmiTagMask));
+ __ Check(eq, "Non-smi value", at, Operand(zero_reg));
+
+ __ lw(at, FieldMemOperand(string, String::kLengthOffset));
+ __ Check(lt, "Index is too large", index, Operand(at));
+
+ __ Check(ge, "Index is negative", index, Operand(zero_reg));
+
+ __ lw(at, FieldMemOperand(string, HeapObject::kMapOffset));
+ __ lbu(at, FieldMemOperand(at, Map::kInstanceTypeOffset));
+
+ __ And(at, at, Operand(kStringRepresentationMask | kStringEncodingMask));
+ static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
+ static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
+ __ Subu(at, at, Operand(encoding == String::ONE_BYTE_ENCODING
+ ? one_byte_seq_type : two_byte_seq_type));
+ __ Check(eq, "Unexpected string type", at, Operand(zero_reg));
+ }
+
+ __ Addu(at,
+ string,
+ Operand(SeqString::kHeaderSize - kHeapObjectTag));
+ __ SmiUntag(value);
+ STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
+ if (encoding == String::ONE_BYTE_ENCODING) {
+ __ SmiUntag(index);
+ __ Addu(at, at, index);
+ __ sb(value, MemOperand(at));
+ } else {
+ // No need to untag a smi for two-byte addressing.
+ __ Addu(at, at, index);
+ __ sh(value, MemOperand(at));
+ }
+}
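
The two addressing paths above lean on smi tagging: with a 1-bit tag of 0, a tagged index is already value * 2, which is exactly the byte offset of a two-byte character, so only the one-byte path needs an untag. A minimal host-side sketch of that arithmetic (ordinary C++, not V8 code):

    #include <cassert>
    #include <cstdint>

    static const int kSmiTagSize = 1;  // mirrors the STATIC_ASSERT above

    int32_t SmiTag(int32_t value) { return value << kSmiTagSize; }
    int32_t SmiUntag(int32_t smi) { return smi >> kSmiTagSize; }

    int main() {
      int32_t index = SmiTag(7);
      // One-byte path: untag first, each character is one byte.
      assert(SmiUntag(index) == 7);
      // Two-byte path: the tagged value is already the byte offset (7 * 2).
      assert(index == 7 * 2);
      return 0;
    }
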
+
+
+static MemOperand ExpConstant(int index, Register base) {
+ return MemOperand(base, index * kDoubleSize);
+}
+
+
+void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
+ DoubleRegister input,
+ DoubleRegister result,
+ DoubleRegister double_scratch1,
+ DoubleRegister double_scratch2,
+ Register temp1,
+ Register temp2,
+ Register temp3) {
+ ASSERT(!input.is(result));
+ ASSERT(!input.is(double_scratch1));
+ ASSERT(!input.is(double_scratch2));
+ ASSERT(!result.is(double_scratch1));
+ ASSERT(!result.is(double_scratch2));
+ ASSERT(!double_scratch1.is(double_scratch2));
+ ASSERT(!temp1.is(temp2));
+ ASSERT(!temp1.is(temp3));
+ ASSERT(!temp2.is(temp3));
+ ASSERT(ExternalReference::math_exp_constants(0).address() != NULL);
+
+ Label done;
+
+ __ li(temp3, Operand(ExternalReference::math_exp_constants(0)));
+
+ __ ldc1(double_scratch1, ExpConstant(0, temp3));
+ __ Move(result, kDoubleRegZero);
+ __ BranchF(&done, NULL, ge, double_scratch1, input);
+ __ ldc1(double_scratch2, ExpConstant(1, temp3));
+ __ ldc1(result, ExpConstant(2, temp3));
+ __ BranchF(&done, NULL, ge, input, double_scratch2);
+ __ ldc1(double_scratch1, ExpConstant(3, temp3));
+ __ ldc1(result, ExpConstant(4, temp3));
+ __ mul_d(double_scratch1, double_scratch1, input);
+ __ add_d(double_scratch1, double_scratch1, result);
+ __ Move(temp2, temp1, double_scratch1);
+ __ sub_d(double_scratch1, double_scratch1, result);
+ __ ldc1(result, ExpConstant(6, temp3));
+ __ ldc1(double_scratch2, ExpConstant(5, temp3));
+ __ mul_d(double_scratch1, double_scratch1, double_scratch2);
+ __ sub_d(double_scratch1, double_scratch1, input);
+ __ sub_d(result, result, double_scratch1);
+ __ mul_d(input, double_scratch1, double_scratch1);
+ __ mul_d(result, result, input);
+ __ srl(temp1, temp2, 11);
+ __ ldc1(double_scratch2, ExpConstant(7, temp3));
+ __ mul_d(result, result, double_scratch2);
+ __ sub_d(result, result, double_scratch1);
+ __ ldc1(double_scratch2, ExpConstant(8, temp3));
+ __ add_d(result, result, double_scratch2);
+ __ li(at, 0x7ff);
+ __ And(temp2, temp2, at);
+ __ Addu(temp1, temp1, Operand(0x3ff));
+ __ sll(temp1, temp1, 20);
+
+ // Must not call ExpConstant() after overwriting temp3!
+ __ li(temp3, Operand(ExternalReference::math_exp_log_table()));
+ __ sll(at, temp2, 3);
+ __ addu(at, at, temp3);
+ __ lw(at, MemOperand(at));
+ __ Addu(temp3, temp3, Operand(kPointerSize));
+ __ sll(temp2, temp2, 3);
+ __ addu(temp2, temp2, temp3);
+ __ lw(temp2, MemOperand(temp2));
+ __ Or(temp1, temp1, temp2);
+ __ Move(input, at, temp1);
+ __ mul_d(result, result, input);
+ __ bind(&done);
+}
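
The sequence above is a table-driven range reduction: the 0x7ff mask picks an 11-bit table index, and the srl by 11 plus the 0x3ff bias assembles the result's exponent bits directly. A rough scalar sketch of the same idea, with illustrative structure only (std::exp stands in for the polynomial, std::exp2 for the 2048-entry table, and positive x is assumed so the shifts behave like floor division):

    #include <cmath>
    #include <cstdio>

    double ExpSketch(double x) {
      const double kLn2 = 0.6931471805599453;
      // Write x ~= k * (ln2 / 2048) + r with a small remainder r.
      int k = static_cast<int>(std::floor(x * 2048.0 / kLn2 + 0.5));
      double r = x - k * (kLn2 / 2048.0);
      int exponent = k >> 11;   // cf. srl(temp1, temp2, 11) and the 0x3ff bias
      int index = k & 0x7ff;    // cf. And(temp2, temp2, 0x7ff)
      double table = std::exp2(index / 2048.0);        // the table lookup
      return std::ldexp(table * std::exp(r), exponent);  // exp(r) ~ polynomial
    }

    int main() {
      std::printf("%.15f vs %.15f\n", ExpSketch(1.5), std::exp(1.5));
      return 0;
    }
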
+
+
+// nop(CODE_AGE_MARKER_NOP)
+static const uint32_t kCodeAgePatchFirstInstruction = 0x00010180;
+
+static byte* GetNoCodeAgeSequence(uint32_t* length) {
+ // The sequence of instructions that is patched out for aging code is the
+ // following boilerplate stack-building prologue that is found in FUNCTIONS.
+ static bool initialized = false;
+ static uint32_t sequence[kNoCodeAgeSequenceLength];
+ byte* byte_sequence = reinterpret_cast<byte*>(sequence);
+ *length = kNoCodeAgeSequenceLength * Assembler::kInstrSize;
+ if (!initialized) {
+ CodePatcher patcher(byte_sequence, kNoCodeAgeSequenceLength);
+ patcher.masm()->Push(ra, fp, cp, a1);
+ patcher.masm()->LoadRoot(at, Heap::kUndefinedValueRootIndex);
+ patcher.masm()->Addu(fp, sp, Operand(2 * kPointerSize));
+ initialized = true;
+ }
+ return byte_sequence;
+}
+
+
+bool Code::IsYoungSequence(byte* sequence) {
+ uint32_t young_length;
+ byte* young_sequence = GetNoCodeAgeSequence(&young_length);
+ bool result = !memcmp(sequence, young_sequence, young_length);
+ ASSERT(result ||
+ Memory::uint32_at(sequence) == kCodeAgePatchFirstInstruction);
+ return result;
+}
+
+
+void Code::GetCodeAgeAndParity(byte* sequence, Age* age,
+ MarkingParity* parity) {
+ if (IsYoungSequence(sequence)) {
+ *age = kNoAge;
+ *parity = NO_MARKING_PARITY;
+ } else {
+ Address target_address = Memory::Address_at(
+ sequence + Assembler::kInstrSize * (kNoCodeAgeSequenceLength - 1));
+ Code* stub = GetCodeFromTargetAddress(target_address);
+ GetCodeAgeAndParity(stub, age, parity);
+ }
+}
+
+
+void Code::PatchPlatformCodeAge(byte* sequence,
+ Code::Age age,
+ MarkingParity parity) {
+ uint32_t young_length;
+ byte* young_sequence = GetNoCodeAgeSequence(&young_length);
+ if (age == kNoAge) {
+ memcpy(sequence, young_sequence, young_length);
+ CPU::FlushICache(sequence, young_length);
+ } else {
+ Code* stub = GetCodeAgeStub(age, parity);
+ CodePatcher patcher(sequence, young_length / Assembler::kInstrSize);
+ // Mark this code sequence for FindPlatformCodeAgeSequence()
+ patcher.masm()->nop(Assembler::CODE_AGE_MARKER_NOP);
+ // Save the function's original return address
+ // (it will be clobbered by Call(t9))
+ patcher.masm()->mov(at, ra);
+ // Load the stub address to t9 and call it
+ patcher.masm()->li(t9,
+ Operand(reinterpret_cast<uint32_t>(stub->instruction_start())));
+ patcher.masm()->Call(t9);
+ // Record the stub address in the empty space for GetCodeAgeAndParity()
+ patcher.masm()->dd(reinterpret_cast<uint32_t>(stub->instruction_start()));
+ }
+}
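
For orientation (inferred from the patcher above, not from a separate spec): an aged sequence starts with the CODE_AGE_MARKER_NOP word 0x00010180, which is exactly what the ASSERT in IsYoungSequence cross-checks; ra is parked in at because Call(t9) clobbers it; and the final word is the raw stub entry address written by dd(), which GetCodeAgeAndParity reads back from kNoCodeAgeSequenceLength - 1 instructions past the sequence start.
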
+
+
#undef __
} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/mips/codegen-mips.h b/src/3rdparty/v8/src/mips/codegen-mips.h
index e704c4f..d429443 100644
--- a/src/3rdparty/v8/src/mips/codegen-mips.h
+++ b/src/3rdparty/v8/src/mips/codegen-mips.h
@@ -46,6 +46,10 @@ enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
class CodeGenerator: public AstVisitor {
public:
+ CodeGenerator() {
+ InitializeAstVisitor();
+ }
+
static bool MakeCode(CompilationInfo* info);
// Printing of AST, etc. as requested by flags.
@@ -70,6 +74,8 @@ class CodeGenerator: public AstVisitor {
int pos,
bool right_here = false);
+ DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
+
private:
DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
};
@@ -90,6 +96,22 @@ class StringCharLoadGenerator : public AllStatic {
DISALLOW_COPY_AND_ASSIGN(StringCharLoadGenerator);
};
+
+class MathExpGenerator : public AllStatic {
+ public:
+ static void EmitMathExp(MacroAssembler* masm,
+ DoubleRegister input,
+ DoubleRegister result,
+ DoubleRegister double_scratch1,
+ DoubleRegister double_scratch2,
+ Register temp1,
+ Register temp2,
+ Register temp3);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(MathExpGenerator);
+};
+
} } // namespace v8::internal
#endif // V8_MIPS_CODEGEN_MIPS_H_
diff --git a/src/3rdparty/v8/src/mips/constants-mips.cc b/src/3rdparty/v8/src/mips/constants-mips.cc
index 7d654f6..ddfa891 100644
--- a/src/3rdparty/v8/src/mips/constants-mips.cc
+++ b/src/3rdparty/v8/src/mips/constants-mips.cc
@@ -302,6 +302,8 @@ Instruction::Type Instruction::InstructionType() const {
return kRegisterType;
};
break;
+ case COP1X:
+ return kRegisterType;
// 16 bits Immediate type instructions. e.g.: addi dest, src, imm16.
case REGIMM:
case BEQ:
diff --git a/src/3rdparty/v8/src/mips/constants-mips.h b/src/3rdparty/v8/src/mips/constants-mips.h
index 3d58571..e7c55f5 100644
--- a/src/3rdparty/v8/src/mips/constants-mips.h
+++ b/src/3rdparty/v8/src/mips/constants-mips.h
@@ -99,7 +99,7 @@ const int kInvalidFPURegister = -1;
// FPU (coprocessor 1) control registers. Currently only FCSR is implemented.
const int kFCSRRegister = 31;
const int kInvalidFPUControlRegister = -1;
-const uint32_t kFPUInvalidResult = (uint32_t) (1 << 31) - 1;
+const uint32_t kFPUInvalidResult = static_cast<uint32_t>(1 << 31) - 1;
// FCSR constants.
const uint32_t kFCSRInexactFlagBit = 2;
@@ -216,6 +216,8 @@ const int kImm28Bits = 28;
// and are therefore shifted by 2.
const int kImmFieldShift = 2;
+const int kFrBits = 5;
+const int kFrShift = 21;
const int kFsShift = 11;
const int kFsBits = 5;
const int kFtShift = 16;
@@ -295,7 +297,9 @@ enum Opcode {
LDC1 = ((6 << 3) + 5) << kOpcodeShift,
SWC1 = ((7 << 3) + 1) << kOpcodeShift,
- SDC1 = ((7 << 3) + 5) << kOpcodeShift
+ SDC1 = ((7 << 3) + 5) << kOpcodeShift,
+
+ COP1X = ((1 << 4) + 3) << kOpcodeShift
};
enum SecondaryField {
@@ -416,6 +420,8 @@ enum SecondaryField {
CVT_S_L = ((4 << 3) + 0),
CVT_D_L = ((4 << 3) + 1),
// COP1 Encoding of Function Field When rs=PS.
+ // COP1X Encoding of Function Field.
+ MADD_D = ((4 << 3) + 1),
NULLSF = 0
};
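
For reference: COP1X madd.d is a multiply-add, fd = (fs * ft) + fr, which is why it carries a fourth register operand that plain COP1 arithmetic lacks; the kFrShift/kFrBits constants added above decode that extra fr field.
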
@@ -677,6 +683,10 @@ class Instruction {
return Bits(kFtShift + kFtBits - 1, kFtShift);
}
+ inline int FrValue() const {
+ return Bits(kFrShift + kFrBits - 1, kFrShift);
+ }
+
// Float Compare condition code instruction bits.
inline int FCccValue() const {
return Bits(kFCccShift + kFCccBits - 1, kFCccShift);
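
FrValue() follows the same Bits() pattern as the other operand accessors. A standalone sketch of the extraction it performs, assuming the usual COP1X layout where fr occupies bits 25..21 (kFrShift = 21, kFrBits = 5, both added above):

    #include <cassert>
    #include <cstdint>

    int Bits(uint32_t instr, int hi, int lo) {
      return static_cast<int>((instr >> lo) & ((1u << (hi - lo + 1)) - 1));
    }

    int main() {
      const int kFrShift = 21, kFrBits = 5;
      uint32_t instr = 19u << kFrShift;  // an instruction with fr = $f19
      assert(Bits(instr, kFrShift + kFrBits - 1, kFrShift) == 19);
      return 0;
    }
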
diff --git a/src/3rdparty/v8/src/mips/deoptimizer-mips.cc b/src/3rdparty/v8/src/mips/deoptimizer-mips.cc
index 9fd815b..8e96cd5 100644
--- a/src/3rdparty/v8/src/mips/deoptimizer-mips.cc
+++ b/src/3rdparty/v8/src/mips/deoptimizer-mips.cc
@@ -42,11 +42,14 @@ int Deoptimizer::patch_size() {
}
-void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
- HandleScope scope;
+void Deoptimizer::DeoptimizeFunctionWithPreparedFunctionList(
+ JSFunction* function) {
+ Isolate* isolate = function->GetIsolate();
+ HandleScope scope(isolate);
AssertNoAllocation no_allocation;
- if (!function->IsOptimized()) return;
+ ASSERT(function->IsOptimized());
+ ASSERT(function->FunctionsInFunctionListShareSameCode());
// The optimized code is going to be patched, so we cannot use it
// any more. Play safe and reset the whole cache.
@@ -70,14 +73,14 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
for (int i = 0; i < deopt_data->DeoptCount(); i++) {
if (deopt_data->Pc(i)->value() == -1) continue;
Address call_address = code_start_address + deopt_data->Pc(i)->value();
- Address deopt_entry = GetDeoptimizationEntry(i, LAZY);
+ Address deopt_entry = GetDeoptimizationEntry(isolate, i, LAZY);
int call_size_in_bytes = MacroAssembler::CallSize(deopt_entry,
- RelocInfo::NONE);
+ RelocInfo::NONE32);
int call_size_in_words = call_size_in_bytes / Assembler::kInstrSize;
ASSERT(call_size_in_bytes % Assembler::kInstrSize == 0);
ASSERT(call_size_in_bytes <= patch_size());
CodePatcher patcher(call_address, call_size_in_words);
- patcher.masm()->Call(deopt_entry, RelocInfo::NONE);
+ patcher.masm()->Call(deopt_entry, RelocInfo::NONE32);
ASSERT(prev_call_address == NULL ||
call_address >= prev_call_address + patch_size());
ASSERT(call_address + patch_size() <= code->instruction_end());
@@ -87,8 +90,6 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
#endif
}
- Isolate* isolate = code->GetIsolate();
-
// Add the deoptimizing code to the list.
DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code);
DeoptimizerData* data = isolate->deoptimizer_data();
@@ -120,7 +121,7 @@ void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
Code* check_code,
Code* replacement_code) {
const int kInstrSize = Assembler::kInstrSize;
- // This structure comes from FullCodeGenerator::EmitStackCheck.
+ // This structure comes from FullCodeGenerator::EmitBackEdgeBookkeeping.
// The call of the stack guard check has the following form:
// sltu at, sp, t0 / slt at, a3, zero_reg (in case of count based interrupts)
// beq at, zero_reg, ok
@@ -170,11 +171,7 @@ void Deoptimizer::RevertStackCheckCodeAt(Code* unoptimized_code,
// Restore the sltu instruction so beq can be taken again.
CodePatcher patcher(pc_after - 6 * kInstrSize, 1);
- if (FLAG_count_based_interrupts) {
- patcher.masm()->slt(at, a3, zero_reg);
- } else {
- patcher.masm()->sltu(at, sp, t0);
- }
+ patcher.masm()->slt(at, a3, zero_reg);
// Replace the on-stack replacement address in the load-immediate (lui/ori
// pair) with the entry address of the normal stack-check code.
@@ -209,7 +206,7 @@ static int LookupBailoutId(DeoptimizationInputData* data, BailoutId ast_id) {
void Deoptimizer::DoComputeOsrOutputFrame() {
DeoptimizationInputData* data = DeoptimizationInputData::cast(
- optimized_code_->deoptimization_data());
+ compiled_code_->deoptimization_data());
unsigned ast_id = data->OsrAstId()->value();
int bailout_id = LookupBailoutId(data, BailoutId(ast_id));
@@ -243,7 +240,7 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
unsigned input_frame_size = input_->GetFrameSize();
ASSERT(fixed_size + height_in_bytes == input_frame_size);
- unsigned stack_slot_size = optimized_code_->stack_slots() * kPointerSize;
+ unsigned stack_slot_size = compiled_code_->stack_slots() * kPointerSize;
unsigned outgoing_height = data->ArgumentsStackHeight(bailout_id)->value();
unsigned outgoing_size = outgoing_height * kPointerSize;
unsigned output_frame_size = fixed_size + stack_slot_size + outgoing_size;
@@ -335,7 +332,7 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
unsigned pc_offset = data->OsrPcOffset()->value();
uint32_t pc = reinterpret_cast<uint32_t>(
- optimized_code_->entry() + pc_offset);
+ compiled_code_->entry() + pc_offset);
output_[0]->SetPc(pc);
}
Code* continuation = isolate_->builtins()->builtin(Builtins::kNotifyOSR);
@@ -352,99 +349,185 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
}
-void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
- int frame_index) {
- JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
- unsigned height = iterator->Next();
- unsigned height_in_bytes = height * kPointerSize;
- if (FLAG_trace_deopt) {
- PrintF(" translating arguments adaptor => height=%d\n", height_in_bytes);
+void Deoptimizer::DoComputeCompiledStubFrame(TranslationIterator* iterator,
+ int frame_index) {
+ //
+ // FROM TO
+ // | .... | | .... |
+ // +-------------------------+ +-------------------------+
+ // | JSFunction continuation | | JSFunction continuation |
+ // +-------------------------+ +-------------------------+
+ // | | saved frame (fp) | | saved frame (fp) |
+ // | +=========================+<-fp +=========================+<-fp
+ // | | JSFunction context | | JSFunction context |
+ // v +-------------------------+ +-------------------------+
+ // | COMPILED_STUB marker | | STUB_FAILURE marker |
+ // +-------------------------+ +-------------------------+
+ // | | | caller args.arguments_ |
+ // | ... | +-------------------------+
+ // | | | caller args.length_ |
+ // |-------------------------|<-sp +-------------------------+
+ // | caller args pointer |
+ // +-------------------------+
+ // | caller stack param 1 |
+ // parameters in registers +-------------------------+
+ // and spilled to stack | .... |
+ // +-------------------------+
+ // | caller stack param n |
+ // +-------------------------+<-sp
+ // s0-s1 = number of parameters
+ // s2 = failure handler address
+ // fp = saved frame
+ // cp = JSFunction context
+ //
+
+ ASSERT(compiled_code_->kind() == Code::COMPILED_STUB);
+ int major_key = compiled_code_->major_key();
+ CodeStubInterfaceDescriptor* descriptor =
+ isolate_->code_stub_interface_descriptor(major_key);
+
+ // The output frame must have room for all pushed register parameters
+ // and the standard stack frame slots. Include space for an argument
+ // object to the callee and optionally the space to pass the argument
+ // object to the stub failure handler.
+ int height_in_bytes = kPointerSize * descriptor->register_param_count_ +
+ sizeof(Arguments) + kPointerSize;
+ int fixed_frame_size = StandardFrameConstants::kFixedFrameSize;
+ int input_frame_size = input_->GetFrameSize();
+ int output_frame_size = height_in_bytes + fixed_frame_size;
+ if (trace_) {
+ PrintF(" translating %s => StubFailureTrampolineStub, height=%d\n",
+ CodeStub::MajorName(static_cast<CodeStub::Major>(major_key), false),
+ height_in_bytes);
}
- unsigned fixed_frame_size = ArgumentsAdaptorFrameConstants::kFrameSize;
- unsigned output_frame_size = height_in_bytes + fixed_frame_size;
+ // The stub failure trampoline is a single frame.
- // Allocate and store the output frame description.
FrameDescription* output_frame =
- new(output_frame_size) FrameDescription(output_frame_size, function);
- output_frame->SetFrameType(StackFrame::ARGUMENTS_ADAPTOR);
-
- // Arguments adaptor can not be topmost or bottommost.
- ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
- ASSERT(output_[frame_index] == NULL);
+ new(output_frame_size) FrameDescription(output_frame_size, NULL);
+ output_frame->SetFrameType(StackFrame::STUB_FAILURE_TRAMPOLINE);
+ ASSERT(frame_index == 0);
output_[frame_index] = output_frame;
-
- // The top address of the frame is computed from the previous
- // frame's top and this frame's size.
- uint32_t top_address;
- top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
+ // The top address for the output frame can be computed from the input
+ // frame pointer and the output frame's height. Subtract space for the
+ // context and function slots.
+ intptr_t top_address = input_->GetRegister(fp.code()) - (2 * kPointerSize) -
+ height_in_bytes;
output_frame->SetTop(top_address);
- // Compute the incoming parameter translation.
- int parameter_count = height;
- unsigned output_offset = output_frame_size;
- for (int i = 0; i < parameter_count; ++i) {
- output_offset -= kPointerSize;
- DoTranslateCommand(iterator, frame_index, output_offset);
- }
-
- // Read caller's PC from the previous frame.
- output_offset -= kPointerSize;
- intptr_t callers_pc = output_[frame_index - 1]->GetPc();
- output_frame->SetFrameSlot(output_offset, callers_pc);
- if (FLAG_trace_deopt) {
+ // Read caller's PC (JSFunction continuation) from the input frame.
+ intptr_t input_frame_offset = input_frame_size - kPointerSize;
+ intptr_t output_frame_offset = output_frame_size - kPointerSize;
+ intptr_t value = input_->GetFrameSlot(input_frame_offset);
+ output_frame->SetFrameSlot(output_frame_offset, value);
+ if (trace_) {
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's pc\n",
- top_address + output_offset, output_offset, callers_pc);
+ top_address + output_frame_offset, output_frame_offset, value);
}
- // Read caller's FP from the previous frame, and set this frame's FP.
- output_offset -= kPointerSize;
- intptr_t value = output_[frame_index - 1]->GetFp();
- output_frame->SetFrameSlot(output_offset, value);
- intptr_t fp_value = top_address + output_offset;
- output_frame->SetFp(fp_value);
- if (FLAG_trace_deopt) {
+ // Read caller's FP from the input frame, and set this frame's FP.
+ input_frame_offset -= kPointerSize;
+ value = input_->GetFrameSlot(input_frame_offset);
+ output_frame_offset -= kPointerSize;
+ output_frame->SetFrameSlot(output_frame_offset, value);
+ intptr_t frame_ptr = input_->GetRegister(fp.code());
+ output_frame->SetRegister(fp.code(), frame_ptr);
+ output_frame->SetFp(frame_ptr);
+ if (trace_) {
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
- fp_value, output_offset, value);
+ top_address + output_frame_offset, output_frame_offset, value);
}
- // A marker value is used in place of the context.
- output_offset -= kPointerSize;
- intptr_t context = reinterpret_cast<intptr_t>(
- Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- output_frame->SetFrameSlot(output_offset, context);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; context (adaptor sentinel)\n",
- top_address + output_offset, output_offset, context);
+ // The context can be gotten from the input frame.
+ input_frame_offset -= kPointerSize;
+ value = input_->GetFrameSlot(input_frame_offset);
+ output_frame->SetRegister(cp.code(), value);
+ output_frame_offset -= kPointerSize;
+ output_frame->SetFrameSlot(output_frame_offset, value);
+ if (trace_) {
+ PrintF(" 0x%08x: [top + %d] <- 0x%08x ; context\n",
+ top_address + output_frame_offset, output_frame_offset, value);
}
- // The function was mentioned explicitly in the ARGUMENTS_ADAPTOR_FRAME.
- output_offset -= kPointerSize;
- value = reinterpret_cast<intptr_t>(function);
- output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; function\n",
- top_address + output_offset, output_offset, value);
+ // A marker value is used in place of the function.
+ output_frame_offset -= kPointerSize;
+ value = reinterpret_cast<intptr_t>(
+ Smi::FromInt(StackFrame::STUB_FAILURE_TRAMPOLINE));
+ output_frame->SetFrameSlot(output_frame_offset, value);
+ if (trace_) {
+ PrintF(" 0x%08x: [top + %d] <- 0x%08x ; function (stub fail sentinel)\n",
+ top_address + output_frame_offset, output_frame_offset, value);
}
- // Number of incoming arguments.
- output_offset -= kPointerSize;
- value = reinterpret_cast<uint32_t>(Smi::FromInt(height - 1));
- output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08x: [top + %d] <- 0x%08x ; argc (%d)\n",
- top_address + output_offset, output_offset, value, height - 1);
+ int caller_arg_count = 0;
+ if (descriptor->stack_parameter_count_ != NULL) {
+ caller_arg_count =
+ input_->GetRegister(descriptor->stack_parameter_count_->code());
}
- ASSERT(0 == output_offset);
+ // Build the Arguments object for the caller's parameters and a pointer to it.
+ output_frame_offset -= kPointerSize;
+ value = frame_ptr + StandardFrameConstants::kCallerSPOffset +
+ (caller_arg_count - 1) * kPointerSize;
+ output_frame->SetFrameSlot(output_frame_offset, value);
+ if (trace_) {
+ PrintF(" 0x%08x: [top + %d] <- 0x%08x ; args.arguments\n",
+ top_address + output_frame_offset, output_frame_offset, value);
+ }
- Builtins* builtins = isolate_->builtins();
- Code* adaptor_trampoline =
- builtins->builtin(Builtins::kArgumentsAdaptorTrampoline);
- uint32_t pc = reinterpret_cast<uint32_t>(
- adaptor_trampoline->instruction_start() +
- isolate_->heap()->arguments_adaptor_deopt_pc_offset()->value());
- output_frame->SetPc(pc);
+ output_frame_offset -= kPointerSize;
+ value = caller_arg_count;
+ output_frame->SetFrameSlot(output_frame_offset, value);
+ if (trace_) {
+ PrintF(" 0x%08x: [top + %d] <- 0x%08x ; args.length\n",
+ top_address + output_frame_offset, output_frame_offset, value);
+ }
+
+ output_frame_offset -= kPointerSize;
+ value = frame_ptr - (output_frame_size - output_frame_offset) -
+ StandardFrameConstants::kMarkerOffset + kPointerSize;
+ output_frame->SetFrameSlot(output_frame_offset, value);
+ if (trace_) {
+ PrintF(" 0x%08x: [top + %d] <- 0x%08x ; args*\n",
+ top_address + output_frame_offset, output_frame_offset, value);
+ }
+
+ // Copy the register parameters to the failure frame.
+ for (int i = 0; i < descriptor->register_param_count_; ++i) {
+ output_frame_offset -= kPointerSize;
+ DoTranslateCommand(iterator, 0, output_frame_offset);
+ }
+
+ ASSERT(0 == output_frame_offset);
+
+ for (int i = 0; i < DoubleRegister::kMaxNumRegisters; ++i) {
+ double double_value = input_->GetDoubleRegister(i);
+ output_frame->SetDoubleRegister(i, double_value);
+ }
+
+ ApiFunction function(descriptor->deoptimization_handler_);
+ ExternalReference xref(&function, ExternalReference::BUILTIN_CALL, isolate_);
+ intptr_t handler = reinterpret_cast<intptr_t>(xref.address());
+ int params = descriptor->register_param_count_;
+ if (descriptor->stack_parameter_count_ != NULL) {
+ params++;
+ }
+ output_frame->SetRegister(s0.code(), params);
+ output_frame->SetRegister(s1.code(), (params - 1) * kPointerSize);
+ output_frame->SetRegister(s2.code(), handler);
+
+ // Compute this frame's PC, state, and continuation.
+ Code* trampoline = NULL;
+ int extra = descriptor->extra_expression_stack_count_;
+ StubFailureTrampolineStub(extra).FindCodeInCache(&trampoline, isolate_);
+ ASSERT(trampoline != NULL);
+ output_frame->SetPc(reinterpret_cast<intptr_t>(
+ trampoline->instruction_start()));
+ output_frame->SetState(Smi::FromInt(FullCodeGenerator::NO_REGISTERS));
+ Code* notify_failure =
+ isolate_->builtins()->builtin(Builtins::kNotifyStubFailure);
+ output_frame->SetContinuation(
+ reinterpret_cast<intptr_t>(notify_failure->entry()));
}
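
A toy recomputation of the sizes used above, assuming a 32-bit target (kPointerSize == 4) and a hypothetical stub with two register parameters; the two Arguments slots plus the args pointer correspond to the args.length_/args.arguments_/"caller args pointer" rows in the diagram:

    #include <cstdio>

    int main() {
      const int kPointerSize = 4;
      const int register_param_count = 2;            // hypothetical descriptor
      const int kFixedFrameSize = 4 * kPointerSize;  // ra, fp, cp, marker
      int height_in_bytes = kPointerSize * register_param_count +
                            2 * kPointerSize +  // Arguments{length_, arguments_}
                            kPointerSize;       // the args pointer slot
      int output_frame_size = height_in_bytes + kFixedFrameSize;
      std::printf("height=%d bytes, frame=%d bytes\n",
                  height_in_bytes, output_frame_size);  // prints 20 and 36
      return 0;
    }
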
@@ -501,7 +584,7 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
output_frame->SetFrameSlot(output_offset, value);
intptr_t fp_value = top_address + output_offset;
output_frame->SetFp(fp_value);
- if (FLAG_trace_deopt) {
+ if (trace_) {
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
fp_value, output_offset, value);
}
@@ -510,7 +593,7 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
output_offset -= kPointerSize;
value = output_[frame_index - 1]->GetContext();
output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
+ if (trace_) {
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; context\n",
top_address + output_offset, output_offset, value);
}
@@ -519,7 +602,7 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
output_offset -= kPointerSize;
value = reinterpret_cast<intptr_t>(Smi::FromInt(StackFrame::CONSTRUCT));
output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
+ if (trace_) {
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; function (construct sentinel)\n",
top_address + output_offset, output_offset, value);
}
@@ -528,7 +611,7 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
output_offset -= kPointerSize;
value = reinterpret_cast<intptr_t>(construct_stub);
output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
+ if (trace_) {
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; code object\n",
top_address + output_offset, output_offset, value);
}
@@ -537,7 +620,7 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
output_offset -= kPointerSize;
value = reinterpret_cast<uint32_t>(Smi::FromInt(height - 1));
output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
+ if (trace_) {
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; argc (%d)\n",
top_address + output_offset, output_offset, value, height - 1);
}
@@ -546,7 +629,7 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
output_offset -= kPointerSize;
value = reinterpret_cast<intptr_t>(function);
output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
+ if (trace_) {
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; constructor function\n",
top_address + output_offset, output_offset, value);
}
@@ -556,7 +639,7 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
output_offset -= kPointerSize;
value = output_frame->GetFrameSlot(output_frame_size - kPointerSize);
output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
+ if (trace_) {
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; allocated receiver\n",
top_address + output_offset, output_offset, value);
}
@@ -570,124 +653,6 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
}
-void Deoptimizer::DoComputeAccessorStubFrame(TranslationIterator* iterator,
- int frame_index,
- bool is_setter_stub_frame) {
- JSFunction* accessor = JSFunction::cast(ComputeLiteral(iterator->Next()));
- // The receiver (and the implicit return value, if any) are expected in
- // registers by the LoadIC/StoreIC, so they don't belong to the output stack
- // frame. This means that we have to use a height of 0.
- unsigned height = 0;
- unsigned height_in_bytes = height * kPointerSize;
- const char* kind = is_setter_stub_frame ? "setter" : "getter";
- if (FLAG_trace_deopt) {
- PrintF(" translating %s stub => height=%u\n", kind, height_in_bytes);
- }
-
- // We need 5 stack entries from StackFrame::INTERNAL (ra, fp, cp, frame type,
- // code object, see MacroAssembler::EnterFrame). For a setter stub frame we
- // need one additional entry for the implicit return value, see
- // StoreStubCompiler::CompileStoreViaSetter.
- unsigned fixed_frame_entries = 5 + (is_setter_stub_frame ? 1 : 0);
- unsigned fixed_frame_size = fixed_frame_entries * kPointerSize;
- unsigned output_frame_size = height_in_bytes + fixed_frame_size;
-
- // Allocate and store the output frame description.
- FrameDescription* output_frame =
- new(output_frame_size) FrameDescription(output_frame_size, accessor);
- output_frame->SetFrameType(StackFrame::INTERNAL);
-
- // A frame for an accessor stub can not be the topmost or bottommost one.
- ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
- ASSERT(output_[frame_index] == NULL);
- output_[frame_index] = output_frame;
-
- // The top address of the frame is computed from the previous frame's top and
- // this frame's size.
- uint32_t top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
- output_frame->SetTop(top_address);
-
- unsigned output_offset = output_frame_size;
-
- // Read caller's PC from the previous frame.
- output_offset -= kPointerSize;
- intptr_t value = output_[frame_index - 1]->GetPc();
- output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
- " ; caller's pc\n",
- top_address + output_offset, output_offset, value);
- }
-
- // Read caller's FP from the previous frame, and set this frame's FP.
- output_offset -= kPointerSize;
- value = output_[frame_index - 1]->GetFp();
- output_frame->SetFrameSlot(output_offset, value);
- intptr_t fp_value = top_address + output_offset;
- output_frame->SetFp(fp_value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
- " ; caller's fp\n",
- fp_value, output_offset, value);
- }
-
- // The context can be gotten from the previous frame.
- output_offset -= kPointerSize;
- value = output_[frame_index - 1]->GetContext();
- output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
- " ; context\n",
- top_address + output_offset, output_offset, value);
- }
-
- // A marker value is used in place of the function.
- output_offset -= kPointerSize;
- value = reinterpret_cast<intptr_t>(Smi::FromInt(StackFrame::INTERNAL));
- output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
- " ; function (%s sentinel)\n",
- top_address + output_offset, output_offset, value, kind);
- }
-
- // Get Code object from accessor stub.
- output_offset -= kPointerSize;
- Builtins::Name name = is_setter_stub_frame ?
- Builtins::kStoreIC_Setter_ForDeopt :
- Builtins::kLoadIC_Getter_ForDeopt;
- Code* accessor_stub = isolate_->builtins()->builtin(name);
- value = reinterpret_cast<intptr_t>(accessor_stub);
- output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
- " ; code object\n",
- top_address + output_offset, output_offset, value);
- }
-
- // Skip receiver.
- Translation::Opcode opcode =
- static_cast<Translation::Opcode>(iterator->Next());
- iterator->Skip(Translation::NumberOfOperandsFor(opcode));
-
- if (is_setter_stub_frame) {
- // The implicit return value was part of the artificial setter stub
- // environment.
- output_offset -= kPointerSize;
- DoTranslateCommand(iterator, frame_index, output_offset);
- }
-
- ASSERT(0 == output_offset);
-
- Smi* offset = is_setter_stub_frame ?
- isolate_->heap()->setter_stub_deopt_pc_offset() :
- isolate_->heap()->getter_stub_deopt_pc_offset();
- intptr_t pc = reinterpret_cast<intptr_t>(
- accessor_stub->instruction_start() + offset->value());
- output_frame->SetPc(pc);
-}
-
-
// This code is very similar to ia32/arm code, but relies on register names
// (fp, sp) and how the frame is laid out.
void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
@@ -705,7 +670,7 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
}
unsigned height = iterator->Next();
unsigned height_in_bytes = height * kPointerSize;
- if (FLAG_trace_deopt) {
+ if (trace_) {
PrintF(" translating ");
function->PrintName();
PrintF(" => node=%d, height=%d\n", node_id.ToInt(), height_in_bytes);
@@ -769,7 +734,7 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
value = output_[frame_index - 1]->GetPc();
}
output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
+ if (trace_) {
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's pc\n",
top_address + output_offset, output_offset, value);
}
@@ -792,7 +757,7 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
if (is_topmost) {
output_frame->SetRegister(fp.code(), fp_value);
}
- if (FLAG_trace_deopt) {
+ if (trace_) {
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
fp_value, output_offset, value);
}
@@ -810,7 +775,7 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
output_frame->SetFrameSlot(output_offset, value);
output_frame->SetContext(value);
if (is_topmost) output_frame->SetRegister(cp.code(), value);
- if (FLAG_trace_deopt) {
+ if (trace_) {
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; context\n",
top_address + output_offset, output_offset, value);
}
@@ -823,7 +788,7 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
// input frame.
ASSERT(!is_bottommost || input_->GetFrameSlot(input_offset) == value);
output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
+ if (trace_) {
PrintF(" 0x%08x: [top + %d] <- 0x%08x ; function\n",
top_address + output_offset, output_offset, value);
}
@@ -871,7 +836,7 @@ void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
}
input_->SetRegister(sp.code(), reinterpret_cast<intptr_t>(frame->sp()));
input_->SetRegister(fp.code(), reinterpret_cast<intptr_t>(frame->fp()));
- for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; i++) {
+ for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); i++) {
input_->SetDoubleRegister(i, 0.0);
}
@@ -892,7 +857,6 @@ void Deoptimizer::EntryGenerator::Generate() {
Isolate* isolate = masm()->isolate();
- CpuFeatures::Scope scope(FPU);
// Unlike on ARM we don't save all the registers, just the useful ones.
// For the rest, there are gaps on the stack, so the offsets remain the same.
const int kNumberOfRegisters = Register::kNumRegisters;
@@ -901,14 +865,19 @@ void Deoptimizer::EntryGenerator::Generate() {
RegList saved_regs = restored_regs | sp.bit() | ra.bit();
const int kDoubleRegsSize =
- kDoubleSize * FPURegister::kNumAllocatableRegisters;
-
- // Save all FPU registers before messing with them.
- __ Subu(sp, sp, Operand(kDoubleRegsSize));
- for (int i = 0; i < FPURegister::kNumAllocatableRegisters; ++i) {
- FPURegister fpu_reg = FPURegister::FromAllocationIndex(i);
- int offset = i * kDoubleSize;
- __ sdc1(fpu_reg, MemOperand(sp, offset));
+ kDoubleSize * FPURegister::kMaxNumAllocatableRegisters;
+
+ if (CpuFeatures::IsSupported(FPU)) {
+ CpuFeatures::Scope scope(FPU);
+ // Save all FPU registers before messing with them.
+ __ Subu(sp, sp, Operand(kDoubleRegsSize));
+ for (int i = 0; i < FPURegister::kMaxNumAllocatableRegisters; ++i) {
+ FPURegister fpu_reg = FPURegister::FromAllocationIndex(i);
+ int offset = i * kDoubleSize;
+ __ sdc1(fpu_reg, MemOperand(sp, offset));
+ }
+ } else {
+ __ Subu(sp, sp, Operand(kDoubleRegsSize));
}
// Push saved_regs (needed to populate FrameDescription::registers_).
@@ -980,14 +949,17 @@ void Deoptimizer::EntryGenerator::Generate() {
}
}
- // Copy FPU registers to
- // double_registers_[DoubleRegister::kNumAllocatableRegisters]
int double_regs_offset = FrameDescription::double_registers_offset();
- for (int i = 0; i < FPURegister::kNumAllocatableRegisters; ++i) {
- int dst_offset = i * kDoubleSize + double_regs_offset;
- int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize;
- __ ldc1(f0, MemOperand(sp, src_offset));
- __ sdc1(f0, MemOperand(a1, dst_offset));
+ if (CpuFeatures::IsSupported(FPU)) {
+ CpuFeatures::Scope scope(FPU);
+ // Copy FPU registers into FrameDescription::double_registers_.
+ for (int i = 0; i < FPURegister::NumAllocatableRegisters(); ++i) {
+ int dst_offset = i * kDoubleSize + double_regs_offset;
+ int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize;
+ __ ldc1(f0, MemOperand(sp, src_offset));
+ __ sdc1(f0, MemOperand(a1, dst_offset));
+ }
}
// Remove the bailout id, eventually return address, and the saved registers
@@ -1008,11 +980,14 @@ void Deoptimizer::EntryGenerator::Generate() {
// frame description.
__ Addu(a3, a1, Operand(FrameDescription::frame_content_offset()));
Label pop_loop;
+ Label pop_loop_header;
+ __ Branch(&pop_loop_header);
__ bind(&pop_loop);
__ pop(t0);
__ sw(t0, MemOperand(a3, 0));
- __ Branch(USE_DELAY_SLOT, &pop_loop, ne, a2, Operand(sp));
- __ addiu(a3, a3, sizeof(uint32_t)); // In delay slot.
+ __ addiu(a3, a3, sizeof(uint32_t));
+ __ bind(&pop_loop_header);
+ __ Branch(&pop_loop, ne, a2, Operand(sp));
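
In C terms this rewrite turns a bottom-tested loop into a top-tested one: before, do { *a3++ = pop(); } while (a2 != sp); after, while (a2 != sp) { *a3++ = pop(); } via the initial jump to pop_loop_header. It also drops the branch-delay-slot trick, since the addiu no longer needs to execute on the final, not-taken iteration.
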
// Compute the output frame in the deoptimizer.
__ push(a0); // Preserve deoptimizer object across call.
@@ -1027,27 +1002,42 @@ void Deoptimizer::EntryGenerator::Generate() {
__ pop(a0); // Restore deoptimizer object (class Deoptimizer).
// Replace the current (input) frame with the output frames.
- Label outer_push_loop, inner_push_loop;
- // Outer loop state: a0 = current "FrameDescription** output_",
+ Label outer_push_loop, inner_push_loop,
+ outer_loop_header, inner_loop_header;
+ // Outer loop state: t0 = current "FrameDescription** output_",
// a1 = one past the last FrameDescription**.
__ lw(a1, MemOperand(a0, Deoptimizer::output_count_offset()));
- __ lw(a0, MemOperand(a0, Deoptimizer::output_offset())); // a0 is output_.
+ __ lw(t0, MemOperand(a0, Deoptimizer::output_offset())); // t0 is output_.
__ sll(a1, a1, kPointerSizeLog2); // Count to offset.
- __ addu(a1, a0, a1); // a1 = one past the last FrameDescription**.
+ __ addu(a1, t0, a1); // a1 = one past the last FrameDescription**.
+ __ jmp(&outer_loop_header);
__ bind(&outer_push_loop);
// Inner loop state: a2 = current FrameDescription*, a3 = loop index.
- __ lw(a2, MemOperand(a0, 0)); // output_[ix]
+ __ lw(a2, MemOperand(t0, 0)); // output_[ix]
__ lw(a3, MemOperand(a2, FrameDescription::frame_size_offset()));
+ __ jmp(&inner_loop_header);
__ bind(&inner_push_loop);
__ Subu(a3, a3, Operand(sizeof(uint32_t)));
__ Addu(t2, a2, Operand(a3));
__ lw(t3, MemOperand(t2, FrameDescription::frame_content_offset()));
__ push(t3);
+ __ bind(&inner_loop_header);
__ Branch(&inner_push_loop, ne, a3, Operand(zero_reg));
- __ Addu(a0, a0, Operand(kPointerSize));
- __ Branch(&outer_push_loop, lt, a0, Operand(a1));
+ __ Addu(t0, t0, Operand(kPointerSize));
+ __ bind(&outer_loop_header);
+ __ Branch(&outer_push_loop, lt, t0, Operand(a1));
+
+ if (CpuFeatures::IsSupported(FPU)) {
+ CpuFeatures::Scope scope(FPU);
+ __ lw(a1, MemOperand(a0, Deoptimizer::input_offset()));
+ for (int i = 0; i < FPURegister::kMaxNumAllocatableRegisters; ++i) {
+ const FPURegister fpu_reg = FPURegister::FromAllocationIndex(i);
+ int src_offset = i * kDoubleSize + double_regs_offset;
+ __ ldc1(fpu_reg, MemOperand(a1, src_offset));
+ }
+ }
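
Moving the output_ cursor from a0 to t0 is what makes the new FPU-restore block possible: a0 still holds the Deoptimizer object after both loops finish, so Deoptimizer::input_offset() can be loaded from it before the double registers are refilled from the input frame.
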
// Push state, pc, and continuation from the last output frame.
if (type() != OSR) {
diff --git a/src/3rdparty/v8/src/mips/disasm-mips.cc b/src/3rdparty/v8/src/mips/disasm-mips.cc
index 1d40c2c..0eca71f 100644
--- a/src/3rdparty/v8/src/mips/disasm-mips.cc
+++ b/src/3rdparty/v8/src/mips/disasm-mips.cc
@@ -350,6 +350,10 @@ int Decoder::FormatFPURegister(Instruction* instr, const char* format) {
int reg = instr->FdValue();
PrintFPURegister(reg);
return 2;
+ } else if (format[1] == 'r') { // 'fr: fr register.
+ int reg = instr->FrValue();
+ PrintFPURegister(reg);
+ return 2;
}
UNREACHABLE();
return -1;
@@ -618,6 +622,15 @@ void Decoder::DecodeTypeRegister(Instruction* instr) {
UNREACHABLE();
}
break;
+ case COP1X:
+ switch (instr->FunctionFieldRaw()) {
+ case MADD_D:
+ Format(instr, "madd.d 'fd, 'fr, 'fs, 'ft");
+ break;
+ default:
+ UNREACHABLE();
+ };
+ break;
case SPECIAL:
switch (instr->FunctionFieldRaw()) {
case JR:
diff --git a/src/3rdparty/v8/src/mips/frames-mips.h b/src/3rdparty/v8/src/mips/frames-mips.h
index 2ed358a..188e7d1 100644
--- a/src/3rdparty/v8/src/mips/frames-mips.h
+++ b/src/3rdparty/v8/src/mips/frames-mips.h
@@ -193,30 +193,6 @@ class ExitFrameConstants : public AllStatic {
};
-class StandardFrameConstants : public AllStatic {
- public:
- // Fixed part of the frame consists of return address, caller fp,
- // context and function.
- static const int kFixedFrameSize = 4 * kPointerSize;
- static const int kExpressionsOffset = -3 * kPointerSize;
- static const int kMarkerOffset = -2 * kPointerSize;
- static const int kContextOffset = -1 * kPointerSize;
- static const int kCallerFPOffset = 0 * kPointerSize;
- static const int kCallerPCOffset = +1 * kPointerSize;
- static const int kCallerSPOffset = +2 * kPointerSize;
-
- // Size of the MIPS 4 32-bit argument slots.
- // This is just an alias with a shorter name. Use it from now on.
- static const int kRArgsSlotsSize = 4 * kPointerSize;
- static const int kRegularArgsSlotsSize = kRArgsSlotsSize;
-
- // JS argument slots size.
- static const int kJSArgsSlotsSize = 0 * kPointerSize;
- // Assembly builtins argument slots size.
- static const int kBArgsSlotsSize = 0 * kPointerSize;
-};
-
-
class JavaScriptFrameConstants : public AllStatic {
public:
// FP-relative.
diff --git a/src/3rdparty/v8/src/mips/full-codegen-mips.cc b/src/3rdparty/v8/src/mips/full-codegen-mips.cc
index 46c9ecb..9173422 100644
--- a/src/3rdparty/v8/src/mips/full-codegen-mips.cc
+++ b/src/3rdparty/v8/src/mips/full-codegen-mips.cc
@@ -139,7 +139,7 @@ void FullCodeGenerator::Generate() {
handler_table_ =
isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
profiling_counter_ = isolate()->factory()->NewJSGlobalPropertyCell(
- Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget)));
+ Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
SetFunctionPosition(function());
Comment cmnt(masm_, "[ function compiled by full code generator");
@@ -147,7 +147,7 @@ void FullCodeGenerator::Generate() {
#ifdef DEBUG
if (strlen(FLAG_stop_at) > 0 &&
- info->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
+ info->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
__ stop("stop-at");
}
#endif
@@ -172,12 +172,13 @@ void FullCodeGenerator::Generate() {
int locals_count = info->scope()->num_stack_slots();
+ info->set_prologue_offset(masm_->pc_offset());
+ // The following three instructions must remain together and unmodified for
+ // code aging to work properly.
__ Push(ra, fp, cp, a1);
- if (locals_count > 0) {
- // Load undefined value here, so the value is ready for the loop
- // below.
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- }
+ // Load undefined value here, so the value is ready for the loop
+ // below.
+ __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
// Adjust fp to point to caller's fp.
__ Addu(fp, sp, Operand(2 * kPointerSize));
@@ -346,45 +347,34 @@ void FullCodeGenerator::EmitProfilingCounterReset() {
}
-void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt,
- Label* back_edge_target) {
+void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
+ Label* back_edge_target) {
// The generated code is used in Deoptimizer::PatchStackCheckCodeAt so we need
// to make sure it is constant. Branch may emit a skip-or-jump sequence
// instead of the normal Branch. It seems that the "skip" part of that
// sequence is about as long as this Branch would be so it is safe to ignore
// that.
Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
- Comment cmnt(masm_, "[ Stack check");
+ Comment cmnt(masm_, "[ Back edge bookkeeping");
Label ok;
- if (FLAG_count_based_interrupts) {
- int weight = 1;
- if (FLAG_weighted_back_edges) {
- ASSERT(back_edge_target->is_bound());
- int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
- weight = Min(kMaxBackEdgeWeight,
- Max(1, distance / kBackEdgeDistanceUnit));
- }
- EmitProfilingCounterDecrement(weight);
- __ slt(at, a3, zero_reg);
- __ beq(at, zero_reg, &ok);
- // CallStub will emit a li t9 first, so it is safe to use the delay slot.
- InterruptStub stub;
- __ CallStub(&stub);
- } else {
- __ LoadRoot(t0, Heap::kStackLimitRootIndex);
- __ sltu(at, sp, t0);
- __ beq(at, zero_reg, &ok);
- // CallStub will emit a li t9 first, so it is safe to use the delay slot.
- StackCheckStub stub;
- __ CallStub(&stub);
+ int weight = 1;
+ if (FLAG_weighted_back_edges) {
+ ASSERT(back_edge_target->is_bound());
+ int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
+ weight = Min(kMaxBackEdgeWeight,
+ Max(1, distance / kBackEdgeDistanceUnit));
}
+ EmitProfilingCounterDecrement(weight);
+ __ slt(at, a3, zero_reg);
+ __ beq(at, zero_reg, &ok);
+ // CallStub will emit a li t9 first, so it is safe to use the delay slot.
+ InterruptStub stub;
+ __ CallStub(&stub);
// Record a mapping of this PC offset to the OSR id. This is used to find
// the AST id from the unoptimized code in order to use it as a key into
// the deoptimization input data found in the optimized code.
- RecordStackCheck(stmt->OsrEntryId());
- if (FLAG_count_based_interrupts) {
- EmitProfilingCounterReset();
- }
+ RecordBackEdge(stmt->OsrEntryId());
+ EmitProfilingCounterReset();
__ bind(&ok);
PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
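
A worked instance of the weight computation above (kMaxBackEdgeWeight is 127 per full-codegen.h; the distance unit shown here is illustrative): with kBackEdgeDistanceUnit = 142 and a back edge bound 1000 bytes after its target, weight = Min(127, Max(1, 1000 / 142)) = 7, so larger loop bodies decrement the profiling counter faster and reach the InterruptStub, and hence the OSR check, sooner.
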
@@ -686,7 +676,7 @@ void FullCodeGenerator::DoTest(Expression* condition,
Label* fall_through) {
if (CpuFeatures::IsSupported(FPU)) {
ToBooleanStub stub(result_register());
- __ CallStub(&stub);
+ __ CallStub(&stub, condition->test_id());
__ mov(at, zero_reg);
} else {
// Call the runtime to find the boolean value of the source and then
@@ -824,7 +814,8 @@ void FullCodeGenerator::VisitVariableDeclaration(
? isolate()->factory()->the_hole_value()
: isolate()->factory()->undefined_value(),
zone());
- globals_->Add(isolate()->factory()->ToBoolean(variable->is_qml_global()), zone());
+ globals_->Add(isolate()->factory()->ToBoolean(variable->is_qml_global()),
+ zone());
break;
case Variable::PARAMETER:
@@ -886,7 +877,8 @@ void FullCodeGenerator::VisitFunctionDeclaration(
// Check for stack-overflow exception.
if (function.is_null()) return SetStackOverflow();
globals_->Add(function, zone());
- globals_->Add(isolate()->factory()->ToBoolean(variable->is_qml_global()), zone());
+ globals_->Add(isolate()->factory()->ToBoolean(variable->is_qml_global()),
+ zone());
break;
}
@@ -932,35 +924,33 @@ void FullCodeGenerator::VisitFunctionDeclaration(
void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* declaration) {
- VariableProxy* proxy = declaration->proxy();
- Variable* variable = proxy->var();
- Handle<JSModule> instance = declaration->module()->interface()->Instance();
- ASSERT(!instance.is_null());
+ Variable* variable = declaration->proxy()->var();
+ ASSERT(variable->location() == Variable::CONTEXT);
+ ASSERT(variable->interface()->IsFrozen());
- switch (variable->location()) {
- case Variable::UNALLOCATED: {
- Comment cmnt(masm_, "[ ModuleDeclaration");
- globals_->Add(variable->name(), zone());
- globals_->Add(instance, zone());
- globals_->Add(isolate()->factory()->ToBoolean(variable->is_qml_global()), zone());
- Visit(declaration->module());
- break;
- }
+ Comment cmnt(masm_, "[ ModuleDeclaration");
+ EmitDebugCheckDeclarationContext(variable);
- case Variable::CONTEXT: {
- Comment cmnt(masm_, "[ ModuleDeclaration");
- EmitDebugCheckDeclarationContext(variable);
- __ li(a1, Operand(instance));
- __ sw(a1, ContextOperand(cp, variable->index()));
- Visit(declaration->module());
- break;
- }
+ // Load instance object.
+ __ LoadContext(a1, scope_->ContextChainLength(scope_->GlobalScope()));
+ __ lw(a1, ContextOperand(a1, variable->interface()->Index()));
+ __ lw(a1, ContextOperand(a1, Context::EXTENSION_INDEX));
- case Variable::PARAMETER:
- case Variable::LOCAL:
- case Variable::LOOKUP:
- UNREACHABLE();
- }
+ // Assign it.
+ __ sw(a1, ContextOperand(cp, variable->index()));
+ // We know that we have written a module, which is not a smi.
+ __ RecordWriteContextSlot(cp,
+ Context::SlotOffset(variable->index()),
+ a1,
+ a3,
+ kRAHasBeenSaved,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ PrepareForBailoutForId(declaration->proxy()->id(), NO_REGISTERS);
+
+ // Traverse into body.
+ Visit(declaration->module());
}
@@ -1003,6 +993,14 @@ void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
}
+void FullCodeGenerator::DeclareModules(Handle<FixedArray> descriptions) {
+ // Call the runtime to declare the modules.
+ __ Push(descriptions);
+ __ CallRuntime(Runtime::kDeclareModules, 1);
+ // Return value is ignored.
+}
+
+
void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
Comment cmnt(masm_, "[ SwitchStatement");
Breakable nested_statement(this, stmt);
@@ -1053,7 +1051,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Record position before stub call for type feedback.
SetSourcePosition(clause->position());
- Handle<Code> ic = CompareIC::GetUninitialized(Token::EQ_STRICT);
+ Handle<Code> ic = CompareIC::GetUninitialized(isolate(), Token::EQ_STRICT);
CallIC(ic, RelocInfo::CODE_TARGET, clause->CompareId());
patch_site.EmitPatchInfo();
@@ -1178,7 +1176,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
Handle<JSGlobalPropertyCell> cell =
isolate()->factory()->NewJSGlobalPropertyCell(
Handle<Object>(
- Smi::FromInt(TypeFeedbackCells::kForInFastCaseMarker)));
+ Smi::FromInt(TypeFeedbackCells::kForInFastCaseMarker),
+ isolate()));
RecordTypeFeedbackCell(stmt->ForInFeedbackId(), cell);
__ LoadHeapObject(a1, cell);
__ li(a2, Operand(Smi::FromInt(TypeFeedbackCells::kForInSlowCaseMarker)));
@@ -1255,7 +1254,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ Addu(a0, a0, Operand(Smi::FromInt(1)));
__ push(a0);
- EmitStackCheck(stmt, &loop);
+ EmitBackEdgeBookkeeping(stmt, &loop);
__ Branch(&loop);
// Remove the pointers stored on the stack.
@@ -1348,7 +1347,9 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
__ bind(&fast);
}
- __ lw(a0, var->is_qml_global() ? QmlGlobalObjectOperand():GlobalObjectOperand());
+ __ lw(a0, var->is_qml_global()
+ ? QmlGlobalObjectOperand()
+ : GlobalObjectOperand());
__ li(a2, Operand(var->name()));
RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
? RelocInfo::CODE_TARGET
@@ -1403,9 +1404,9 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(Variable* var,
} else if (var->mode() == DYNAMIC_LOCAL) {
Variable* local = var->local_if_not_shadowed();
__ lw(v0, ContextSlotOperandCheckExtensions(local, slow));
- if (local->mode() == CONST ||
- local->mode() == CONST_HARMONY ||
- local->mode() == LET) {
+ if (local->mode() == LET ||
+ local->mode() == CONST ||
+ local->mode() == CONST_HARMONY) {
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
__ subu(at, v0, at); // Sub as compare: at == 0 on eq.
if (local->mode() == CONST) {
@@ -1435,7 +1436,9 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
Comment cmnt(masm_, "Global variable");
// Use inline caching. Variable name is passed in a2 and the global
// object (receiver) in a0.
- __ lw(a0, var->is_qml_global()?QmlGlobalObjectOperand():GlobalObjectOperand());
+ __ lw(a0, var->is_qml_global()
+ ? QmlGlobalObjectOperand()
+ : GlobalObjectOperand());
__ li(a2, Operand(var->name()));
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
CallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
@@ -1593,7 +1596,7 @@ void FullCodeGenerator::EmitAccessor(Expression* expression) {
void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Comment cmnt(masm_, "[ ObjectLiteral");
Handle<FixedArray> constant_properties = expr->constant_properties();
- __ lw(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ lw(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ lw(a3, FieldMemOperand(a3, JSFunction::kLiteralsOffset));
__ li(a2, Operand(Smi::FromInt(expr->literal_index())));
__ li(a1, Operand(constant_properties));
@@ -1604,12 +1607,13 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
? ObjectLiteral::kHasFunction
: ObjectLiteral::kNoFlags;
__ li(a0, Operand(Smi::FromInt(flags)));
- __ Push(a3, a2, a1, a0);
int properties_count = constant_properties->length() / 2;
if (expr->depth() > 1) {
+ __ Push(a3, a2, a1, a0);
__ CallRuntime(Runtime::kCreateObjectLiteral, 4);
- } else if (flags != ObjectLiteral::kFastElements ||
+ } else if (Serializer::enabled() || flags != ObjectLiteral::kFastElements ||
properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
+ __ Push(a3, a2, a1, a0);
__ CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
} else {
FastCloneShallowObjectStub stub(properties_count);
@@ -1643,7 +1647,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
ASSERT(!CompileTimeValue::IsCompileTimeValue(property->value()));
// Fall through.
case ObjectLiteral::Property::COMPUTED:
- if (key->handle()->IsSymbol()) {
+ if (key->handle()->IsInternalizedString()) {
if (property->emit_store()) {
VisitForAccumulatorValue(value);
__ mov(a0, result_register());
@@ -1737,7 +1741,9 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
if (has_fast_elements && constant_elements_values->map() ==
isolate()->heap()->fixed_cow_array_map()) {
FastCloneShallowArrayStub stub(
- FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS, length);
+ FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS,
+ DONT_TRACK_ALLOCATION_SITE,
+ length);
__ CallStub(&stub);
__ IncrementCounter(isolate()->counters()->cow_arrays_created_stub(),
1, a1, a2);
@@ -1748,10 +1754,17 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
} else {
ASSERT(IsFastSmiOrObjectElementsKind(constant_elements_kind) ||
FLAG_smi_only_arrays);
- FastCloneShallowArrayStub::Mode mode = has_fast_elements
- ? FastCloneShallowArrayStub::CLONE_ELEMENTS
- : FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS;
- FastCloneShallowArrayStub stub(mode, length);
+ FastCloneShallowArrayStub::Mode mode =
+ FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS;
+ AllocationSiteMode allocation_site_mode = FLAG_track_allocation_sites
+ ? TRACK_ALLOCATION_SITE : DONT_TRACK_ALLOCATION_SITE;
+
+ if (has_fast_elements) {
+ mode = FastCloneShallowArrayStub::CLONE_ELEMENTS;
+ allocation_site_mode = DONT_TRACK_ALLOCATION_SITE;
+ }
+
+ FastCloneShallowArrayStub stub(mode, allocation_site_mode, length);
__ CallStub(&stub);
}
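
Restating the clone-mode selection above as a table (no new policy, just the branch outcomes):

    COW elements             -> COPY_ON_WRITE_ELEMENTS, DONT_TRACK_ALLOCATION_SITE
    fast (non-COW) elements  -> CLONE_ELEMENTS,         DONT_TRACK_ALLOCATION_SITE
    everything else          -> CLONE_ANY_ELEMENTS,     TRACK_ALLOCATION_SITE
                                (tracking only when FLAG_track_allocation_sites)
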
@@ -1962,7 +1975,7 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
__ bind(&stub_call);
BinaryOpStub stub(op, mode);
- CallIC(stub.GetCode(), RelocInfo::CODE_TARGET,
+ CallIC(stub.GetCode(isolate()), RelocInfo::CODE_TARGET,
expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
__ jmp(&done);
@@ -2046,7 +2059,7 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
__ pop(a1);
BinaryOpStub stub(op, mode);
JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
- CallIC(stub.GetCode(), RelocInfo::CODE_TARGET,
+ CallIC(stub.GetCode(isolate()), RelocInfo::CODE_TARGET,
expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
context()->Plug(v0);
@@ -2054,7 +2067,7 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
void FullCodeGenerator::EmitAssignment(Expression* expr) {
- // Invalid left-hand sides are rewritten to have a 'throw
+ // Invalid left-hand sides are rewritten by the parser to have a 'throw
// ReferenceError' on the left-hand side.
if (!expr->IsValidLeftHandSide()) {
VisitForEffect(expr);
@@ -2115,7 +2128,9 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
// Global var, const, or let.
__ mov(a0, result_register());
__ li(a2, Operand(var->name()));
- __ lw(a1, var->is_qml_global()?QmlGlobalObjectOperand():GlobalObjectOperand());
+ __ lw(a1, var->is_qml_global()
+ ? QmlGlobalObjectOperand()
+ : GlobalObjectOperand());
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->StoreIC_Initialize()
: isolate()->builtins()->StoreIC_Initialize_Strict();
@@ -2360,7 +2375,7 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) {
CallFunctionStub stub(arg_count, flags);
__ lw(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
- __ CallStub(&stub);
+ __ CallStub(&stub, expr->CallFeedbackId());
RecordJSReturnSite(expr);
// Restore context register.
__ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -2410,7 +2425,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
VariableProxy* proxy = callee->AsVariableProxy();
Property* property = callee->AsProperty();
- if (proxy != NULL && proxy->var()->is_possibly_eval()) {
+ if (proxy != NULL && proxy->var()->is_possibly_eval(isolate())) {
// In a call to eval, we first call %ResolvePossiblyDirectEval to
// resolve the function we need to call and the receiver of the
// call. Then we call the resolved function using the given
@@ -2450,7 +2465,9 @@ void FullCodeGenerator::VisitCall(Call* expr) {
context()->DropAndPlug(1, v0);
} else if (proxy != NULL && proxy->var()->IsUnallocated()) {
// Push global object as receiver for the call IC.
- __ lw(a0, proxy->var()->is_qml_global()?QmlGlobalObjectOperand():GlobalObjectOperand());
+ __ lw(a0, proxy->var()->is_qml_global()
+ ? QmlGlobalObjectOperand()
+ : GlobalObjectOperand());
__ push(a0);
EmitCallWithIC(expr, proxy->name(), RelocInfo::CODE_TARGET_CONTEXT);
} else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
@@ -2558,7 +2575,7 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
__ li(a2, Operand(cell));
CallConstructStub stub(RECORD_CALL_TARGET);
- __ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
+ __ Call(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL);
PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
context()->Plug(v0);
}
@@ -2711,7 +2728,7 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
__ LoadRoot(t0, Heap::kHashTableMapRootIndex);
__ Branch(if_false, eq, a2, Operand(t0));
- // Look for valueOf symbol in the descriptor array, and indicate false if
+ // Look for valueOf name in the descriptor array, and indicate false if
// found. Since we omit an enumeration index check, if it is added via a
// transition that shares its descriptor array, this is a false positive.
Label entry, loop, done;
@@ -2736,10 +2753,10 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
__ Addu(a2, a2, t1);
// Loop through all the keys in the descriptor array. If one of these is the
- // symbol valueOf the result is false.
- // The use of t2 to store the valueOf symbol asumes that it is not otherwise
+ // string "valueOf" the result is false.
+ // The use of t2 to store the valueOf string assumes that it is not otherwise
// used in the loop below.
- __ LoadRoot(t2, Heap::kvalue_of_symbolRootIndex);
+ __ li(t2, Operand(FACTORY->value_of_string()));
__ jmp(&entry);
__ bind(&loop);
__ lw(a3, MemOperand(t0, 0));
@@ -2771,6 +2788,28 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
}
+void FullCodeGenerator::EmitIsSymbol(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ __ JumpIfSmi(v0, if_false);
+ __ GetObjectType(v0, a1, a2);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(eq, a2, Operand(SYMBOL_TYPE), if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
@@ -2970,12 +3009,12 @@ void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
// Functions have class 'Function'.
__ bind(&function);
- __ LoadRoot(v0, Heap::kfunction_class_symbolRootIndex);
+ __ LoadRoot(v0, Heap::kfunction_class_stringRootIndex);
__ jmp(&done);
// Objects with a non-function constructor have class 'Object'.
__ bind(&non_function_constructor);
- __ LoadRoot(v0, Heap::kObject_symbolRootIndex);
+ __ LoadRoot(v0, Heap::kObject_stringRootIndex);
__ jmp(&done);
// Non-JS objects have class null.
@@ -3157,6 +3196,38 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT_EQ(3, args->length());
+
+ VisitForStackValue(args->at(1)); // index
+ VisitForStackValue(args->at(2)); // value
+ __ pop(a2);
+ __ pop(a1);
+ VisitForAccumulatorValue(args->at(0)); // string
+
+ static const String::Encoding encoding = String::ONE_BYTE_ENCODING;
+ SeqStringSetCharGenerator::Generate(masm_, encoding, v0, a1, a2);
+ context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT_EQ(3, args->length());
+
+ VisitForStackValue(args->at(1)); // index
+ VisitForStackValue(args->at(2)); // value
+ __ pop(a2);
+ __ pop(a1);
+ VisitForAccumulatorValue(args->at(0)); // string
+
+ static const String::Encoding encoding = String::TWO_BYTE_ENCODING;
+ SeqStringSetCharGenerator::Generate(masm_, encoding, v0, a1, a2);
+ context()->Plug(v0);
+}
+
+
void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
// Load the arguments on the stack and call the runtime function.
ZoneList<Expression*>* args = expr->arguments();
@@ -3313,7 +3384,7 @@ void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
__ bind(&index_out_of_range);
// When the index is out of range, the spec requires us to return
// the empty string.
- __ LoadRoot(result, Heap::kEmptyStringRootIndex);
+ __ LoadRoot(result, Heap::kempty_stringRootIndex);
__ jmp(&done);
__ bind(&need_conversion);
@@ -3628,7 +3699,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ lw(array_length, FieldMemOperand(array, JSArray::kLengthOffset));
__ SmiUntag(array_length);
__ Branch(&non_trivial_array, ne, array_length, Operand(zero_reg));
- __ LoadRoot(v0, Heap::kEmptyStringRootIndex);
+ __ LoadRoot(v0, Heap::kempty_stringRootIndex);
__ Branch(&done);
__ bind(&non_trivial_array);
@@ -3664,7 +3735,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ lw(scratch1, FieldMemOperand(string, HeapObject::kMapOffset));
__ lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
__ JumpIfInstanceTypeIsNotSequentialAscii(scratch1, scratch2, &bailout);
- __ lw(scratch1, FieldMemOperand(string, SeqAsciiString::kLengthOffset));
+ __ lw(scratch1, FieldMemOperand(string, SeqOneByteString::kLengthOffset));
__ AdduAndCheckForOverflow(string_length, string_length, scratch1, scratch3);
__ BranchOnOverflow(&bailout, scratch3);
__ Branch(&loop, lt, element, Operand(elements_end));
@@ -3691,7 +3762,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// Add (separator length times array_length) - separator length to the
// string_length to get the length of the result string. array_length is not
// smi but the other values are, so the result is a smi.
- __ lw(scratch1, FieldMemOperand(separator, SeqAsciiString::kLengthOffset));
+ __ lw(scratch1, FieldMemOperand(separator, SeqOneByteString::kLengthOffset));
__ Subu(string_length, string_length, Operand(scratch1));
__ Mult(array_length, scratch1);
// Check for smi overflow. No overflow if higher 33 bits of 64-bit result are
@@ -3731,10 +3802,10 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
array_length = no_reg;
__ Addu(result_pos,
result,
- Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
// Check the length of the separator.
- __ lw(scratch1, FieldMemOperand(separator, SeqAsciiString::kLengthOffset));
+ __ lw(scratch1, FieldMemOperand(separator, SeqOneByteString::kLengthOffset));
__ li(at, Operand(Smi::FromInt(1)));
__ Branch(&one_char_separator, eq, scratch1, Operand(at));
__ Branch(&long_separator, gt, scratch1, Operand(at));
@@ -3751,7 +3822,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ Addu(element, element, kPointerSize);
__ lw(string_length, FieldMemOperand(string, String::kLengthOffset));
__ SmiUntag(string_length);
- __ Addu(string, string, SeqAsciiString::kHeaderSize - kHeapObjectTag);
+ __ Addu(string, string, SeqOneByteString::kHeaderSize - kHeapObjectTag);
__ CopyBytes(string, result_pos, string_length, scratch1);
// End while (element < elements_end).
__ Branch(&empty_separator_loop, lt, element, Operand(elements_end));
@@ -3761,7 +3832,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// One-character separator case.
__ bind(&one_char_separator);
// Replace separator with its ASCII character value.
- __ lbu(separator, FieldMemOperand(separator, SeqAsciiString::kHeaderSize));
+ __ lbu(separator, FieldMemOperand(separator, SeqOneByteString::kHeaderSize));
// Jump into the loop after the code that copies the separator, so the first
// element is not preceded by a separator.
__ jmp(&one_char_separator_loop_entry);
@@ -3783,7 +3854,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ Addu(element, element, kPointerSize);
__ lw(string_length, FieldMemOperand(string, String::kLengthOffset));
__ SmiUntag(string_length);
- __ Addu(string, string, SeqAsciiString::kHeaderSize - kHeapObjectTag);
+ __ Addu(string, string, SeqOneByteString::kHeaderSize - kHeapObjectTag);
__ CopyBytes(string, result_pos, string_length, scratch1);
// End while (element < elements_end).
__ Branch(&one_char_separator_loop, lt, element, Operand(elements_end));
@@ -3804,7 +3875,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ SmiUntag(string_length);
__ Addu(string,
separator,
- Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
__ CopyBytes(string, result_pos, string_length, scratch1);
__ bind(&long_separator);
@@ -3812,7 +3883,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ Addu(element, element, kPointerSize);
__ lw(string_length, FieldMemOperand(string, String::kLengthOffset));
__ SmiUntag(string_length);
- __ Addu(string, string, SeqAsciiString::kHeaderSize - kHeapObjectTag);
+ __ Addu(string, string, SeqOneByteString::kHeaderSize - kHeapObjectTag);
__ CopyBytes(string, result_pos, string_length, scratch1);
// End while (element < elements_end).
__ Branch(&long_separator_loop, lt, element, Operand(elements_end));
@@ -3889,7 +3960,9 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
// but "delete this" is allowed.
ASSERT(language_mode() == CLASSIC_MODE || var->is_this());
if (var->IsUnallocated()) {
- __ lw(a2, var->is_qml_global() ? QmlGlobalObjectOperand() : GlobalObjectOperand());
+ __ lw(a2, var->is_qml_global()
+ ? QmlGlobalObjectOperand()
+ : GlobalObjectOperand());
__ li(a1, Operand(var->name()));
__ li(a0, Operand(Smi::FromInt(kNonStrictMode)));
__ Push(a2, a1, a0);
@@ -4012,7 +4085,7 @@ void FullCodeGenerator::EmitUnaryOperation(UnaryOperation* expr,
VisitForAccumulatorValue(expr->expression());
SetSourcePosition(expr->position());
__ mov(a0, result_register());
- CallIC(stub.GetCode(), RelocInfo::CODE_TARGET,
+ CallIC(stub.GetCode(isolate()), RelocInfo::CODE_TARGET,
expr->UnaryOperationFeedbackId());
context()->Plug(v0);
}
@@ -4108,9 +4181,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
JumpPatchSite patch_site(masm_);
int count_value = expr->op() == Token::INC ? 1 : -1;
- __ li(a1, Operand(Smi::FromInt(count_value)));
-
if (ShouldInlineSmiCase(expr->op())) {
+ __ li(a1, Operand(Smi::FromInt(count_value)));
__ AdduAndCheckForOverflow(v0, a0, a1, t0);
__ BranchOnOverflow(&stub_call, t0); // Do stub on overflow.
@@ -4119,12 +4191,16 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
patch_site.EmitJumpIfSmi(v0, &done);
__ bind(&stub_call);
}
+ __ mov(a1, a0);
+ __ li(a0, Operand(Smi::FromInt(count_value)));
// Record position before stub call.
SetSourcePosition(expr->position());
BinaryOpStub stub(Token::ADD, NO_OVERWRITE);
- CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->CountBinOpFeedbackId());
+ CallIC(stub.GetCode(isolate()),
+ RelocInfo::CODE_TARGET,
+ expr->CountBinOpFeedbackId());
patch_site.EmitPatchInfo();
__ bind(&done);
@@ -4196,7 +4272,9 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
VariableProxy* proxy = expr->AsVariableProxy();
if (proxy != NULL && proxy->var()->IsUnallocated()) {
Comment cmnt(masm_, "Global variable");
- __ lw(a0, proxy->var()->is_qml_global() ? QmlGlobalObjectOperand() : GlobalObjectOperand());
+ __ lw(a0, proxy->var()->is_qml_global()
+ ? QmlGlobalObjectOperand()
+ : GlobalObjectOperand());
__ li(a2, Operand(proxy->name()));
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
// Use a regular load, not a contextual load, to avoid a reference
@@ -4240,12 +4318,12 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
}
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- if (check->Equals(isolate()->heap()->number_symbol())) {
+ if (check->Equals(isolate()->heap()->number_string())) {
__ JumpIfSmi(v0, if_true);
__ lw(v0, FieldMemOperand(v0, HeapObject::kMapOffset));
__ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
Split(eq, v0, Operand(at), if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->string_symbol())) {
+ } else if (check->Equals(isolate()->heap()->string_string())) {
__ JumpIfSmi(v0, if_false);
// Check for undetectable objects => false.
__ GetObjectType(v0, v0, a1);
@@ -4254,16 +4332,16 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
__ And(a1, a1, Operand(1 << Map::kIsUndetectable));
Split(eq, a1, Operand(zero_reg),
if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->boolean_symbol())) {
+ } else if (check->Equals(isolate()->heap()->boolean_string())) {
__ LoadRoot(at, Heap::kTrueValueRootIndex);
__ Branch(if_true, eq, v0, Operand(at));
__ LoadRoot(at, Heap::kFalseValueRootIndex);
Split(eq, v0, Operand(at), if_true, if_false, fall_through);
} else if (FLAG_harmony_typeof &&
- check->Equals(isolate()->heap()->null_symbol())) {
+ check->Equals(isolate()->heap()->null_string())) {
__ LoadRoot(at, Heap::kNullValueRootIndex);
Split(eq, v0, Operand(at), if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->undefined_symbol())) {
+ } else if (check->Equals(isolate()->heap()->undefined_string())) {
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
__ Branch(if_true, eq, v0, Operand(at));
__ JumpIfSmi(v0, if_false);
@@ -4272,19 +4350,23 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
__ lbu(a1, FieldMemOperand(v0, Map::kBitFieldOffset));
__ And(a1, a1, Operand(1 << Map::kIsUndetectable));
Split(ne, a1, Operand(zero_reg), if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->function_symbol())) {
+ } else if (check->Equals(isolate()->heap()->function_string())) {
__ JumpIfSmi(v0, if_false);
STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
__ GetObjectType(v0, v0, a1);
__ Branch(if_true, eq, a1, Operand(JS_FUNCTION_TYPE));
Split(eq, a1, Operand(JS_FUNCTION_PROXY_TYPE),
if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->object_symbol())) {
+ } else if (check->Equals(isolate()->heap()->object_string())) {
__ JumpIfSmi(v0, if_false);
if (!FLAG_harmony_typeof) {
__ LoadRoot(at, Heap::kNullValueRootIndex);
__ Branch(if_true, eq, v0, Operand(at));
}
+ if (FLAG_harmony_symbols) {
+ __ GetObjectType(v0, v0, a1);
+ __ Branch(if_true, eq, a1, Operand(SYMBOL_TYPE));
+ }
// Check for JS objects => true.
__ GetObjectType(v0, v0, a1);
__ Branch(if_false, lt, a1, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
@@ -4341,29 +4423,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
default: {
VisitForAccumulatorValue(expr->right());
- Condition cc = eq;
- switch (op) {
- case Token::EQ_STRICT:
- case Token::EQ:
- cc = eq;
- break;
- case Token::LT:
- cc = lt;
- break;
- case Token::GT:
- cc = gt;
- break;
- case Token::LTE:
- cc = le;
- break;
- case Token::GTE:
- cc = ge;
- break;
- case Token::IN:
- case Token::INSTANCEOF:
- default:
- UNREACHABLE();
- }
+ Condition cc = CompareIC::ComputeCondition(op);
__ mov(a0, result_register());
__ pop(a1);
@@ -4378,7 +4438,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
}
// Record position and call the compare IC.
SetSourcePosition(expr->position());
- Handle<Code> ic = CompareIC::GetUninitialized(op);
+ Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
CallIC(ic, RelocInfo::CODE_TARGET, expr->CompareOperationFeedbackId());
patch_site.EmitPatchInfo();
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
diff --git a/src/3rdparty/v8/src/mips/ic-mips.cc b/src/3rdparty/v8/src/mips/ic-mips.cc
index cf70681..24b1b0f 100644
--- a/src/3rdparty/v8/src/mips/ic-mips.cc
+++ b/src/3rdparty/v8/src/mips/ic-mips.cc
@@ -215,53 +215,6 @@ static void GenerateDictionaryStore(MacroAssembler* masm,
}
-void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- a2 : name
- // -- ra : return address
- // -- a0 : receiver
- // -- sp[0] : receiver
- // -----------------------------------
- Label miss;
-
- StubCompiler::GenerateLoadArrayLength(masm, a0, a3, &miss);
- __ bind(&miss);
- StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
-}
-
-
-void LoadIC::GenerateStringLength(MacroAssembler* masm, bool support_wrappers) {
- // ----------- S t a t e -------------
- // -- a2 : name
- // -- lr : return address
- // -- a0 : receiver
- // -- sp[0] : receiver
- // -----------------------------------
- Label miss;
-
- StubCompiler::GenerateLoadStringLength(masm, a0, a1, a3, &miss,
- support_wrappers);
- // Cache miss: Jump to runtime.
- __ bind(&miss);
- StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
-}
-
-
-void LoadIC::GenerateFunctionPrototype(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- a2 : name
- // -- lr : return address
- // -- a0 : receiver
- // -- sp[0] : receiver
- // -----------------------------------
- Label miss;
-
- StubCompiler::GenerateLoadFunctionPrototype(masm, a0, a1, a3, &miss);
- __ bind(&miss);
- StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
-}
-
-
// Checks the receiver for special cases (value type, slow case bits).
// Falls through for regular JS object.
static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
@@ -352,30 +305,30 @@ static void GenerateFastArrayLoad(MacroAssembler* masm,
}
-// Checks whether a key is an array index string or a symbol string.
-// Falls through if a key is a symbol.
+// Checks whether a key is an array index string or an internalized string.
+// Falls through if a key is an internalized string.
static void GenerateKeyStringCheck(MacroAssembler* masm,
Register key,
Register map,
Register hash,
Label* index_string,
- Label* not_symbol) {
+ Label* not_internalized) {
// The key is not a smi.
// Is it a string?
__ GetObjectType(key, map, hash);
- __ Branch(not_symbol, ge, hash, Operand(FIRST_NONSTRING_TYPE));
+ __ Branch(not_internalized, ge, hash, Operand(FIRST_NONSTRING_TYPE));
// Is the string an array index, with cached numeric value?
__ lw(hash, FieldMemOperand(key, String::kHashFieldOffset));
__ And(at, hash, Operand(String::kContainsCachedArrayIndexMask));
__ Branch(index_string, eq, at, Operand(zero_reg));
- // Is the string a symbol?
+ // Is the string internalized?
// map: key map
__ lbu(hash, FieldMemOperand(map, Map::kInstanceTypeOffset));
- STATIC_ASSERT(kSymbolTag != 0);
- __ And(at, hash, Operand(kIsSymbolMask));
- __ Branch(not_symbol, eq, at, Operand(zero_reg));
+ STATIC_ASSERT(kInternalizedTag != 0);
+ __ And(at, hash, Operand(kIsInternalizedMask));
+ __ Branch(not_internalized, eq, at, Operand(zero_reg));
}
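The renamed check above boils down to a single bit test on the key's instance type. A hedged sketch of the predicate, with the mask passed in since its concrete value lives in the V8 headers:

    #include <cstdint>

    // True when the instance type carries the internalized tag, mirroring:
    // And(at, hash, kIsInternalizedMask); branch-if-zero to the slow path.
    bool KeyIsInternalized(uint8_t instance_type,
                           uint8_t is_internalized_mask) {
      return (instance_type & is_internalized_mask) != 0;
    }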
@@ -630,7 +583,7 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
__ bind(&check_string);
GenerateKeyStringCheck(masm, a2, a0, a3, &index_string, &slow_call);
- // The key is known to be a symbol.
+ // The key is known to be internalized.
// If the receiver is a regular JS object with slow properties then do
// a quick inline probe of the receiver's dictionary.
// Otherwise do the monomorphic cache probe.
@@ -657,7 +610,7 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
__ bind(&slow_call);
// This branch is taken if:
// - the receiver requires boxing or access check,
- // - the key is neither smi nor symbol,
+ // - the key is neither smi nor an internalized string,
// - the value loaded is not a function,
// - there is hope that the runtime will create a monomorphic call stub,
// that will get fetched next time.
@@ -700,7 +653,8 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// -----------------------------------
// Probe the stub cache.
- Code::Flags flags = Code::ComputeFlags(Code::LOAD_IC, MONOMORPHIC);
+ Code::Flags flags = Code::ComputeFlags(
+ Code::LOAD_IC, MONOMORPHIC, Code::HANDLER_FRAGMENT);
Isolate::Current()->stub_cache()->GenerateProbe(
masm, flags, a0, a2, a3, t0, t1, t2);
@@ -858,7 +812,7 @@ void KeyedLoadIC::GenerateNonStrictArguments(MacroAssembler* masm) {
__ Ret(USE_DELAY_SLOT);
__ mov(v0, a2);
__ bind(&slow);
- GenerateMiss(masm, false);
+ GenerateMiss(masm, MISS);
}
@@ -893,7 +847,7 @@ void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
__ Ret(USE_DELAY_SLOT);
__ mov(v0, a0); // (In delay slot) return the value stored in v0.
__ bind(&slow);
- GenerateMiss(masm, false);
+ GenerateMiss(masm, MISS);
}
@@ -926,7 +880,7 @@ void KeyedCallIC::GenerateNonStrictArguments(MacroAssembler* masm,
Object* KeyedLoadIC_Miss(Arguments args);
-void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, bool force_generic) {
+void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, ICMissMode miss_mode) {
// ---------- S t a t e --------------
// -- ra : return address
// -- a0 : key
@@ -939,7 +893,7 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, bool force_generic) {
__ Push(a1, a0);
// Perform tail call to the entry.
- ExternalReference ref = force_generic
+ ExternalReference ref = miss_mode == MISS_FORCE_GENERIC
? ExternalReference(IC_Utility(kKeyedLoadIC_MissForceGeneric), isolate)
: ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate);
@@ -1038,7 +992,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
int mask = KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask;
__ And(a3, a3, Operand(mask));
- // Load the key (consisting of map and symbol) from the cache and
+ // Load the key (consisting of map and internalized string) from the cache and
// check for match.
Label load_in_object_property;
static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
@@ -1166,7 +1120,7 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
char_at_generator.GenerateSlow(masm, call_helper);
__ bind(&miss);
- GenerateMiss(masm, false);
+ GenerateMiss(masm, MISS);
}
@@ -1268,7 +1222,6 @@ static void KeyedStoreGenerateGenericHelper(
__ bind(&fast_double_without_map_check);
__ StoreNumberToDoubleElements(value,
key,
- receiver,
elements, // Overwritten.
a3, // Scratch regs...
t0,
@@ -1296,7 +1249,9 @@ static void KeyedStoreGenerateGenericHelper(
t0,
slow);
ASSERT(receiver_map.is(a3)); // Transition code expects map in a3
- ElementsTransitionGenerator::GenerateSmiToDouble(masm, slow);
+ AllocationSiteMode mode = AllocationSiteInfo::GetMode(FAST_SMI_ELEMENTS,
+ FAST_DOUBLE_ELEMENTS);
+ ElementsTransitionGenerator::GenerateSmiToDouble(masm, mode, slow);
__ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ jmp(&fast_double_without_map_check);
@@ -1308,7 +1263,9 @@ static void KeyedStoreGenerateGenericHelper(
t0,
slow);
ASSERT(receiver_map.is(a3)); // Transition code expects map in a3
- ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm);
+ mode = AllocationSiteInfo::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
+ ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm, mode,
+ slow);
__ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ jmp(&finish_object_store);
@@ -1322,7 +1279,8 @@ static void KeyedStoreGenerateGenericHelper(
t0,
slow);
ASSERT(receiver_map.is(a3)); // Transition code expects map in a3
- ElementsTransitionGenerator::GenerateDoubleToObject(masm, slow);
+ mode = AllocationSiteInfo::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
+ ElementsTransitionGenerator::GenerateDoubleToObject(masm, mode, slow);
__ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ jmp(&finish_object_store);
}
@@ -1453,11 +1411,11 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
IC_Utility(kKeyedLoadPropertyWithInterceptor), masm->isolate()), 2, 1);
__ bind(&slow);
- GenerateMiss(masm, false);
+ GenerateMiss(masm, MISS);
}
-void KeyedStoreIC::GenerateMiss(MacroAssembler* masm, bool force_generic) {
+void KeyedStoreIC::GenerateMiss(MacroAssembler* masm, ICMissMode miss_mode) {
// ---------- S t a t e --------------
// -- a0 : value
// -- a1 : key
@@ -1468,7 +1426,7 @@ void KeyedStoreIC::GenerateMiss(MacroAssembler* masm, bool force_generic) {
// Push receiver, key and value for runtime call.
__ Push(a2, a1, a0);
- ExternalReference ref = force_generic
+ ExternalReference ref = miss_mode == MISS_FORCE_GENERIC
? ExternalReference(IC_Utility(kKeyedStoreIC_MissForceGeneric),
masm->isolate())
: ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
@@ -1506,7 +1464,9 @@ void KeyedStoreIC::GenerateTransitionElementsSmiToDouble(MacroAssembler* masm) {
// Must return the modified receiver in v0.
if (!FLAG_trace_elements_transitions) {
Label fail;
- ElementsTransitionGenerator::GenerateSmiToDouble(masm, &fail);
+ AllocationSiteMode mode = AllocationSiteInfo::GetMode(FAST_SMI_ELEMENTS,
+ FAST_DOUBLE_ELEMENTS);
+ ElementsTransitionGenerator::GenerateSmiToDouble(masm, mode, &fail);
__ Ret(USE_DELAY_SLOT);
__ mov(v0, a2);
__ bind(&fail);
@@ -1527,7 +1487,9 @@ void KeyedStoreIC::GenerateTransitionElementsDoubleToObject(
// Must return the modified receiver in v0.
if (!FLAG_trace_elements_transitions) {
Label fail;
- ElementsTransitionGenerator::GenerateDoubleToObject(masm, &fail);
+ AllocationSiteMode mode = AllocationSiteInfo::GetMode(FAST_DOUBLE_ELEMENTS,
+ FAST_ELEMENTS);
+ ElementsTransitionGenerator::GenerateDoubleToObject(masm, mode, &fail);
__ Ret(USE_DELAY_SLOT);
__ mov(v0, a2);
__ bind(&fail);
@@ -1574,62 +1536,6 @@ void StoreIC::GenerateMiss(MacroAssembler* masm) {
}
-void StoreIC::GenerateArrayLength(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- a0 : value
- // -- a1 : receiver
- // -- a2 : name
- // -- ra : return address
- // -----------------------------------
- //
- // This accepts as a receiver anything JSArray::SetElementsLength accepts
- // (currently anything except for external arrays which means anything with
- // elements of FixedArray type). Value must be a number, but only smis are
- // accepted as the most common case.
-
- Label miss;
-
- Register receiver = a1;
- Register value = a0;
- Register scratch = a3;
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, &miss);
-
- // Check that the object is a JS array.
- __ GetObjectType(receiver, scratch, scratch);
- __ Branch(&miss, ne, scratch, Operand(JS_ARRAY_TYPE));
-
- // Check that elements are FixedArray.
- // We rely on StoreIC_ArrayLength below to deal with all types of
- // fast elements (including COW).
- __ lw(scratch, FieldMemOperand(receiver, JSArray::kElementsOffset));
- __ GetObjectType(scratch, scratch, scratch);
- __ Branch(&miss, ne, scratch, Operand(FIXED_ARRAY_TYPE));
-
- // Check that the array has fast properties, otherwise the length
- // property might have been redefined.
- __ lw(scratch, FieldMemOperand(receiver, JSArray::kPropertiesOffset));
- __ lw(scratch, FieldMemOperand(scratch, FixedArray::kMapOffset));
- __ LoadRoot(at, Heap::kHashTableMapRootIndex);
- __ Branch(&miss, eq, scratch, Operand(at));
-
- // Check that value is a smi.
- __ JumpIfNotSmi(value, &miss);
-
- // Prepare tail call to StoreIC_ArrayLength.
- __ Push(receiver, value);
-
- ExternalReference ref = ExternalReference(IC_Utility(kStoreIC_ArrayLength),
- masm->isolate());
- __ TailCallExternalReference(ref, 2, 1);
-
- __ bind(&miss);
-
- GenerateMiss(masm);
-}
-
-
void StoreIC::GenerateNormal(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : value
@@ -1695,36 +1601,16 @@ Condition CompareIC::ComputeCondition(Token::Value op) {
}
-void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
- HandleScope scope;
- Handle<Code> rewritten;
- State previous_state = GetState();
- State state = TargetState(previous_state, false, x, y);
- if (state == GENERIC) {
- CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS, a1, a0);
- rewritten = stub.GetCode();
- } else {
- ICCompareStub stub(op_, state);
- if (state == KNOWN_OBJECTS) {
- stub.set_known_map(Handle<Map>(Handle<JSObject>::cast(x)->map()));
- }
- rewritten = stub.GetCode();
- }
- set_target(*rewritten);
-
-#ifdef DEBUG
- if (FLAG_trace_ic) {
- PrintF("[CompareIC (%s->%s)#%s]\n",
- GetStateName(previous_state),
- GetStateName(state),
- Token::Name(op_));
- }
-#endif
+bool CompareIC::HasInlinedSmiCode(Address address) {
+ // The address of the instruction following the call.
+ Address andi_instruction_address =
+ address + Assembler::kCallTargetAddressOffset;
- // Activate inlined smi code.
- if (previous_state == UNINITIALIZED) {
- PatchInlinedSmiCode(address(), ENABLE_INLINED_SMI_CHECK);
- }
+ // If the instruction following the call is not an andi at, rx, #yyy, nothing
+ // was inlined.
+ Instr instr = Assembler::instr_at(andi_instruction_address);
+ return Assembler::IsAndImmediate(instr) &&
+ Assembler::GetRt(instr) == static_cast<uint32_t>(zero_reg.code());
}
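HasInlinedSmiCode relies on a marker the full code generator plants right after the compare-IC call site: an andi whose destination is zero_reg, which ordinary code never emits (see the comment near DoArithmeticT below). A self-contained sketch of the decoding, assuming the standard MIPS I-type layout (opcode 6 bits, rs 5, rt 5, imm 16; ANDI opcode 0x0C):

    #include <cstdint>

    bool LooksLikeInlinedSmiMarker(uint32_t instr) {
      uint32_t opcode = instr >> 26;       // top 6 bits
      uint32_t rt = (instr >> 16) & 0x1F;  // destination register of andi
      return opcode == 0x0C && rt == 0;    // andi with rt == zero_reg
    }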
@@ -1736,7 +1622,7 @@ void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
// was inlined.
Instr instr = Assembler::instr_at(andi_instruction_address);
if (!(Assembler::IsAndImmediate(instr) &&
- Assembler::GetRt(instr) == (uint32_t)zero_reg.code())) {
+ Assembler::GetRt(instr) == static_cast<uint32_t>(zero_reg.code()))) {
return;
}
diff --git a/src/3rdparty/v8/src/mips/lithium-codegen-mips.cc b/src/3rdparty/v8/src/mips/lithium-codegen-mips.cc
index b268fb3..16d7c26 100644
--- a/src/3rdparty/v8/src/mips/lithium-codegen-mips.cc
+++ b/src/3rdparty/v8/src/mips/lithium-codegen-mips.cc
@@ -67,8 +67,6 @@ bool LCodeGen::GenerateCode() {
status_ = GENERATING;
CpuFeatures::Scope scope(FPU);
- CodeStub::GenerateFPStubs();
-
// Open a frame scope to indicate that there is a frame on the stack. The
// NONE indicates that the scope shouldn't actually generate code to set up
// the frame (that is done in GeneratePrologue).
@@ -77,6 +75,7 @@ bool LCodeGen::GenerateCode() {
return GeneratePrologue() &&
GenerateBody() &&
GenerateDeferredCode() &&
+ GenerateDeoptJumpTable() &&
GenerateSafepointTable();
}
@@ -85,7 +84,14 @@ void LCodeGen::FinishCode(Handle<Code> code) {
ASSERT(is_done());
code->set_stack_slots(GetStackSlotCount());
code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
+ if (FLAG_weak_embedded_maps_in_optimized_code) {
+ RegisterDependentCodeForEmbeddedMaps(code);
+ }
PopulateDeoptimizationData(code);
+ for (int i = 0; i < prototype_maps_.length(); i++) {
+ prototype_maps_.at(i)->AddDependentCode(
+ DependentCode::kPrototypeCheckGroup, code);
+ }
}
@@ -116,57 +122,95 @@ void LCodeGen::Comment(const char* format, ...) {
bool LCodeGen::GeneratePrologue() {
ASSERT(is_generating());
- ProfileEntryHookStub::MaybeCallEntryHook(masm_);
+ if (info()->IsOptimizing()) {
+ ProfileEntryHookStub::MaybeCallEntryHook(masm_);
#ifdef DEBUG
- if (strlen(FLAG_stop_at) > 0 &&
- info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
- __ stop("stop_at");
- }
+ if (strlen(FLAG_stop_at) > 0 &&
+ info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
+ __ stop("stop_at");
+ }
#endif
- // a1: Callee's JS function.
- // cp: Callee's context.
- // fp: Caller's frame pointer.
- // lr: Caller's pc.
-
- // Strict mode functions and builtins need to replace the receiver
- // with undefined when called as functions (without an explicit
- // receiver object). r5 is zero for method calls and non-zero for
- // function calls.
- if (!info_->is_classic_mode() || info_->is_native()) {
- Label ok;
- __ Branch(&ok, eq, t1, Operand(zero_reg));
-
- int receiver_offset = scope()->num_parameters() * kPointerSize;
- __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
- __ sw(a2, MemOperand(sp, receiver_offset));
- __ bind(&ok);
+ // a1: Callee's JS function.
+ // cp: Callee's context.
+ // fp: Caller's frame pointer.
+ // lr: Caller's pc.
+
+ // Strict mode functions and builtins need to replace the receiver
+ // with undefined when called as functions (without an explicit
+ // receiver object). r5 is zero for method calls and non-zero for
+ // function calls.
+ if (!info_->is_classic_mode() || info_->is_native()) {
+ Label ok;
+ __ Branch(&ok, eq, t1, Operand(zero_reg));
+
+ int receiver_offset = scope()->num_parameters() * kPointerSize;
+ __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
+ __ sw(a2, MemOperand(sp, receiver_offset));
+ __ bind(&ok);
+ }
}
- __ Push(ra, fp, cp, a1);
- __ Addu(fp, sp, Operand(2 * kPointerSize)); // Adj. FP to point to saved FP.
+ info()->set_prologue_offset(masm_->pc_offset());
+ if (NeedsEagerFrame()) {
+ if (info()->IsStub()) {
+ __ Push(ra, fp, cp);
+ __ Push(Smi::FromInt(StackFrame::STUB));
+ // Adjust FP to point to saved FP.
+ __ Addu(fp, sp, Operand(2 * kPointerSize));
+ } else {
+ // The following three instructions must remain together and unmodified
+ // for code aging to work properly.
+ __ Push(ra, fp, cp, a1);
+ // Add unused load of ip to ensure prologue sequence is identical for
+ // full-codegen and lithium-codegen.
+ __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+ // Adj. FP to point to saved FP.
+ __ Addu(fp, sp, Operand(2 * kPointerSize));
+ }
+ frame_is_built_ = true;
+ }
// Reserve space for the stack slots needed by the code.
int slots = GetStackSlotCount();
if (slots > 0) {
if (FLAG_debug_code) {
- __ li(a0, Operand(slots));
- __ li(a2, Operand(kSlotsZapValue));
+ __ Subu(sp, sp, Operand(slots * kPointerSize));
+ __ push(a0);
+ __ push(a1);
+ __ Addu(a0, sp, Operand(slots * kPointerSize));
+ __ li(a1, Operand(kSlotsZapValue));
Label loop;
__ bind(&loop);
- __ push(a2);
- __ Subu(a0, a0, 1);
- __ Branch(&loop, ne, a0, Operand(zero_reg));
+ __ Subu(a0, a0, Operand(kPointerSize));
+ __ sw(a1, MemOperand(a0, 2 * kPointerSize));
+ __ Branch(&loop, ne, a0, Operand(sp));
+ __ pop(a1);
+ __ pop(a0);
} else {
__ Subu(sp, sp, Operand(slots * kPointerSize));
}
}
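The rewritten debug path reserves the slots first, saves a0/a1 as scratch, then walks down from the top of the reserved area; the store offset of 2 * kPointerSize skips the two saved registers, so exactly the reserved slots are filled with kSlotsZapValue. A plain C++ sketch of the effect, with the zap value passed in rather than hard-coded:

    #include <cstdint>

    // Fill the reserved slot region so stale stack data is recognizable in
    // crash dumps; slot_base corresponds to sp + 2 * kPointerSize above.
    void ZapReservedSlots(uint32_t* slot_base, int slots, uint32_t zap_value) {
      for (int i = 0; i < slots; i++) {
        slot_base[i] = zap_value;
      }
    }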
+ if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(FPU)) {
+ CpuFeatures::Scope scope(FPU);
+ Comment(";;; Save clobbered callee double registers");
+ int count = 0;
+ BitVector* doubles = chunk()->allocated_double_registers();
+ BitVector::Iterator save_iterator(doubles);
+ while (!save_iterator.Done()) {
+ __ sdc1(DoubleRegister::FromAllocationIndex(save_iterator.Current()),
+ MemOperand(sp, count * kDoubleSize));
+ save_iterator.Advance();
+ count++;
+ }
+ }
+
// Possibly allocate a local context.
- int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+ int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
if (heap_slots > 0 ||
- (scope()->is_qml_mode() && scope()->is_global_scope())) {
+ (scope() != NULL && scope()->is_qml_mode() && scope()->is_global_scope())) {
Comment(";;; Allocate local context");
// Argument to NewContext is the function, which is in a1.
__ push(a1);
@@ -201,7 +245,7 @@ bool LCodeGen::GeneratePrologue() {
}
// Trace the call.
- if (FLAG_trace) {
+ if (FLAG_trace && info()->IsOptimizing()) {
__ CallRuntime(Runtime::kTraceEnter, 0);
}
EnsureSpaceForLazyDeopt();
@@ -222,7 +266,30 @@ bool LCodeGen::GenerateBody() {
}
if (emit_instructions) {
- Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic());
+ if (FLAG_code_comments) {
+ HValue* hydrogen = instr->hydrogen_value();
+ if (hydrogen != NULL) {
+ if (hydrogen->IsChange()) {
+ HValue* changed_value = HChange::cast(hydrogen)->value();
+ int use_id = 0;
+ const char* use_mnemo = "dead";
+ if (hydrogen->UseCount() >= 1) {
+ HValue* use_value = hydrogen->uses().value();
+ use_id = use_value->id();
+ use_mnemo = use_value->Mnemonic();
+ }
+ Comment(";;; @%d: %s. <of #%d %s for #%d %s>",
+ current_instruction_, instr->Mnemonic(),
+ changed_value->id(), changed_value->Mnemonic(),
+ use_id, use_mnemo);
+ } else {
+ Comment(";;; @%d: %s. <#%d>", current_instruction_,
+ instr->Mnemonic(), hydrogen->id());
+ }
+ } else {
+ Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic());
+ }
+ }
instr->CompileToNative(this);
}
}
@@ -236,10 +303,31 @@ bool LCodeGen::GenerateDeferredCode() {
for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
LDeferredCode* code = deferred_[i];
__ bind(code->entry());
+ if (NeedsDeferredFrame()) {
+ Comment(";;; Deferred build frame",
+ code->instruction_index(),
+ code->instr()->Mnemonic());
+ ASSERT(!frame_is_built_);
+ ASSERT(info()->IsStub());
+ frame_is_built_ = true;
+ __ MultiPush(cp.bit() | fp.bit() | ra.bit());
+ __ li(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
+ __ push(scratch0());
+ __ Addu(fp, sp, Operand(2 * kPointerSize));
+ }
Comment(";;; Deferred code @%d: %s.",
code->instruction_index(),
code->instr()->Mnemonic());
code->Generate();
+ if (NeedsDeferredFrame()) {
+ Comment(";;; Deferred destroy frame",
+ code->instruction_index(),
+ code->instr()->Mnemonic());
+ ASSERT(frame_is_built_);
+ __ pop(at);
+ __ MultiPop(cp.bit() | fp.bit() | ra.bit());
+ frame_is_built_ = false;
+ }
__ jmp(code->exit());
}
}
@@ -251,10 +339,81 @@ bool LCodeGen::GenerateDeferredCode() {
bool LCodeGen::GenerateDeoptJumpTable() {
- // TODO(plind): not clear that this will have advantage for MIPS.
- // Skipping it for now. Raised issue #100 for this.
- Abort("Unimplemented: GenerateDeoptJumpTable");
- return false;
+ // Check that the jump table is accessible from everywhere in the function
+ // code, i.e. that offsets to the table can be encoded in the 16-bit signed
+ // immediate of a branch instruction.
+ // To simplify we consider the code size from the first instruction to the
+ // end of the jump table.
+ if (!is_int16((masm()->pc_offset() / Assembler::kInstrSize) +
+ deopt_jump_table_.length() * 12)) {
+ Abort("Generated code is too large");
+ }
+
+ Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
+ __ RecordComment("[ Deoptimization jump table");
+ Label table_start;
+ __ bind(&table_start);
+ Label needs_frame_not_call;
+ Label needs_frame_is_call;
+ for (int i = 0; i < deopt_jump_table_.length(); i++) {
+ __ bind(&deopt_jump_table_[i].label);
+ Address entry = deopt_jump_table_[i].address;
+ bool is_lazy_deopt = deopt_jump_table_[i].is_lazy_deopt;
+ Deoptimizer::BailoutType type =
+ is_lazy_deopt ? Deoptimizer::LAZY : Deoptimizer::EAGER;
+ int id = Deoptimizer::GetDeoptimizationId(entry, type);
+ if (id == Deoptimizer::kNotDeoptimizationEntry) {
+ Comment(";;; jump table entry %d.", i);
+ } else {
+ Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
+ }
+ __ li(t9, Operand(ExternalReference::ForDeoptEntry(entry)));
+ if (deopt_jump_table_[i].needs_frame) {
+ if (is_lazy_deopt) {
+ if (needs_frame_is_call.is_bound()) {
+ __ Branch(&needs_frame_is_call);
+ } else {
+ __ bind(&needs_frame_is_call);
+ __ MultiPush(cp.bit() | fp.bit() | ra.bit());
+ // This variant of deopt can only be used with stubs. Since we don't
+ // have a function pointer to install in the stack frame that we're
+ // building, install a special marker there instead.
+ ASSERT(info()->IsStub());
+ __ li(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
+ __ push(scratch0());
+ __ Addu(fp, sp, Operand(2 * kPointerSize));
+ __ Call(t9);
+ }
+ } else {
+ if (needs_frame_not_call.is_bound()) {
+ __ Branch(&needs_frame_not_call);
+ } else {
+ __ bind(&needs_frame_not_call);
+ __ MultiPush(cp.bit() | fp.bit() | ra.bit());
+ // This variant of deopt can only be used with stubs. Since we don't
+ // have a function pointer to install in the stack frame that we're
+ // building, install a special marker there instead.
+ ASSERT(info()->IsStub());
+ __ li(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
+ __ push(scratch0());
+ __ Addu(fp, sp, Operand(2 * kPointerSize));
+ __ Jump(t9);
+ }
+ }
+ } else {
+ if (is_lazy_deopt) {
+ __ Call(t9);
+ } else {
+ __ Jump(t9);
+ }
+ }
+ }
+ __ RecordComment("]");
+
+ // The deoptimization jump table is the last part of the instruction
+ // sequence. Mark the generated code as done unless we bailed out.
+ if (!is_aborted()) status_ = DONE;
+ return !is_aborted();
}
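The guard at the top of GenerateDeoptJumpTable keeps every branch into the table encodable: MIPS branch offsets are 16-bit signed instruction counts, and each table entry is budgeted at 12 units here (the constant used above; the exact per-entry size is a port-specific estimate). A sketch of that check:

    #include <cstdint>

    bool DeoptTableIsReachable(int pc_offset_bytes, int table_length) {
      const int kInstrSize = 4;  // MIPS32 fixed-width instructions
      int32_t span = pc_offset_bytes / kInstrSize + table_length * 12;
      return span >= INT16_MIN && span <= INT16_MAX;  // is_int16(span)
    }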
@@ -363,8 +522,6 @@ bool LCodeGen::IsInteger32(LConstantOperand* op) const {
int LCodeGen::ToInteger32(LConstantOperand* op) const {
HConstant* constant = chunk_->LookupConstant(op);
- ASSERT(chunk_->LookupLiteralRepresentation(op).IsInteger32());
- ASSERT(constant->HasInteger32Value());
return constant->Integer32Value();
}
@@ -405,37 +562,20 @@ MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
ASSERT(!op->IsRegister());
ASSERT(!op->IsDoubleRegister());
ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
- int index = op->index();
- if (index >= 0) {
- // Local or spill slot. Skip the frame pointer, function, and
- // context in the fixed part of the frame.
- return MemOperand(fp, -(index + 3) * kPointerSize);
- } else {
- // Incoming parameter. Skip the return address.
- return MemOperand(fp, -(index - 1) * kPointerSize);
- }
+ return MemOperand(fp, StackSlotOffset(op->index()));
}
MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
ASSERT(op->IsDoubleStackSlot());
- int index = op->index();
- if (index >= 0) {
- // Local or spill slot. Skip the frame pointer, function, context,
- // and the first word of the double in the fixed part of the frame.
- return MemOperand(fp, -(index + 3) * kPointerSize + kPointerSize);
- } else {
- // Incoming parameter. Skip the return address and the first word of
- // the double.
- return MemOperand(fp, -(index - 1) * kPointerSize + kPointerSize);
- }
+ return MemOperand(fp, StackSlotOffset(op->index()) + kPointerSize);
}
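The two deleted branches spell out what the new StackSlotOffset() helper has to compute; its real definition lives elsewhere in the tree, so the following is a reconstruction from the removed code, not the helper itself:

    // Frame layout on 32-bit MIPS: fp-relative offsets for spill slots and
    // incoming parameters, matching the branches deleted above.
    int StackSlotOffsetSketch(int index) {
      const int kPointerSize = 4;
      if (index >= 0) {
        // Local or spill slot: skip frame pointer, function and context.
        return -(index + 3) * kPointerSize;
      }
      // Incoming parameter: skip the return address.
      return -(index - 1) * kPointerSize;
    }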
void LCodeGen::WriteTranslation(LEnvironment* environment,
Translation* translation,
- int* arguments_index,
- int* arguments_count) {
+ int* pushed_arguments_index,
+ int* pushed_arguments_count) {
if (environment == NULL) return;
// The translation includes one command per value in the environment.
@@ -447,14 +587,16 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
// arguments index points to the first element of a sequence of tagged
// values on the stack that represent the arguments. This needs to be
// kept in sync with the LArgumentsElements implementation.
- *arguments_index = -environment->parameter_count();
- *arguments_count = environment->parameter_count();
+ *pushed_arguments_index = -environment->parameter_count();
+ *pushed_arguments_count = environment->parameter_count();
WriteTranslation(environment->outer(),
translation,
- arguments_index,
- arguments_count);
- int closure_id = *info()->closure() != *environment->closure()
+ pushed_arguments_index,
+ pushed_arguments_count);
+ bool has_closure_id = !info()->closure().is_null() &&
+ *info()->closure() != *environment->closure();
+ int closure_id = has_closure_id
? DefineDeoptimizationLiteral(environment->closure())
: Translation::kSelfLiteralId;
@@ -475,19 +617,29 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
ASSERT(height == 0);
translation->BeginSetterStubFrame(closure_id);
break;
+ case STUB:
+ translation->BeginCompiledStubFrame();
+ break;
case ARGUMENTS_ADAPTOR:
translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
break;
}
// Inlined frames which push their arguments cause the index to be
- // bumped and a new stack area to be used for materialization.
- if (environment->entry() != NULL &&
- environment->entry()->arguments_pushed()) {
- *arguments_index = *arguments_index < 0
- ? GetStackSlotCount()
- : *arguments_index + *arguments_count;
- *arguments_count = environment->entry()->arguments_count() + 1;
+ // bumped and another stack area to be used for materialization,
+ // otherwise actual argument values are unknown for inlined frames.
+ bool arguments_known = true;
+ int arguments_index = *pushed_arguments_index;
+ int arguments_count = *pushed_arguments_count;
+ if (environment->entry() != NULL) {
+ arguments_known = environment->entry()->arguments_pushed();
+ arguments_index = arguments_index < 0
+ ? GetStackSlotCount() : arguments_index + arguments_count;
+ arguments_count = environment->entry()->arguments_count() + 1;
+ if (environment->entry()->arguments_pushed()) {
+ *pushed_arguments_index = arguments_index;
+ *pushed_arguments_count = arguments_count;
+ }
}
for (int i = 0; i < translation_size; ++i) {
@@ -502,8 +654,9 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
environment->spilled_registers()[value->index()],
environment->HasTaggedValueAt(i),
environment->HasUint32ValueAt(i),
- *arguments_index,
- *arguments_count);
+ arguments_known,
+ arguments_index,
+ arguments_count);
} else if (
value->IsDoubleRegister() &&
environment->spilled_double_registers()[value->index()] != NULL) {
@@ -513,8 +666,9 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
environment->spilled_double_registers()[value->index()],
false,
false,
- *arguments_index,
- *arguments_count);
+ arguments_known,
+ arguments_index,
+ arguments_count);
}
}
@@ -522,8 +676,9 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
value,
environment->HasTaggedValueAt(i),
environment->HasUint32ValueAt(i),
- *arguments_index,
- *arguments_count);
+ arguments_known,
+ arguments_index,
+ arguments_count);
}
}
@@ -532,13 +687,15 @@ void LCodeGen::AddToTranslation(Translation* translation,
LOperand* op,
bool is_tagged,
bool is_uint32,
+ bool arguments_known,
int arguments_index,
int arguments_count) {
if (op == NULL) {
// TODO(twuerthinger): Introduce marker operands to indicate that this value
// is not present and must be reconstructed from the deoptimizer. Currently
// this is only used for the arguments object.
- translation->StoreArgumentsObject(arguments_index, arguments_count);
+ translation->StoreArgumentsObject(
+ arguments_known, arguments_index, arguments_count);
} else if (op->IsStackSlot()) {
if (is_tagged) {
translation->StoreStackSlot(op->index());
@@ -661,16 +818,19 @@ void LCodeGen::DeoptimizeIf(Condition cc,
RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
ASSERT(environment->HasBeenRegistered());
int id = environment->deoptimization_index();
- Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER);
+ ASSERT(info()->IsOptimizing() || info()->IsStub());
+ Deoptimizer::BailoutType bailout_type = info()->IsStub()
+ ? Deoptimizer::LAZY
+ : Deoptimizer::EAGER;
+ Address entry =
+ Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
if (entry == NULL) {
Abort("bailout was not prepared");
return;
}
ASSERT(FLAG_deopt_every_n_times < 2); // Other values not supported on MIPS.
-
- if (FLAG_deopt_every_n_times == 1 &&
- info_->shared_info()->opt_count() == id) {
+ if (FLAG_deopt_every_n_times == 1 && info_->opt_count() == id) {
__ Jump(entry, RelocInfo::RUNTIME_ENTRY);
return;
}
@@ -684,9 +844,51 @@ void LCodeGen::DeoptimizeIf(Condition cc,
__ bind(&skip);
}
- // TODO(plind): The Arm port is a little different here, due to their
- // DeOpt jump table, which is not used for Mips yet.
- __ Jump(entry, RelocInfo::RUNTIME_ENTRY, cc, src1, src2);
+ ASSERT(info()->IsStub() || frame_is_built_);
+ bool needs_lazy_deopt = info()->IsStub();
+ if (cc == al && frame_is_built_) {
+ if (needs_lazy_deopt) {
+ __ Call(entry, RelocInfo::RUNTIME_ENTRY, cc, src1, src2);
+ } else {
+ __ Jump(entry, RelocInfo::RUNTIME_ENTRY, cc, src1, src2);
+ }
+ } else {
+ // We often have several deopts to the same entry, reuse the last
+ // jump entry if this is the case.
+ if (deopt_jump_table_.is_empty() ||
+ (deopt_jump_table_.last().address != entry) ||
+ (deopt_jump_table_.last().is_lazy_deopt != needs_lazy_deopt) ||
+ (deopt_jump_table_.last().needs_frame != !frame_is_built_)) {
+ JumpTableEntry table_entry(entry, !frame_is_built_, needs_lazy_deopt);
+ deopt_jump_table_.Add(table_entry, zone());
+ }
+ __ Branch(&deopt_jump_table_.last().label, cc, src1, src2);
+ }
+}
+
+
+void LCodeGen::RegisterDependentCodeForEmbeddedMaps(Handle<Code> code) {
+ ZoneList<Handle<Map> > maps(1, zone());
+ int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
+ for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
+ RelocInfo::Mode mode = it.rinfo()->rmode();
+ if (mode == RelocInfo::EMBEDDED_OBJECT &&
+ it.rinfo()->target_object()->IsMap()) {
+ Handle<Map> map(Map::cast(it.rinfo()->target_object()));
+ if (map->CanTransition()) {
+ maps.Add(map, zone());
+ }
+ }
+ }
+#ifdef VERIFY_HEAP
+ // This disables verification of weak embedded maps after full GC.
+ // AddDependentCode can cause a GC, which would observe the state where
+ // this code is not yet in the depended code lists of the embedded maps.
+ NoWeakEmbeddedMapsVerificationScope disable_verification_of_embedded_maps;
+#endif
+ for (int i = 0; i < maps.length(); i++) {
+ maps.at(i)->AddDependentCode(DependentCode::kWeaklyEmbeddedGroup, code);
+ }
}
@@ -864,39 +1066,39 @@ void LCodeGen::DoCallStub(LCallStub* instr) {
switch (instr->hydrogen()->major_key()) {
case CodeStub::RegExpConstructResult: {
RegExpConstructResultStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
break;
}
case CodeStub::RegExpExec: {
RegExpExecStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
break;
}
case CodeStub::SubString: {
SubStringStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
break;
}
case CodeStub::NumberToString: {
NumberToStringStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
break;
}
case CodeStub::StringAdd: {
StringAddStub stub(NO_STRING_ADD_FLAGS);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
break;
}
case CodeStub::StringCompare: {
StringCompareStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
break;
}
case CodeStub::TranscendentalCache: {
__ lw(a0, MemOperand(sp, 0));
TranscendentalCacheStub stub(instr->transcendental_type(),
TranscendentalCacheStub::TAGGED);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
break;
}
default:
@@ -948,6 +1150,14 @@ void LCodeGen::DoModI(LModI* instr) {
DeoptimizeIf(eq, instr->environment(), right, Operand(zero_reg));
}
+ // Check for (kMinInt % -1).
+ if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+ Label left_not_min_int;
+ __ Branch(&left_not_min_int, ne, left, Operand(kMinInt));
+ DeoptimizeIf(eq, instr->environment(), right, Operand(-1));
+ __ bind(&left_not_min_int);
+ }
+
__ Branch(USE_DELAY_SLOT, &done, ge, left, Operand(zero_reg));
__ mfhi(result);
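The new guard above exists because kMinInt has no positive counterpart in 32-bit two's complement: the hardware divide that also produces the remainder overflows on kMinInt / -1, so kMinInt % -1 deoptimizes instead of relying on the hardware result. The condition, spelled out:

    #include <cstdint>

    bool ModWouldOverflow(int32_t left, int32_t right) {
      return left == INT32_MIN && right == -1;  // kMinInt % -1
    }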
@@ -981,7 +1191,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
__ bind(&left_not_zero);
}
- // Check for (-kMinInt / -1).
+ // Check for (kMinInt / -1).
if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
Label left_not_min_int;
__ Branch(&left_not_min_int, ne, left, Operand(kMinInt));
@@ -995,6 +1205,18 @@ void LCodeGen::DoDivI(LDivI* instr) {
}
+void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) {
+ DoubleRegister addend = ToDoubleRegister(instr->addend());
+ DoubleRegister multiplier = ToDoubleRegister(instr->multiplier());
+ DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand());
+
+ // This is computed in-place.
+ ASSERT(addend.is(ToDoubleRegister(instr->result())));
+
+ __ madd_d(addend, addend, multiplier, multiplicand);
+}
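DoMultiplyAddD maps straight onto the FPU's multiply-add instruction; given the operand order in the call above, and with the result aliasing the addend register, the computation is:

    // In-place multiply-add, as wired up by DoMultiplyAddD above.
    double MultiplyAddSketch(double addend, double multiplier,
                             double multiplicand) {
      return addend + multiplier * multiplicand;
    }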
+
+
void LCodeGen::DoMulI(LMulI* instr) {
Register scratch = scratch0();
Register result = ToRegister(instr->result());
@@ -1141,6 +1363,9 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
// No need to mask the right operand on MIPS, it is built into the variable
// shift instructions.
switch (instr->op()) {
+ case Token::ROR:
+ __ Ror(result, left, Operand(ToRegister(right_op)));
+ break;
case Token::SAR:
__ srav(result, left, ToRegister(right_op));
break;
@@ -1162,6 +1387,13 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
int value = ToInteger32(LConstantOperand::cast(right_op));
uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
switch (instr->op()) {
+ case Token::ROR:
+ if (shift_count != 0) {
+ __ Ror(result, left, Operand(shift_count));
+ } else {
+ __ Move(result, left);
+ }
+ break;
case Token::SAR:
if (shift_count != 0) {
__ sra(result, left, shift_count);
@@ -1243,6 +1475,7 @@ void LCodeGen::DoConstantI(LConstantI* instr) {
void LCodeGen::DoConstantD(LConstantD* instr) {
ASSERT(instr->result()->IsDoubleRegister());
DoubleRegister result = ToDoubleRegister(instr->result());
+ CpuFeatures::Scope scope(FPU);
double v = instr->value();
__ Move(result, v);
}
@@ -1351,6 +1584,15 @@ void LCodeGen::DoDateField(LDateField* instr) {
}
+void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
+ SeqStringSetCharGenerator::Generate(masm(),
+ instr->encoding(),
+ ToRegister(instr->string()),
+ ToRegister(instr->index()),
+ ToRegister(instr->value()));
+}
+
+
void LCodeGen::DoBitNotI(LBitNotI* instr) {
Register input = ToRegister(instr->value());
Register result = ToRegister(instr->result());
@@ -1431,6 +1673,7 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
__ bind(&done);
} else {
ASSERT(instr->hydrogen()->representation().IsDouble());
+ CpuFeatures::Scope scope(FPU);
FPURegister left_reg = ToDoubleRegister(left);
FPURegister right_reg = ToDoubleRegister(right);
FPURegister result_reg = ToDoubleRegister(instr->result());
@@ -1471,6 +1714,7 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
+ CpuFeatures::Scope scope(FPU);
DoubleRegister left = ToDoubleRegister(instr->left());
DoubleRegister right = ToDoubleRegister(instr->right());
DoubleRegister result = ToDoubleRegister(instr->result());
@@ -1517,7 +1761,7 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
ASSERT(ToRegister(instr->result()).is(v0));
BinaryOpStub stub(instr->op(), NO_OVERWRITE);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
// Other arch use a nop here, to signal that there is no inlined
// patchable code. Mips does not need the nop, since our marker
// instruction (andi zero_reg) will never be used in normal code.
@@ -1580,6 +1824,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
Register reg = ToRegister(instr->value());
EmitBranch(true_block, false_block, ne, reg, Operand(zero_reg));
} else if (r.IsDouble()) {
+ CpuFeatures::Scope scope(FPU);
DoubleRegister reg = ToDoubleRegister(instr->value());
// Test the double value. Zero and NaN are false.
EmitBranchF(true_block, false_block, ne, reg, kDoubleRegZero);
@@ -1657,6 +1902,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
}
if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
+ CpuFeatures::Scope scope(FPU);
// heap number -> false iff +0, -0, or NaN.
DoubleRegister dbl_scratch = double_scratch0();
Label not_heap_number;
@@ -1736,6 +1982,7 @@ void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
EmitGoto(next_block);
} else {
if (instr->is_double()) {
+ CpuFeatures::Scope scope(FPU);
// Compare left and right as doubles and load the
// resulting flags into the normal status register.
FPURegister left_reg = ToDoubleRegister(left);
@@ -1952,7 +2199,7 @@ void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
- Handle<Code> ic = CompareIC::GetUninitialized(op);
+ Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
CallCode(ic, RelocInfo::CODE_TARGET, instr);
Condition condition = ComputeCompareCondition(op);
@@ -2041,7 +2288,7 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
__ JumpIfSmi(input, is_false);
- if (class_name->IsEqualTo(CStrVector("Function"))) {
+ if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Function"))) {
// Assuming the following assertions, we can use the same compares to test
// for both being a function type and being in the object type range.
STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
@@ -2070,7 +2317,7 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
// Objects with a non-function constructor have class 'Object'.
__ GetObjectType(temp, temp2, temp2);
- if (class_name->IsEqualTo(CStrVector("Object"))) {
+ if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Object"))) {
__ Branch(is_true, ne, temp2, Operand(JS_FUNCTION_TYPE));
} else {
__ Branch(is_false, ne, temp2, Operand(JS_FUNCTION_TYPE));
@@ -2081,12 +2328,12 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
__ lw(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset));
__ lw(temp, FieldMemOperand(temp,
SharedFunctionInfo::kInstanceClassNameOffset));
- // The class name we are testing against is a symbol because it's a literal.
- // The name in the constructor is a symbol because of the way the context is
- // booted. This routine isn't expected to work for random API-created
+ // The class name we are testing against is internalized since it's a literal.
+ // The name in the constructor is internalized because of the way the context
+ // is booted. This routine isn't expected to work for random API-created
// classes and it doesn't have to because you can't access it with natives
- // syntax. Since both sides are symbols it is sufficient to use an identity
- // comparison.
+ // syntax. Since both sides are internalized it is sufficient to use an
+ // identity comparison.
// End with the address of this class_name instance in temp register.
// On MIPS, the caller must do the comparison with Handle<String> class_name.
@@ -2130,7 +2377,7 @@ void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
ASSERT(result.is(v0));
InstanceofStub stub(InstanceofStub::kArgsInRegisters);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
__ Branch(&true_label, eq, result, Operand(zero_reg));
__ li(result, Operand(factory()->false_value()));
@@ -2250,7 +2497,7 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
__ li(temp, Operand(delta * kPointerSize), CONSTANT_SIZE);
__ StoreToSafepointRegisterSlot(temp, temp);
}
- CallCodeGeneric(stub.GetCode(),
+ CallCodeGeneric(stub.GetCode(isolate()),
RelocInfo::CODE_TARGET,
instr,
RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
@@ -2262,10 +2509,18 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
}
+void LCodeGen::DoInstanceSize(LInstanceSize* instr) {
+ Register object = ToRegister(instr->object());
+ Register result = ToRegister(instr->result());
+ __ lw(result, FieldMemOperand(object, HeapObject::kMapOffset));
+ __ lbu(result, FieldMemOperand(result, Map::kInstanceSizeOffset));
+}
+
+
void LCodeGen::DoCmpT(LCmpT* instr) {
Token::Value op = instr->op();
- Handle<Code> ic = CompareIC::GetUninitialized(op);
+ Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
CallCode(ic, RelocInfo::CODE_TARGET, instr);
// On MIPS there is no need for a "no inlined smi code" marker (nop).
@@ -2283,16 +2538,33 @@ void LCodeGen::DoCmpT(LCmpT* instr) {
void LCodeGen::DoReturn(LReturn* instr) {
- if (FLAG_trace) {
+ if (FLAG_trace && info()->IsOptimizing()) {
// Push the return value on the stack as the parameter.
// Runtime::TraceExit returns its parameter in v0.
__ push(v0);
__ CallRuntime(Runtime::kTraceExit, 1);
}
- int32_t sp_delta = (GetParameterCount() + 1) * kPointerSize;
- __ mov(sp, fp);
- __ Pop(ra, fp);
- __ Addu(sp, sp, Operand(sp_delta));
+ if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(FPU)) {
+ CpuFeatures::Scope scope(FPU);
+ ASSERT(NeedsEagerFrame());
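+ // Restore the caller's double registers from the save area that the
+ // prologue set up, walking the allocated double registers in index order.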
+ BitVector* doubles = chunk()->allocated_double_registers();
+ BitVector::Iterator save_iterator(doubles);
+ int count = 0;
+ while (!save_iterator.Done()) {
+ __ ldc1(DoubleRegister::FromAllocationIndex(save_iterator.Current()),
+ MemOperand(sp, count * kDoubleSize));
+ save_iterator.Advance();
+ count++;
+ }
+ }
+ if (NeedsEagerFrame()) {
+ int32_t sp_delta = (GetParameterCount() + 1) * kPointerSize;
+ __ mov(sp, fp);
+ __ Pop(ra, fp);
+ if (!info()->IsStub()) {
+ __ Addu(sp, sp, Operand(sp_delta));
+ }
+ }
__ Jump(ra);
}
@@ -2651,12 +2923,61 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
__ sll(scratch0(), key, shift_size);
__ Addu(scratch0(), scratch0(), external_pointer);
}
+ if (CpuFeatures::IsSupported(FPU)) {
+ CpuFeatures::Scope scope(FPU);
+ if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
+ __ lwc1(result, MemOperand(scratch0(), additional_offset));
+ __ cvt_d_s(result, result);
+ } else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
+ __ ldc1(result, MemOperand(scratch0(), additional_offset));
+ }
+ } else {
+ if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
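+ // Software binary32 -> binary64 conversion into the sfpd_hi:sfpd_lo
+ // register pair: the exponent is rebiased from 127 to 1023 and the
+ // 23-bit mantissa is shifted up into the 52-bit field. For example,
+ // 1.5f (sign 0, exponent 127, mantissa 0x400000) becomes sign 0,
+ // exponent 1023, mantissa 0x8000000000000, i.e. 1.5 as a double.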
+ Register value = external_pointer;
+ __ lw(value, MemOperand(scratch0(), additional_offset));
+ __ And(sfpd_lo, value, Operand(kBinary32MantissaMask));
+
+ __ srl(scratch0(), value, kBinary32MantissaBits);
+ __ And(scratch0(), scratch0(),
+ Operand(kBinary32ExponentMask >> kBinary32MantissaBits));
+
+ Label exponent_rebiased;
+ __ Xor(at, scratch0(), Operand(0x00));
+ __ Branch(&exponent_rebiased, eq, at, Operand(zero_reg));
+
+ __ Xor(at, scratch0(), Operand(0xff));
+ Label skip;
+ __ Branch(&skip, ne, at, Operand(zero_reg));
+ __ li(scratch0(), Operand(0x7ff));
+ __ bind(&skip);
+ __ Branch(&exponent_rebiased, eq, at, Operand(zero_reg));
+
+ // Rebias exponent.
+ __ Addu(scratch0(),
+ scratch0(),
+ Operand(-kBinary32ExponentBias + HeapNumber::kExponentBias));
+
+ __ bind(&exponent_rebiased);
+ __ And(sfpd_hi, value, Operand(kBinary32SignMask));
+ __ sll(at, scratch0(), HeapNumber::kMantissaBitsInTopWord);
+ __ Or(sfpd_hi, sfpd_hi, at);
+
+ // Shift mantissa.
+ static const int kMantissaShiftForHiWord =
+ kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;
+
+ static const int kMantissaShiftForLoWord =
+ kBitsPerInt - kMantissaShiftForHiWord;
+
+ __ srl(at, sfpd_lo, kMantissaShiftForHiWord);
+ __ Or(sfpd_hi, sfpd_hi, at);
+ __ sll(sfpd_lo, sfpd_lo, kMantissaShiftForLoWord);
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
- __ lwc1(result, MemOperand(scratch0(), additional_offset));
- __ cvt_d_s(result, result);
- } else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
- __ ldc1(result, MemOperand(scratch0(), additional_offset));
+ } else {
+ __ lw(sfpd_lo, MemOperand(scratch0(), additional_offset));
+ __ lw(sfpd_hi, MemOperand(scratch0(),
+ additional_offset + kPointerSize));
+ }
}
} else {
Register result = ToRegister(instr->result());
@@ -2725,25 +3046,28 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
key = ToRegister(instr->key());
}
- if (key_is_constant) {
- __ Addu(elements, elements,
- Operand(((constant_key + instr->additional_index()) <<
- element_size_shift) +
- FixedDoubleArray::kHeaderSize - kHeapObjectTag));
- } else {
+ int base_offset = (FixedDoubleArray::kHeaderSize - kHeapObjectTag) +
+ ((constant_key + instr->additional_index()) << element_size_shift);
+ if (!key_is_constant) {
__ sll(scratch, key, shift_size);
- __ Addu(elements, elements, Operand(scratch));
- __ Addu(elements, elements,
- Operand((FixedDoubleArray::kHeaderSize - kHeapObjectTag) +
- (instr->additional_index() << element_size_shift)));
- }
-
- if (instr->hydrogen()->RequiresHoleCheck()) {
- __ lw(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
- DeoptimizeIf(eq, instr->environment(), scratch, Operand(kHoleNanUpper32));
+ __ Addu(elements, elements, scratch);
+ }
+ if (CpuFeatures::IsSupported(FPU)) {
+ CpuFeatures::Scope scope(FPU);
+ __ Addu(elements, elements, Operand(base_offset));
+ __ ldc1(result, MemOperand(elements));
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ __ lw(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
+ DeoptimizeIf(eq, instr->environment(), scratch, Operand(kHoleNanUpper32));
+ }
+ } else {
+ __ lw(sfpd_hi, MemOperand(elements, base_offset + kPointerSize));
+ __ lw(sfpd_lo, MemOperand(elements, base_offset));
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ ASSERT(kPointerSize == sizeof(kHoleNanLower32));
+ DeoptimizeIf(eq, instr->environment(), sfpd_hi, Operand(kHoleNanUpper32));
+ }
}
-
- __ ldc1(result, MemOperand(elements));
}
@@ -3016,8 +3340,14 @@ void LCodeGen::DoThisFunction(LThisFunction* instr) {
void LCodeGen::DoContext(LContext* instr) {
+ // If there is a non-return use, the context must be moved to a register.
Register result = ToRegister(instr->result());
- __ mov(result, cp);
+ for (HUseIterator it(instr->hydrogen()->uses()); !it.Done(); it.Advance()) {
+ if (!it.value()->IsReturn()) {
+ __ mov(result, cp);
+ return;
+ }
+ }
}
@@ -3188,6 +3518,7 @@ void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) {
void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
+ CpuFeatures::Scope scope(FPU);
// Class for deferred case.
class DeferredMathAbsTaggedHeapNumber: public LDeferredCode {
public:
@@ -3224,24 +3555,22 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
+ CpuFeatures::Scope scope(FPU);
DoubleRegister input = ToDoubleRegister(instr->value());
Register result = ToRegister(instr->result());
- FPURegister single_scratch = double_scratch0().low();
Register scratch1 = scratch0();
Register except_flag = ToRegister(instr->temp());
__ EmitFPUTruncate(kRoundToMinusInf,
- single_scratch,
+ result,
input,
scratch1,
+ double_scratch0(),
except_flag);
// Deopt if the operation did not succeed.
DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
- // Load the result.
- __ mfc1(result, single_scratch);
-
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
// Test for -0.
Label done;
@@ -3255,8 +3584,10 @@ void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
+ CpuFeatures::Scope scope(FPU);
DoubleRegister input = ToDoubleRegister(instr->value());
Register result = ToRegister(instr->result());
+ DoubleRegister double_scratch1 = ToDoubleRegister(instr->temp());
Register scratch = scratch0();
Label done, check_sign_on_zero;
@@ -3308,17 +3639,15 @@ void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
}
Register except_flag = scratch;
-
__ EmitFPUTruncate(kRoundToMinusInf,
- double_scratch0().low(),
- double_scratch0(),
result,
+ double_scratch0(),
+ at,
+ double_scratch1,
except_flag);
DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
- __ mfc1(result, double_scratch0().low());
-
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
// Test for -0.
__ Branch(&done, ne, result, Operand(zero_reg));
@@ -3332,6 +3661,7 @@ void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
+ CpuFeatures::Scope scope(FPU);
DoubleRegister input = ToDoubleRegister(instr->value());
DoubleRegister result = ToDoubleRegister(instr->result());
__ sqrt_d(result, input);
@@ -3339,6 +3669,7 @@ void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
+ CpuFeatures::Scope scope(FPU);
DoubleRegister input = ToDoubleRegister(instr->value());
DoubleRegister result = ToDoubleRegister(instr->result());
DoubleRegister temp = ToDoubleRegister(instr->temp());
@@ -3363,6 +3694,7 @@ void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
void LCodeGen::DoPower(LPower* instr) {
+ CpuFeatures::Scope scope(FPU);
Representation exponent_type = instr->hydrogen()->right()->representation();
// Having marked this as a call, we can use any registers.
// Just make sure that the input/output registers are the expected ones.
@@ -3393,6 +3725,7 @@ void LCodeGen::DoPower(LPower* instr) {
void LCodeGen::DoRandom(LRandom* instr) {
+ CpuFeatures::Scope scope(FPU);
class DeferredDoRandom: public LDeferredCode {
public:
DeferredDoRandom(LCodeGen* codegen, LRandom* instr)
@@ -3468,11 +3801,26 @@ void LCodeGen::DoDeferredRandom(LRandom* instr) {
}
+void LCodeGen::DoMathExp(LMathExp* instr) {
+ CpuFeatures::Scope scope(FPU);
+ DoubleRegister input = ToDoubleRegister(instr->value());
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ DoubleRegister double_scratch1 = ToDoubleRegister(instr->double_temp());
+ DoubleRegister double_scratch2 = double_scratch0();
+ Register temp1 = ToRegister(instr->temp1());
+ Register temp2 = ToRegister(instr->temp2());
+
+ MathExpGenerator::EmitMathExp(
+ masm(), input, result, double_scratch1, double_scratch2,
+ temp1, temp2, scratch0());
+}
+
+
void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(f4));
TranscendentalCacheStub stub(TranscendentalCache::LOG,
TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}
@@ -3480,7 +3828,7 @@ void LCodeGen::DoMathTan(LUnaryMathOperation* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(f4));
TranscendentalCacheStub stub(TranscendentalCache::TAN,
TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}
@@ -3488,7 +3836,7 @@ void LCodeGen::DoMathCos(LUnaryMathOperation* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(f4));
TranscendentalCacheStub stub(TranscendentalCache::COS,
TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}
@@ -3496,7 +3844,7 @@ void LCodeGen::DoMathSin(LUnaryMathOperation* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(f4));
TranscendentalCacheStub stub(TranscendentalCache::SIN,
TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}
@@ -3588,7 +3936,7 @@ void LCodeGen::DoCallFunction(LCallFunction* instr) {
int arity = instr->arity();
CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
__ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}
@@ -3620,9 +3968,29 @@ void LCodeGen::DoCallNew(LCallNew* instr) {
ASSERT(ToRegister(instr->constructor()).is(a1));
ASSERT(ToRegister(instr->result()).is(v0));
+ __ li(a0, Operand(instr->arity()));
+ if (FLAG_optimize_constructed_arrays) {
+ // No cell in a2 for construct type feedback in optimized code.
+ Handle<Object> undefined_value(isolate()->heap()->undefined_value(),
+ isolate());
+ __ li(a2, Operand(undefined_value));
+ }
CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+}
+
+
+void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
+ ASSERT(ToRegister(instr->constructor()).is(a1));
+ ASSERT(ToRegister(instr->result()).is(v0));
+ ASSERT(FLAG_optimize_constructed_arrays);
+
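+ // a2 carries the type feedback cell; its value encodes the elements
+ // kind that ArrayConstructCode specializes for.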
__ li(a0, Operand(instr->arity()));
- CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+ __ li(a2, Operand(instr->hydrogen()->property_cell()));
+ Handle<Code> array_construct_code =
+ isolate()->builtins()->ArrayConstructCode();
+
+ CallCode(array_construct_code, RelocInfo::CONSTRUCT_CALL, instr);
}
@@ -3705,29 +4073,9 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
}
-void LCodeGen::DeoptIfTaggedButNotSmi(LEnvironment* environment,
- HValue* value,
- LOperand* operand) {
- if (value->representation().IsTagged() && !value->type().IsSmi()) {
- if (operand->IsRegister()) {
- __ And(at, ToRegister(operand), Operand(kSmiTagMask));
- DeoptimizeIf(ne, environment, at, Operand(zero_reg));
- } else {
- __ li(at, ToOperand(operand));
- __ And(at, at, Operand(kSmiTagMask));
- DeoptimizeIf(ne, environment, at, Operand(zero_reg));
- }
- }
-}
-
-
void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
- DeoptIfTaggedButNotSmi(instr->environment(),
- instr->hydrogen()->length(),
- instr->length());
- DeoptIfTaggedButNotSmi(instr->environment(),
- instr->hydrogen()->index(),
- instr->index());
+ if (instr->hydrogen()->skip_check()) return;
+
if (instr->index()->IsConstantOperand()) {
int constant_index =
ToInteger32(LConstantOperand::cast(instr->index()));
@@ -3750,6 +4098,7 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
+ CpuFeatures::Scope scope(FPU);
Register external_pointer = ToRegister(instr->elements());
Register key = no_reg;
ElementsKind elements_kind = instr->elements_kind();
@@ -3823,6 +4172,7 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
+ CpuFeatures::Scope scope(FPU);
DoubleRegister value = ToDoubleRegister(instr->value());
Register elements = ToRegister(instr->elements());
Register key = no_reg;
@@ -3946,31 +4296,39 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
Register object_reg = ToRegister(instr->object());
- Register new_map_reg = ToRegister(instr->new_map_temp());
Register scratch = scratch0();
Handle<Map> from_map = instr->original_map();
Handle<Map> to_map = instr->transitioned_map();
- ElementsKind from_kind = from_map->elements_kind();
- ElementsKind to_kind = to_map->elements_kind();
-
- __ mov(ToRegister(instr->result()), object_reg);
+ ElementsKind from_kind = instr->from_kind();
+ ElementsKind to_kind = instr->to_kind();
Label not_applicable;
__ lw(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
__ Branch(&not_applicable, ne, scratch, Operand(from_map));
- __ li(new_map_reg, Operand(to_map));
- if (IsFastSmiElementsKind(from_kind) && IsFastObjectElementsKind(to_kind)) {
+ if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
+ Register new_map_reg = ToRegister(instr->new_map_temp());
+ __ li(new_map_reg, Operand(to_map));
__ sw(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
// Write barrier.
__ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
scratch, kRAHasBeenSaved, kDontSaveFPRegs);
+ } else if (FLAG_compiled_transitions) {
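+ // Generic elements-kind transition through a stub call; registers are
+ // preserved and a safepoint recorded because the stub may allocate.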
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ __ mov(a0, object_reg);
+ __ li(a1, Operand(to_map));
+ TransitionElementsKindStub stub(from_kind, to_kind);
+ __ CallStub(&stub);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
} else if (IsFastSmiElementsKind(from_kind) &&
IsFastDoubleElementsKind(to_kind)) {
Register fixed_object_reg = ToRegister(instr->temp());
ASSERT(fixed_object_reg.is(a2));
+ Register new_map_reg = ToRegister(instr->new_map_temp());
ASSERT(new_map_reg.is(a3));
+ __ li(new_map_reg, Operand(to_map));
__ mov(fixed_object_reg, object_reg);
CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(),
RelocInfo::CODE_TARGET, instr);
@@ -3978,7 +4336,9 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
IsFastObjectElementsKind(to_kind)) {
Register fixed_object_reg = ToRegister(instr->temp());
ASSERT(fixed_object_reg.is(a2));
+ Register new_map_reg = ToRegister(instr->new_map_temp());
ASSERT(new_map_reg.is(a3));
+ __ li(new_map_reg, Operand(to_map));
__ mov(fixed_object_reg, object_reg);
CallCode(isolate()->builtins()->TransitionElementsDoubleToObject(),
RelocInfo::CODE_TARGET, instr);
@@ -3989,11 +4349,21 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
}
+void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
+ Register object = ToRegister(instr->object());
+ Register temp = ToRegister(instr->temp());
+ Label fail;
+ __ TestJSArrayForAllocationSiteInfo(object, temp, ne, &fail);
+ DeoptimizeIf(al, instr->environment());
+ __ bind(&fail);
+}
+
+
void LCodeGen::DoStringAdd(LStringAdd* instr) {
__ push(ToRegister(instr->left()));
__ push(ToRegister(instr->right()));
StringAddStub stub(NO_STRING_CHECK_IN_STUB);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}
@@ -4070,7 +4440,7 @@ void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
ASSERT(!char_code.is(result));
__ Branch(deferred->entry(), hi,
- char_code, Operand(String::kMaxAsciiCharCode));
+ char_code, Operand(String::kMaxOneByteCharCode));
__ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
__ sll(scratch, char_code, kPointerSizeLog2);
__ Addu(result, result, scratch);
@@ -4106,6 +4476,7 @@ void LCodeGen::DoStringLength(LStringLength* instr) {
void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
+ CpuFeatures::Scope scope(FPU);
LOperand* input = instr->value();
ASSERT(input->IsRegister() || input->IsStackSlot());
LOperand* output = instr->result();
@@ -4123,6 +4494,7 @@ void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
+ CpuFeatures::Scope scope(FPU);
LOperand* input = instr->value();
LOperand* output = instr->result();
@@ -4184,13 +4556,51 @@ void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
}
+// Convert an unsigned integer with a specified number of leading zeroes in
+// its binary representation to an IEEE 754 double.
+// The integer to convert is passed in register hiword.
+// The resulting double is returned in registers hiword:loword.
+// This function does not work correctly for 0.
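+// Worked example: hiword = 0x80000000 (2^31) with leading_zeroes = 0 gives
+// meaningful_bits = 31 and biased_exponent = 1023 + 31 = 1054 (0x41E). The
+// mantissa's implicit leading 1 lands on the exponent's least significant
+// bit after the shift (0x41F00000); since 1054 is even, the fixup at the
+// end of this function clears it, leaving 0x41E00000:00000000 == 2^31.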
+static void GenerateUInt2Double(MacroAssembler* masm,
+ Register hiword,
+ Register loword,
+ Register scratch,
+ int leading_zeroes) {
+ const int meaningful_bits = kBitsPerInt - leading_zeroes - 1;
+ const int biased_exponent = HeapNumber::kExponentBias + meaningful_bits;
+
+ const int mantissa_shift_for_hi_word =
+ meaningful_bits - HeapNumber::kMantissaBitsInTopWord;
+ const int mantissa_shift_for_lo_word =
+ kBitsPerInt - mantissa_shift_for_hi_word;
+ masm->li(scratch, Operand(biased_exponent << HeapNumber::kExponentShift));
+ if (mantissa_shift_for_hi_word > 0) {
+ masm->sll(loword, hiword, mantissa_shift_for_lo_word);
+ masm->srl(hiword, hiword, mantissa_shift_for_hi_word);
+ masm->Or(hiword, scratch, hiword);
+ } else {
+ masm->mov(loword, zero_reg);
+ masm->sll(hiword, hiword, mantissa_shift_for_hi_word);
+ masm->Or(hiword, scratch, hiword);
+ }
+
+ // If the least significant bit of the biased exponent was not 1, it was
+ // corrupted by the most significant bit of the mantissa, so fix that here.
+ if (!(biased_exponent & 1)) {
+ masm->li(scratch, 1 << HeapNumber::kExponentShift);
+ masm->nor(scratch, scratch, scratch);
+ masm->and_(hiword, hiword, scratch);
+ }
+}
+
+
void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
LOperand* value,
IntegerSignedness signedness) {
Label slow;
Register src = ToRegister(value);
Register dst = ToRegister(instr->result());
- FPURegister dbl_scratch = double_scratch0();
+ DoubleRegister dbl_scratch = double_scratch0();
// Preserve the value of all registers.
PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
@@ -4204,16 +4614,40 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
__ SmiUntag(src, dst);
__ Xor(src, src, Operand(0x80000000));
}
- __ mtc1(src, dbl_scratch);
- __ cvt_d_w(dbl_scratch, dbl_scratch);
+ if (CpuFeatures::IsSupported(FPU)) {
+ CpuFeatures::Scope scope(FPU);
+ __ mtc1(src, dbl_scratch);
+ __ cvt_d_w(dbl_scratch, dbl_scratch);
+ } else {
+ FloatingPointHelper::Destination dest =
+ FloatingPointHelper::kCoreRegisters;
+ FloatingPointHelper::ConvertIntToDouble(masm(), src, dest, f0,
+ sfpd_lo, sfpd_hi,
+ scratch0(), f2);
+ }
} else {
- __ mtc1(src, dbl_scratch);
- __ Cvt_d_uw(dbl_scratch, dbl_scratch, f22);
+ if (CpuFeatures::IsSupported(FPU)) {
+ CpuFeatures::Scope scope(FPU);
+ __ mtc1(src, dbl_scratch);
+ __ Cvt_d_uw(dbl_scratch, dbl_scratch, f22);
+ } else {
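+ // This deferred path is only taken when the value does not fit in a
+ // smi, so the unsigned input is at least 2^30 and therefore has at
+ // most one leading zero bit.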
+ Label no_leading_zero, done;
+ __ And(at, src, Operand(0x80000000));
+ __ Branch(&no_leading_zero, ne, at, Operand(zero_reg));
+
+ // The integer has exactly one leading zero.
+ GenerateUInt2Double(masm(), sfpd_hi, sfpd_lo, t0, 1);
+ __ Branch(&done);
+
+ __ bind(&no_leading_zero);
+ GenerateUInt2Double(masm(), sfpd_hi, sfpd_lo, t0, 0);
+ __ bind(&done);
+ }
}
if (FLAG_inline_new) {
- __ LoadRoot(t2, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(t1, a3, t0, t2, &slow);
+ __ LoadRoot(scratch0(), Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(t1, a3, t0, scratch0(), &slow, DONT_TAG_RESULT);
__ Move(dst, t1);
__ Branch(&done);
}
@@ -4227,11 +4661,19 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
__ StoreToSafepointRegisterSlot(zero_reg, dst);
CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
__ Move(dst, v0);
+ __ Subu(dst, dst, kHeapObjectTag);
// Done. Store the value held in dbl_scratch into the allocated heap
// number.
__ bind(&done);
- __ sdc1(dbl_scratch, FieldMemOperand(dst, HeapNumber::kValueOffset));
+ if (CpuFeatures::IsSupported(FPU)) {
+ CpuFeatures::Scope scope(FPU);
+ __ sdc1(dbl_scratch, MemOperand(dst, HeapNumber::kValueOffset));
+ } else {
+ __ sw(sfpd_lo, MemOperand(dst, HeapNumber::kMantissaOffset));
+ __ sw(sfpd_hi, MemOperand(dst, HeapNumber::kExponentOffset));
+ }
+ __ Addu(dst, dst, kHeapObjectTag);
__ StoreToSafepointRegisterSlot(dst, dst);
}
@@ -4253,15 +4695,72 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
Register temp1 = ToRegister(instr->temp());
Register temp2 = ToRegister(instr->temp2());
+ bool convert_hole = false;
+ HValue* change_input = instr->hydrogen()->value();
+ if (change_input->IsLoadKeyed()) {
+ HLoadKeyed* load = HLoadKeyed::cast(change_input);
+ convert_hole = load->UsesMustHandleHole();
+ }
+
+ Label no_special_nan_handling;
+ Label done;
+ if (convert_hole) {
+ if (CpuFeatures::IsSupported(FPU)) {
+ CpuFeatures::Scope scope(FPU);
+ DoubleRegister input_reg = ToDoubleRegister(instr->value());
+ __ BranchF(&no_special_nan_handling, NULL, eq, input_reg, input_reg);
+ __ Move(reg, scratch0(), input_reg);
+ Label canonicalize;
+ __ Branch(&canonicalize, ne, scratch0(), Operand(kHoleNanUpper32));
+ __ li(reg, factory()->the_hole_value());
+ __ Branch(&done);
+ __ bind(&canonicalize);
+ __ Move(input_reg,
+ FixedDoubleArray::canonical_not_the_hole_nan_as_double());
+ } else {
+ Label not_hole;
+ __ Branch(&not_hole, ne, sfpd_hi, Operand(kHoleNanUpper32));
+ __ li(reg, factory()->the_hole_value());
+ __ Branch(&done);
+ __ bind(&not_hole);
+ __ And(scratch, sfpd_hi, Operand(0x7ff00000));
+ __ Branch(&no_special_nan_handling, ne, scratch, Operand(0x7ff00000));
+ Label special_nan_handling;
+ __ And(at, sfpd_hi, Operand(0x000FFFFF));
+ __ Branch(&special_nan_handling, ne, at, Operand(zero_reg));
+ __ Branch(&no_special_nan_handling, eq, sfpd_lo, Operand(zero_reg));
+ __ bind(&special_nan_handling);
+ double canonical_nan =
+ FixedDoubleArray::canonical_not_the_hole_nan_as_double();
+ uint64_t casted_nan = BitCast<uint64_t>(canonical_nan);
+ __ li(sfpd_lo,
+ Operand(static_cast<uint32_t>(casted_nan & 0xFFFFFFFF)));
+ __ li(sfpd_hi,
+ Operand(static_cast<uint32_t>(casted_nan >> 32)));
+ }
+ }
+
+ __ bind(&no_special_nan_handling);
DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
if (FLAG_inline_new) {
__ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry());
+ // We want the untagged address first for performance.
+ __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry(),
+ DONT_TAG_RESULT);
} else {
__ Branch(deferred->entry());
}
__ bind(deferred->exit());
- __ sdc1(input_reg, FieldMemOperand(reg, HeapNumber::kValueOffset));
+ if (CpuFeatures::IsSupported(FPU)) {
+ CpuFeatures::Scope scope(FPU);
+ __ sdc1(input_reg, MemOperand(reg, HeapNumber::kValueOffset));
+ } else {
+ __ sw(sfpd_lo, MemOperand(reg, HeapNumber::kValueOffset));
+ __ sw(sfpd_hi, MemOperand(reg, HeapNumber::kValueOffset + kPointerSize));
+ }
+ // Now that we have finished with the object's real address, tag it.
+ __ Addu(reg, reg, kHeapObjectTag);
+ __ bind(&done);
}
@@ -4274,6 +4773,7 @@ void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
+ __ Subu(v0, v0, kHeapObjectTag);
__ StoreToSafepointRegisterSlot(v0, reg);
}
@@ -4304,42 +4804,57 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
DoubleRegister result_reg,
bool deoptimize_on_undefined,
bool deoptimize_on_minus_zero,
- LEnvironment* env) {
+ LEnvironment* env,
+ NumberUntagDMode mode) {
Register scratch = scratch0();
+ CpuFeatures::Scope scope(FPU);
Label load_smi, heap_number, done;
- // Smi check.
- __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
+ if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
+ // Smi check.
+ __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
+
+ // Heap number map check.
+ __ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
+ __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
+ if (deoptimize_on_undefined) {
+ DeoptimizeIf(ne, env, scratch, Operand(at));
+ } else {
+ Label heap_number;
+ __ Branch(&heap_number, eq, scratch, Operand(at));
- // Heap number map check.
- __ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
- __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
- if (deoptimize_on_undefined) {
- DeoptimizeIf(ne, env, scratch, Operand(at));
- } else {
- Label heap_number;
- __ Branch(&heap_number, eq, scratch, Operand(at));
+ __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+ DeoptimizeIf(ne, env, input_reg, Operand(at));
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- DeoptimizeIf(ne, env, input_reg, Operand(at));
+ // Convert undefined to NaN.
+ __ LoadRoot(at, Heap::kNanValueRootIndex);
+ __ ldc1(result_reg, FieldMemOperand(at, HeapNumber::kValueOffset));
+ __ Branch(&done);
- // Convert undefined to NaN.
- __ LoadRoot(at, Heap::kNanValueRootIndex);
- __ ldc1(result_reg, FieldMemOperand(at, HeapNumber::kValueOffset));
+ __ bind(&heap_number);
+ }
+ // Heap number to double register conversion.
+ __ ldc1(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
+ if (deoptimize_on_minus_zero) {
+ __ mfc1(at, result_reg.low());
+ __ Branch(&done, ne, at, Operand(zero_reg));
+ __ mfc1(scratch, result_reg.high());
+ DeoptimizeIf(eq, env, scratch, Operand(HeapNumber::kSignMask));
+ }
__ Branch(&done);
-
- __ bind(&heap_number);
- }
- // Heap number to double register conversion.
- __ ldc1(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
- if (deoptimize_on_minus_zero) {
- __ mfc1(at, result_reg.low());
- __ Branch(&done, ne, at, Operand(zero_reg));
- __ mfc1(scratch, result_reg.high());
- DeoptimizeIf(eq, env, scratch, Operand(HeapNumber::kSignMask));
+ } else if (mode == NUMBER_CANDIDATE_IS_SMI_OR_HOLE) {
+ __ SmiUntag(scratch, input_reg);
+ DeoptimizeIf(Ugreater_equal, env, scratch, Operand(zero_reg));
+ } else if (mode == NUMBER_CANDIDATE_IS_SMI_CONVERT_HOLE) {
+ __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
+ __ Move(result_reg,
+ FixedDoubleArray::hole_nan_as_double());
+ __ Branch(&done);
+ } else {
+ __ SmiUntag(scratch, input_reg);
+ ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
}
- __ Branch(&done);
// Smi to double register conversion
__ bind(&load_smi);
@@ -4355,7 +4870,7 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
Register scratch1 = scratch0();
Register scratch2 = ToRegister(instr->temp());
DoubleRegister double_scratch = double_scratch0();
- FPURegister single_scratch = double_scratch.low();
+ DoubleRegister double_scratch2 = ToDoubleRegister(instr->temp3());
ASSERT(!scratch1.is(input_reg) && !scratch1.is(scratch2));
ASSERT(!scratch2.is(input_reg) && !scratch2.is(scratch1));
@@ -4370,8 +4885,9 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
// of the if.
if (instr->truncating()) {
+ CpuFeatures::Scope scope(FPU);
Register scratch3 = ToRegister(instr->temp2());
- DoubleRegister double_scratch2 = ToDoubleRegister(instr->temp3());
+ FPURegister single_scratch = double_scratch.low();
ASSERT(!scratch3.is(input_reg) &&
!scratch3.is(scratch1) &&
!scratch3.is(scratch2));
@@ -4406,18 +4922,16 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
Register except_flag = scratch2;
__ EmitFPUTruncate(kRoundToZero,
- single_scratch,
+ input_reg,
double_scratch,
scratch1,
+ double_scratch2,
except_flag,
kCheckForInexactConversion);
// Deopt if the operation did not succeed.
DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
- // Load the result.
- __ mfc1(input_reg, single_scratch);
-
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ Branch(&done, ne, input_reg, Operand(zero_reg));
@@ -4467,10 +4981,28 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
Register input_reg = ToRegister(input);
DoubleRegister result_reg = ToDoubleRegister(result);
+ NumberUntagDMode mode = NUMBER_CANDIDATE_IS_ANY_TAGGED;
+ HValue* value = instr->hydrogen()->value();
+ if (value->type().IsSmi()) {
+ if (value->IsLoadKeyed()) {
+ HLoadKeyed* load = HLoadKeyed::cast(value);
+ if (load->UsesMustHandleHole()) {
+ if (load->hole_mode() == ALLOW_RETURN_HOLE) {
+ mode = NUMBER_CANDIDATE_IS_SMI_CONVERT_HOLE;
+ } else {
+ mode = NUMBER_CANDIDATE_IS_SMI_OR_HOLE;
+ }
+ } else {
+ mode = NUMBER_CANDIDATE_IS_SMI;
+ }
+ }
+ }
+
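+ // The smi-based modes let EmitNumberUntagD skip the heap number map
+ // check; the hole-aware variants choose between deoptimizing on a hole
+ // and materializing it as the hole NaN.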
EmitNumberUntagD(input_reg, result_reg,
instr->hydrogen()->deoptimize_on_undefined(),
instr->hydrogen()->deoptimize_on_minus_zero(),
- instr->environment());
+ instr->environment(),
+ mode);
}
@@ -4479,10 +5011,10 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
Register scratch1 = scratch0();
Register scratch2 = ToRegister(instr->temp());
DoubleRegister double_input = ToDoubleRegister(instr->value());
- FPURegister single_scratch = double_scratch0().low();
if (instr->truncating()) {
Register scratch3 = ToRegister(instr->temp2());
+ FPURegister single_scratch = double_scratch0().low();
__ EmitECMATruncate(result_reg,
double_input,
single_scratch,
@@ -4493,17 +5025,15 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
Register except_flag = scratch2;
__ EmitFPUTruncate(kRoundToMinusInf,
- single_scratch,
+ result_reg,
double_input,
scratch1,
+ double_scratch0(),
except_flag,
kCheckForInexactConversion);
// Deopt if the operation did not succeed (except_flag != 0).
DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
-
- // Load the result.
- __ mfc1(result_reg, single_scratch);
}
}
@@ -4579,37 +5109,38 @@ void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
}
-void LCodeGen::DoCheckMapCommon(Register reg,
- Register scratch,
+void LCodeGen::DoCheckMapCommon(Register map_reg,
Handle<Map> map,
CompareMapMode mode,
LEnvironment* env) {
Label success;
- __ CompareMapAndBranch(reg, scratch, map, &success, eq, &success, mode);
+ __ CompareMapAndBranch(map_reg, map, &success, eq, &success, mode);
DeoptimizeIf(al, env);
__ bind(&success);
}
void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
- Register scratch = scratch0();
+ Register map_reg = scratch0();
LOperand* input = instr->value();
ASSERT(input->IsRegister());
Register reg = ToRegister(input);
Label success;
SmallMapList* map_set = instr->hydrogen()->map_set();
+ __ lw(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
for (int i = 0; i < map_set->length() - 1; i++) {
Handle<Map> map = map_set->at(i);
__ CompareMapAndBranch(
- reg, scratch, map, &success, eq, &success, REQUIRE_EXACT_MAP);
+ map_reg, map, &success, eq, &success, REQUIRE_EXACT_MAP);
}
Handle<Map> map = map_set->last();
- DoCheckMapCommon(reg, scratch, map, REQUIRE_EXACT_MAP, instr->environment());
+ DoCheckMapCommon(map_reg, map, REQUIRE_EXACT_MAP, instr->environment());
__ bind(&success);
}
void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
+ CpuFeatures::Scope vfp_scope(FPU);
DoubleRegister value_reg = ToDoubleRegister(instr->unclamped());
Register result_reg = ToRegister(instr->result());
DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
@@ -4618,6 +5149,7 @@ void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
+ CpuFeatures::Scope vfp_scope(FPU);
Register unclamped_reg = ToRegister(instr->unclamped());
Register result_reg = ToRegister(instr->result());
__ ClampUint8(result_reg, unclamped_reg);
@@ -4625,6 +5157,7 @@ void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
+ CpuFeatures::Scope vfp_scope(FPU);
Register scratch = scratch0();
Register input_reg = ToRegister(instr->unclamped());
Register result_reg = ToRegister(instr->result());
@@ -4661,30 +5194,30 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
ASSERT(instr->temp()->Equals(instr->result()));
- Register temp1 = ToRegister(instr->temp());
- Register temp2 = ToRegister(instr->temp2());
+ Register prototype_reg = ToRegister(instr->temp());
+ Register map_reg = ToRegister(instr->temp2());
- Handle<JSObject> holder = instr->holder();
- Handle<JSObject> current_prototype = instr->prototype();
+ ZoneList<Handle<JSObject> >* prototypes = instr->prototypes();
+ ZoneList<Handle<Map> >* maps = instr->maps();
- // Load prototype object.
- __ LoadHeapObject(temp1, current_prototype);
+ ASSERT(prototypes->length() == maps->length());
- // Check prototype maps up to the holder.
- while (!current_prototype.is_identical_to(holder)) {
- DoCheckMapCommon(temp1, temp2,
- Handle<Map>(current_prototype->map()),
- ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
- current_prototype =
- Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
- // Load next prototype object.
- __ LoadHeapObject(temp1, current_prototype);
+ if (instr->hydrogen()->CanOmitPrototypeChecks()) {
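+ // The maps are recorded so the optimized code can be invalidated later
+ // if any of them changes; only the holder prototype at the end of the
+ // chain needs to be loaded.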
+ for (int i = 0; i < maps->length(); i++) {
+ prototype_maps_.Add(maps->at(i), info()->zone());
+ }
+ __ LoadHeapObject(prototype_reg,
+ prototypes->at(prototypes->length() - 1));
+ } else {
+ for (int i = 0; i < prototypes->length(); i++) {
+ __ LoadHeapObject(prototype_reg, prototypes->at(i));
+ __ lw(map_reg, FieldMemOperand(prototype_reg, HeapObject::kMapOffset));
+ DoCheckMapCommon(map_reg,
+ maps->at(i),
+ ALLOW_ELEMENT_TRANSITION_MAPS,
+ instr->environment());
+ }
}
-
- // Check the holder map.
- DoCheckMapCommon(temp1, temp2,
- Handle<Map>(current_prototype->map()),
- ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
}
@@ -4771,10 +5304,74 @@ void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) {
}
+void LCodeGen::DoAllocate(LAllocate* instr) {
+ class DeferredAllocate: public LDeferredCode {
+ public:
+ DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() { codegen()->DoDeferredAllocate(instr_); }
+ virtual LInstruction* instr() { return instr_; }
+ private:
+ LAllocate* instr_;
+ };
+
+ DeferredAllocate* deferred =
+ new(zone()) DeferredAllocate(this, instr);
+
+ Register result = ToRegister(instr->result());
+ Register scratch = ToRegister(instr->temp1());
+ Register scratch2 = ToRegister(instr->temp2());
+
+ // Allocate memory for the object.
+ AllocationFlags flags = TAG_OBJECT;
+ if (instr->hydrogen()->MustAllocateDoubleAligned()) {
+ flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
+ }
+ if (instr->size()->IsConstantOperand()) {
+ int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+ __ AllocateInNewSpace(size,
+ result,
+ scratch,
+ scratch2,
+ deferred->entry(),
+ flags);
+ } else {
+ Register size = ToRegister(instr->size());
+ __ AllocateInNewSpace(size,
+ result,
+ scratch,
+ scratch2,
+ deferred->entry(),
+ flags);
+ }
+
+ __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
+ Register size = ToRegister(instr->size());
+ Register result = ToRegister(instr->result());
+
+ // TODO(3095996): Get rid of this. For now, we need to make the
+ // result register contain a valid pointer because it is already
+ // contained in the register pointer map.
+ __ mov(result, zero_reg);
+
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ __ SmiTag(size, size);
+ __ push(size);
+ CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr);
+ __ StoreToSafepointRegisterSlot(v0, result);
+}
+
+
void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
Handle<FixedArray> literals(instr->environment()->closure()->literals());
ElementsKind boilerplate_elements_kind =
instr->hydrogen()->boilerplate_elements_kind();
+ AllocationSiteMode allocation_site_mode =
+ instr->hydrogen()->allocation_site_mode();
// Deopt if the array literal boilerplate ElementsKind is of a type different
// than the expected one. The check isn't necessary if the boilerplate has
@@ -4808,8 +5405,8 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
ASSERT(instr->hydrogen()->depth() == 1);
FastCloneShallowArrayStub::Mode mode =
FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS;
- FastCloneShallowArrayStub stub(mode, length);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ FastCloneShallowArrayStub stub(mode, DONT_TRACK_ALLOCATION_SITE, length);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
} else if (instr->hydrogen()->depth() > 1) {
CallRuntime(Runtime::kCreateArrayLiteral, 3, instr);
} else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
@@ -4817,10 +5414,10 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
} else {
FastCloneShallowArrayStub::Mode mode =
boilerplate_elements_kind == FAST_DOUBLE_ELEMENTS
- ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
- : FastCloneShallowArrayStub::CLONE_ELEMENTS;
- FastCloneShallowArrayStub stub(mode, length);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
+ : FastCloneShallowArrayStub::CLONE_ELEMENTS;
+ FastCloneShallowArrayStub stub(mode, allocation_site_mode, length);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}
}
@@ -4828,10 +5425,14 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
Register result,
Register source,
- int* offset) {
+ int* offset,
+ AllocationSiteMode mode) {
ASSERT(!source.is(a2));
ASSERT(!result.is(a2));
+ bool create_allocation_site_info = mode == TRACK_ALLOCATION_SITE &&
+ object->map()->CanTrackAllocationSite();
+
// Only elements backing stores for non-COW arrays need to be copied.
Handle<FixedArrayBase> elements(object->elements());
bool has_elements = elements->length() > 0 &&
@@ -4841,8 +5442,13 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
// this object and its backing store.
int object_offset = *offset;
int object_size = object->map()->instance_size();
- int elements_offset = *offset + object_size;
int elements_size = has_elements ? elements->Size() : 0;
+ int elements_offset = *offset + object_size;
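+ // An AllocationSiteInfo, when tracked, is laid out between the object
+ // and its elements backing store, so both offsets must skip over it.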
+ if (create_allocation_site_info) {
+ elements_offset += AllocationSiteInfo::kSize;
+ *offset += AllocationSiteInfo::kSize;
+ }
+
*offset += object_size + elements_size;
// Copy object header.
@@ -4861,13 +5467,15 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
// Copy in-object properties.
for (int i = 0; i < inobject_properties; i++) {
int total_offset = object_offset + object->GetInObjectPropertyOffset(i);
- Handle<Object> value = Handle<Object>(object->InObjectPropertyAt(i));
+ Handle<Object> value = Handle<Object>(object->InObjectPropertyAt(i),
+ isolate());
if (value->IsJSObject()) {
Handle<JSObject> value_object = Handle<JSObject>::cast(value);
__ Addu(a2, result, Operand(*offset));
__ sw(a2, FieldMemOperand(result, total_offset));
__ LoadHeapObject(source, value_object);
- EmitDeepCopy(value_object, result, source, offset);
+ EmitDeepCopy(value_object, result, source, offset,
+ DONT_TRACK_ALLOCATION_SITE);
} else if (value->IsHeapObject()) {
__ LoadHeapObject(a2, Handle<HeapObject>::cast(value));
__ sw(a2, FieldMemOperand(result, total_offset));
@@ -4877,6 +5485,13 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
}
}
+ // Build an AllocationSiteInfo if desired.
+ if (create_allocation_site_info) {
+ __ li(a2, Operand(Handle<Map>(isolate()->heap()->
+ allocation_site_info_map())));
+ __ sw(a2, FieldMemOperand(result, object_size));
+ __ sw(source, FieldMemOperand(result, object_size + kPointerSize));
+ }
if (has_elements) {
// Copy elements backing store header.
@@ -4907,13 +5522,14 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
Handle<FixedArray> fast_elements = Handle<FixedArray>::cast(elements);
for (int i = 0; i < elements_length; i++) {
int total_offset = elements_offset + FixedArray::OffsetOfElementAt(i);
- Handle<Object> value(fast_elements->get(i));
+ Handle<Object> value(fast_elements->get(i), isolate());
if (value->IsJSObject()) {
Handle<JSObject> value_object = Handle<JSObject>::cast(value);
__ Addu(a2, result, Operand(*offset));
__ sw(a2, FieldMemOperand(result, total_offset));
__ LoadHeapObject(source, value_object);
- EmitDeepCopy(value_object, result, source, offset);
+ EmitDeepCopy(value_object, result, source, offset,
+ DONT_TRACK_ALLOCATION_SITE);
} else if (value->IsHeapObject()) {
__ LoadHeapObject(a2, Handle<HeapObject>::cast(value));
__ sw(a2, FieldMemOperand(result, total_offset));
@@ -4964,7 +5580,8 @@ void LCodeGen::DoFastLiteral(LFastLiteral* instr) {
__ bind(&allocated);
int offset = 0;
__ LoadHeapObject(a1, instr->hydrogen()->boilerplate());
- EmitDeepCopy(instr->hydrogen()->boilerplate(), v0, a1, &offset);
+ EmitDeepCopy(instr->hydrogen()->boilerplate(), v0, a1, &offset,
+ instr->hydrogen()->allocation_site_mode());
ASSERT_EQ(size, offset);
}
@@ -4976,25 +5593,26 @@ void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
instr->hydrogen()->constant_properties();
// Set up the parameters to the stub/runtime call.
- __ LoadHeapObject(t0, literals);
- __ li(a3, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
- __ li(a2, Operand(constant_properties));
+ __ LoadHeapObject(a3, literals);
+ __ li(a2, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
+ __ li(a1, Operand(constant_properties));
int flags = instr->hydrogen()->fast_elements()
? ObjectLiteral::kFastElements
: ObjectLiteral::kNoFlags;
- __ li(a1, Operand(Smi::FromInt(flags)));
- __ Push(t0, a3, a2, a1);
+ __ li(a0, Operand(Smi::FromInt(flags)));
// Pick the right runtime function or stub to call.
int properties_count = constant_properties->length() / 2;
if (instr->hydrogen()->depth() > 1) {
+ __ Push(a3, a2, a1, a0);
CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
} else if (flags != ObjectLiteral::kFastElements ||
properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
+ __ Push(a3, a2, a1, a0);
CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr);
} else {
FastCloneShallowObjectStub stub(properties_count);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}
}
@@ -5068,7 +5686,7 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
FastNewClosureStub stub(shared_info->language_mode());
__ li(a1, Operand(shared_info));
__ push(a1);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
} else {
__ li(a2, Operand(shared_info));
__ li(a1, Operand(pretenure
@@ -5125,7 +5743,7 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
// register.
Condition final_branch_condition = kNoCondition;
Register scratch = scratch0();
- if (type_name->Equals(heap()->number_symbol())) {
+ if (type_name->Equals(heap()->number_string())) {
__ JumpIfSmi(input, true_label);
__ lw(input, FieldMemOperand(input, HeapObject::kMapOffset));
__ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
@@ -5133,7 +5751,7 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
cmp2 = Operand(at);
final_branch_condition = eq;
- } else if (type_name->Equals(heap()->string_symbol())) {
+ } else if (type_name->Equals(heap()->string_string())) {
__ JumpIfSmi(input, false_label);
__ GetObjectType(input, input, scratch);
__ Branch(USE_DELAY_SLOT, false_label,
@@ -5146,7 +5764,7 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
cmp2 = Operand(zero_reg);
final_branch_condition = eq;
- } else if (type_name->Equals(heap()->boolean_symbol())) {
+ } else if (type_name->Equals(heap()->boolean_string())) {
__ LoadRoot(at, Heap::kTrueValueRootIndex);
__ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
__ LoadRoot(at, Heap::kFalseValueRootIndex);
@@ -5154,13 +5772,13 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
cmp2 = Operand(input);
final_branch_condition = eq;
- } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_symbol())) {
+ } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_string())) {
__ LoadRoot(at, Heap::kNullValueRootIndex);
cmp1 = at;
cmp2 = Operand(input);
final_branch_condition = eq;
- } else if (type_name->Equals(heap()->undefined_symbol())) {
+ } else if (type_name->Equals(heap()->undefined_string())) {
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
__ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
// The first instruction of JumpIfSmi is an And - it is safe in the delay
@@ -5174,7 +5792,7 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
cmp2 = Operand(zero_reg);
final_branch_condition = ne;
- } else if (type_name->Equals(heap()->function_symbol())) {
+ } else if (type_name->Equals(heap()->function_string())) {
STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
__ JumpIfSmi(input, false_label);
__ GetObjectType(input, scratch, input);
@@ -5183,16 +5801,26 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
cmp2 = Operand(JS_FUNCTION_PROXY_TYPE);
final_branch_condition = eq;
- } else if (type_name->Equals(heap()->object_symbol())) {
+ } else if (type_name->Equals(heap()->object_string())) {
__ JumpIfSmi(input, false_label);
if (!FLAG_harmony_typeof) {
__ LoadRoot(at, Heap::kNullValueRootIndex);
__ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
}
- // input is an object, it is safe to use GetObjectType in the delay slot.
- __ GetObjectType(input, input, scratch);
- __ Branch(USE_DELAY_SLOT, false_label,
- lt, scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+ if (FLAG_harmony_symbols) {
+ // input is an object, so it is safe to use GetObjectType in the delay slot.
+ __ GetObjectType(input, input, scratch);
+ __ Branch(USE_DELAY_SLOT, true_label, eq, scratch, Operand(SYMBOL_TYPE));
+ // Still an object, so the InstanceType can be loaded.
+ __ lbu(scratch, FieldMemOperand(input, Map::kInstanceTypeOffset));
+ __ Branch(USE_DELAY_SLOT, false_label,
+ lt, scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+ } else {
+ // input is an object, so it is safe to use GetObjectType in the delay slot.
+ __ GetObjectType(input, input, scratch);
+ __ Branch(USE_DELAY_SLOT, false_label,
+ lt, scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+ }
// Still an object, so the InstanceType can be loaded.
__ lbu(scratch, FieldMemOperand(input, Map::kInstanceTypeOffset));
__ Branch(USE_DELAY_SLOT, false_label,
@@ -5246,6 +5874,7 @@ void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
void LCodeGen::EnsureSpaceForLazyDeopt() {
+ if (info()->IsStub()) return;
// Ensure that we have enough space after the previous lazy-bailout
// instruction for patching the code here.
int current_pc = masm()->pc_offset();
@@ -5276,6 +5905,11 @@ void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
}
+void LCodeGen::DoDummyUse(LDummyUse* instr) {
+ // Nothing to see here, move on!
+}
+
+
void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
Register object = ToRegister(instr->object());
Register key = ToRegister(instr->key());
@@ -5335,7 +5969,7 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
__ LoadRoot(at, Heap::kStackLimitRootIndex);
__ Branch(&done, hs, sp, Operand(at));
StackCheckStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
EnsureSpaceForLazyDeopt();
__ bind(&done);
RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
diff --git a/src/3rdparty/v8/src/mips/lithium-codegen-mips.h b/src/3rdparty/v8/src/mips/lithium-codegen-mips.h
index 7363eb8..b4476c4 100644
--- a/src/3rdparty/v8/src/mips/lithium-codegen-mips.h
+++ b/src/3rdparty/v8/src/mips/lithium-codegen-mips.h
@@ -54,6 +54,7 @@ class LCodeGen BASE_EMBEDDED {
deoptimizations_(4, info->zone()),
deopt_jump_table_(4, info->zone()),
deoptimization_literals_(8, info->zone()),
+ prototype_maps_(0, info->zone()),
inlined_function_count_(0),
scope_(info->scope()),
status_(UNUSED),
@@ -61,6 +62,7 @@ class LCodeGen BASE_EMBEDDED {
deferred_(8, info->zone()),
osr_pc_offset_(-1),
last_lazy_deopt_pc_(0),
+ frame_is_built_(false),
safepoints_(info->zone()),
resolver_(this),
expected_safepoint_kind_(Safepoint::kSimple) {
@@ -76,6 +78,15 @@ class LCodeGen BASE_EMBEDDED {
Heap* heap() const { return isolate()->heap(); }
Zone* zone() const { return zone_; }
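+ // A frame is required up front only for spill slots, non-deferred
+ // calls, or ordinary (non-stub) compilations; stubs without those can
+ // leave frame construction to deferred code (NeedsDeferredFrame).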
+ bool NeedsEagerFrame() const {
+ return GetStackSlotCount() > 0 ||
+ info()->is_non_deferred_calling() ||
+ !info()->IsStub();
+ }
+ bool NeedsDeferredFrame() const {
+ return !NeedsEagerFrame() && info()->is_deferred_calling();
+ }
+
// Support for converting LOperands to assembler types.
// LOperand must be a register.
Register ToRegister(LOperand* op) const;
@@ -123,10 +134,11 @@ class LCodeGen BASE_EMBEDDED {
void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
void DoDeferredAllocateObject(LAllocateObject* instr);
+ void DoDeferredAllocate(LAllocate* instr);
void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
Label* map_check);
- void DoCheckMapCommon(Register reg, Register scratch, Handle<Map> map,
+ void DoCheckMapCommon(Register map_reg, Handle<Map> map,
CompareMapMode mode, LEnvironment* env);
// Parallel move support.
@@ -189,7 +201,7 @@ class LCodeGen BASE_EMBEDDED {
Register temporary2);
int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
- int GetParameterCount() const { return scope()->num_parameters(); }
+ int GetParameterCount() const { return info()->num_parameters(); }
void Abort(const char* reason);
void Comment(const char* format, ...);
@@ -262,8 +274,10 @@ class LCodeGen BASE_EMBEDDED {
LOperand* op,
bool is_tagged,
bool is_uint32,
+ bool arguments_known,
int arguments_index,
int arguments_count);
+ void RegisterDependentCodeForEmbeddedMaps(Handle<Code> code);
void PopulateDeoptimizationData(Handle<Code> code);
int DefineDeoptimizationLiteral(Handle<Object> literal);
@@ -316,11 +330,8 @@ class LCodeGen BASE_EMBEDDED {
DoubleRegister result,
bool deoptimize_on_undefined,
bool deoptimize_on_minus_zero,
- LEnvironment* env);
-
- void DeoptIfTaggedButNotSmi(LEnvironment* environment,
- HValue* value,
- LOperand* operand);
+ LEnvironment* env,
+ NumberUntagDMode mode);
// Emits optimized code for typeof x == "y". Modifies input register.
// Returns the condition on which a final split to
@@ -365,14 +376,19 @@ class LCodeGen BASE_EMBEDDED {
void EmitDeepCopy(Handle<JSObject> object,
Register result,
Register source,
- int* offset);
+ int* offset,
+ AllocationSiteMode mode);
struct JumpTableEntry {
- explicit inline JumpTableEntry(Address entry)
+ inline JumpTableEntry(Address entry, bool frame, bool is_lazy)
: label(),
- address(entry) { }
+ address(entry),
+ needs_frame(frame),
+ is_lazy_deopt(is_lazy) { }
Label label;
Address address;
+ bool needs_frame;
+ bool is_lazy_deopt;
};
void EnsureSpaceForLazyDeopt();
@@ -394,6 +410,7 @@ class LCodeGen BASE_EMBEDDED {
ZoneList<LEnvironment*> deoptimizations_;
ZoneList<JumpTableEntry> deopt_jump_table_;
ZoneList<Handle<Object> > deoptimization_literals_;
+ ZoneList<Handle<Map> > prototype_maps_;
int inlined_function_count_;
Scope* const scope_;
Status status_;
@@ -401,6 +418,7 @@ class LCodeGen BASE_EMBEDDED {
ZoneList<LDeferredCode*> deferred_;
int osr_pc_offset_;
int last_lazy_deopt_pc_;
+ bool frame_is_built_;
// Builder that keeps track of safepoints in the code. The table
// itself is emitted at the end of the generated code.
@@ -416,6 +434,7 @@ class LCodeGen BASE_EMBEDDED {
PushSafepointRegistersScope(LCodeGen* codegen,
Safepoint::Kind kind)
: codegen_(codegen) {
+ ASSERT(codegen_->info()->is_calling());
ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
codegen_->expected_safepoint_kind_ = kind;
diff --git a/src/3rdparty/v8/src/mips/lithium-gap-resolver-mips.cc b/src/3rdparty/v8/src/mips/lithium-gap-resolver-mips.cc
index 87efae5..a4a4411 100644
--- a/src/3rdparty/v8/src/mips/lithium-gap-resolver-mips.cc
+++ b/src/3rdparty/v8/src/mips/lithium-gap-resolver-mips.cc
@@ -172,8 +172,10 @@ void LGapResolver::BreakCycle(int index) {
} else if (source->IsStackSlot()) {
__ lw(kLithiumScratchReg, cgen_->ToMemOperand(source));
} else if (source->IsDoubleRegister()) {
+ CpuFeatures::Scope scope(FPU);
__ mov_d(kLithiumScratchDouble, cgen_->ToDoubleRegister(source));
} else if (source->IsDoubleStackSlot()) {
+ CpuFeatures::Scope scope(FPU);
__ ldc1(kLithiumScratchDouble, cgen_->ToMemOperand(source));
} else {
UNREACHABLE();
@@ -193,9 +195,11 @@ void LGapResolver::RestoreValue() {
} else if (saved_destination_->IsStackSlot()) {
__ sw(kLithiumScratchReg, cgen_->ToMemOperand(saved_destination_));
} else if (saved_destination_->IsDoubleRegister()) {
+ CpuFeatures::Scope scope(FPU);
__ mov_d(cgen_->ToDoubleRegister(saved_destination_),
kLithiumScratchDouble);
} else if (saved_destination_->IsDoubleStackSlot()) {
+ CpuFeatures::Scope scope(FPU);
__ sdc1(kLithiumScratchDouble,
cgen_->ToMemOperand(saved_destination_));
} else {
@@ -232,6 +236,7 @@ void LGapResolver::EmitMove(int index) {
MemOperand destination_operand = cgen_->ToMemOperand(destination);
if (in_cycle_) {
if (!destination_operand.OffsetIsInt16Encodable()) {
+ CpuFeatures::Scope scope(FPU);
// 'at' is overwritten while saving the value to the destination.
// Therefore we can't use 'at'. It is OK if the read from the source
// destroys 'at', since that happens before the value is read.
@@ -271,6 +276,7 @@ void LGapResolver::EmitMove(int index) {
}
} else if (source->IsDoubleRegister()) {
+ CpuFeatures::Scope scope(FPU);
DoubleRegister source_register = cgen_->ToDoubleRegister(source);
if (destination->IsDoubleRegister()) {
__ mov_d(cgen_->ToDoubleRegister(destination), source_register);
@@ -281,6 +287,7 @@ void LGapResolver::EmitMove(int index) {
}
} else if (source->IsDoubleStackSlot()) {
+ CpuFeatures::Scope scope(FPU);
MemOperand source_operand = cgen_->ToMemOperand(source);
if (destination->IsDoubleRegister()) {
__ ldc1(cgen_->ToDoubleRegister(destination), source_operand);
diff --git a/src/3rdparty/v8/src/mips/lithium-mips.cc b/src/3rdparty/v8/src/mips/lithium-mips.cc
index 7b71758..6170eb9 100644
--- a/src/3rdparty/v8/src/mips/lithium-mips.cc
+++ b/src/3rdparty/v8/src/mips/lithium-mips.cc
@@ -42,10 +42,10 @@ LITHIUM_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE)
#undef DEFINE_COMPILE
LOsrEntry::LOsrEntry() {
- for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
+ for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
register_spills_[i] = NULL;
}
- for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; ++i) {
+ for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); ++i) {
double_register_spills_[i] = NULL;
}
}
@@ -112,7 +112,11 @@ void LInstruction::PrintDataTo(StringStream* stream) {
stream->Add("= ");
for (int i = 0; i < InputCount(); i++) {
if (i > 0) stream->Add(" ");
- InputAt(i)->PrintTo(stream);
+ if (InputAt(i) == NULL) {
+ stream->Add("NULL");
+ } else {
+ InputAt(i)->PrintTo(stream);
+ }
}
}
@@ -177,6 +181,7 @@ const char* LArithmeticT::Mnemonic() const {
case Token::BIT_AND: return "bit-and-t";
case Token::BIT_OR: return "bit-or-t";
case Token::BIT_XOR: return "bit-xor-t";
+ case Token::ROR: return "ror-t";
case Token::SHL: return "sll-t";
case Token::SAR: return "sra-t";
case Token::SHR: return "srl-t";
@@ -296,6 +301,11 @@ void LUnaryMathOperation::PrintDataTo(StringStream* stream) {
}
+void LMathExp::PrintDataTo(StringStream* stream) {
+ value()->PrintTo(stream);
+}
+
+
void LLoadContextSlot::PrintDataTo(StringStream* stream) {
context()->PrintTo(stream);
stream->Add("[%d]", slot_index());
@@ -345,6 +355,17 @@ void LCallNew::PrintDataTo(StringStream* stream) {
}
+void LCallNewArray::PrintDataTo(StringStream* stream) {
+ stream->Add("= ");
+ constructor()->PrintTo(stream);
+ stream->Add(" #%d / ", arity());
+ ASSERT(hydrogen()->property_cell()->value()->IsSmi());
+ ElementsKind kind = static_cast<ElementsKind>(
+ Smi::cast(hydrogen()->property_cell()->value())->value());
+ stream->Add(" (%s) ", ElementsKindToString(kind));
+}
+
+
void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
arguments()->PrintTo(stream);
stream->Add(" length ");
@@ -372,11 +393,27 @@ void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
}
+void LLoadKeyed::PrintDataTo(StringStream* stream) {
+ elements()->PrintTo(stream);
+ stream->Add("[");
+ key()->PrintTo(stream);
+ if (hydrogen()->IsDehoisted()) {
+ stream->Add(" + %d]", additional_index());
+ } else {
+ stream->Add("]");
+ }
+}
+
+
void LStoreKeyed::PrintDataTo(StringStream* stream) {
elements()->PrintTo(stream);
stream->Add("[");
key()->PrintTo(stream);
- stream->Add("] <- ");
+ if (hydrogen()->IsDehoisted()) {
+ stream->Add(" + %d] <-", additional_index());
+ } else {
+ stream->Add("] <- ");
+ }
value()->PrintTo(stream);
}
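
Editor's note on the "dehoisted" printouts above: when a keyed load or store has a constant addend in its index (for example a[i + 4]), Hydrogen folds the constant out of the key and records it as additional_index, which these printers render as "[key + 4]". A minimal scalar model of that folding, with illustrative names:

// Hypothetical sketch: the effective element index recombines the
// variable key with the folded constant offset.
static int EffectiveIndex(int key, int additional_index) {
  return key + additional_index;
}
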
@@ -590,6 +627,7 @@ LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
HInstruction* hinstr,
CanDeoptimize can_deoptimize) {
+ info()->MarkAsNonDeferredCalling();
#ifdef DEBUG
instr->VerifyCall();
#endif
@@ -655,6 +693,11 @@ LInstruction* LChunkBuilder::DoBlockEntry(HBlockEntry* instr) {
}
+LInstruction* LChunkBuilder::DoDummyUse(HDummyUse* instr) {
+ return DefineAsRegister(new(zone()) LDummyUse(UseAny(instr->value())));
+}
+
+
LInstruction* LChunkBuilder::DoSoftDeoptimize(HSoftDeoptimize* instr) {
return AssignEnvironment(new(zone()) LDeoptimize);
}
@@ -939,6 +982,12 @@ LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
}
+LInstruction* LChunkBuilder::DoInstanceSize(HInstanceSize* instr) {
+ LOperand* object = UseRegisterAtStart(instr->object());
+ return DefineAsRegister(new(zone()) LInstanceSize(object));
+}
+
+
LInstruction* LChunkBuilder::DoWrapReceiver(HWrapReceiver* instr) {
LOperand* receiver = UseRegisterAtStart(instr->receiver());
LOperand* function = UseRegisterAtStart(instr->function());
@@ -975,7 +1024,14 @@ LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) {
LInstruction* LChunkBuilder::DoContext(HContext* instr) {
- return instr->HasNoUses() ? NULL : DefineAsRegister(new(zone()) LContext);
+ // If there is a non-return use, the context must be allocated in a register.
+ for (HUseIterator it(instr->uses()); !it.Done(); it.Advance()) {
+ if (!it.value()->IsReturn()) {
+ return DefineAsRegister(new(zone()) LContext);
+ }
+ }
+
+ return NULL;
}
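
The rewritten DoContext above encodes a simple policy: the context value needs a register only if some use of it is not a return. A self-contained sketch of that scan, with hypothetical types standing in for HUseIterator:

#include <vector>

struct Use { bool is_return; };

// Returns true if any use forces the value into a register.
static bool NeedsRegister(const std::vector<Use>& uses) {
  for (const Use& use : uses) {
    if (!use.is_return) return true;
  }
  return false;
}
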
@@ -992,7 +1048,8 @@ LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) {
LInstruction* LChunkBuilder::DoGlobalObject(HGlobalObject* instr) {
LOperand* context = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LGlobalObject(context, instr->qml_global()));
+ return DefineAsRegister(new(zone()) LGlobalObject(context,
+ instr->qml_global()));
}
@@ -1023,6 +1080,15 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
LOperand* input = UseFixedDouble(instr->value(), f4);
LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(input, NULL);
return MarkAsCall(DefineFixedDouble(result, f4), instr);
+ } else if (op == kMathExp) {
+ ASSERT(instr->representation().IsDouble());
+ ASSERT(instr->value()->representation().IsDouble());
+ LOperand* input = UseTempRegister(instr->value());
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempRegister();
+ LOperand* double_temp = FixedTemp(f6); // Chosen by fair dice roll.
+ LMathExp* result = new(zone()) LMathExp(input, double_temp, temp1, temp2);
+ return DefineAsRegister(result);
} else if (op == kMathPowHalf) {
// Input cannot be the same as the result.
// See lithium-codegen-mips.cc::DoMathPowHalf.
@@ -1032,7 +1098,9 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
return DefineFixedDouble(result, f4);
} else {
LOperand* input = UseRegisterAtStart(instr->value());
- LOperand* temp = (op == kMathFloor) ? TempRegister() : NULL;
+
+ LOperand* temp = (op == kMathRound) ? FixedTemp(f6) :
+ (op == kMathFloor) ? TempRegister() : NULL;
LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(input, temp);
switch (op) {
case kMathAbs:
@@ -1067,7 +1135,8 @@ LInstruction* LChunkBuilder::DoCallNamed(HCallNamed* instr) {
LInstruction* LChunkBuilder::DoCallGlobal(HCallGlobal* instr) {
argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new(zone()) LCallGlobal(instr->qml_global()), v0), instr);
+ LCallGlobal* result = new(zone()) LCallGlobal(instr->qml_global());
+ return MarkAsCall(DefineFixed(result, v0), instr);
}
@@ -1085,6 +1154,14 @@ LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
}
+LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
+ LOperand* constructor = UseFixed(instr->constructor(), a1);
+ argument_count_ -= instr->argument_count();
+ LCallNewArray* result = new(zone()) LCallNewArray(constructor);
+ return MarkAsCall(DefineFixed(result, v0), instr);
+}
+
+
LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
LOperand* function = UseFixed(instr->function(), a1);
argument_count_ -= instr->argument_count();
@@ -1099,6 +1176,11 @@ LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
}
+LInstruction* LChunkBuilder::DoRor(HRor* instr) {
+ return DoShift(Token::ROR, instr);
+}
+
+
LInstruction* LChunkBuilder::DoShr(HShr* instr) {
return DoShift(Token::SHR, instr);
}
@@ -1233,8 +1315,22 @@ LInstruction* LChunkBuilder::DoMul(HMul* instr) {
return DefineAsRegister(mul);
} else if (instr->representation().IsDouble()) {
+ if (kArchVariant == kMips32r2) {
+ if (instr->UseCount() == 1 && instr->uses().value()->IsAdd()) {
+ HAdd* add = HAdd::cast(instr->uses().value());
+ if (instr == add->left()) {
+ // This mul is the lhs of an add. The add and mul will be folded
+ // into a multiply-add.
+ return NULL;
+ }
+ if (instr == add->right() && !add->left()->IsMul()) {
+ // This mul is the rhs of an add, where the lhs is not another mul.
+ // The add and mul will be folded into a multiply-add.
+ return NULL;
+ }
+ }
+ }
return DoArithmeticD(Token::MUL, instr);
-
} else {
return DoArithmeticT(Token::MUL, instr);
}
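
The folding above relies on mips32r2's multiply-add: when a double multiply feeds an add, the builder emits nothing for the mul and later produces a single LMultiplyAddD (see DoMultiplyAdd below). The simulator hunk further down models MADD_D as fs * ft + fr; a host-side sketch of that semantics, names illustrative:

#include <cassert>

// madd.d as modeled by the simulator: fd = fs * ft + fr. Whether real
// hardware rounds the intermediate product is not assumed here.
static double madd_d(double fr, double fs, double ft) {
  return fs * ft + fr;
}

int main() {
  assert(madd_d(1.5, 2.0, 4.0) == 9.5);  // 2.0 * 4.0 + 1.5
  return 0;
}
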
@@ -1261,6 +1357,15 @@ LInstruction* LChunkBuilder::DoSub(HSub* instr) {
}
+LInstruction* LChunkBuilder::DoMultiplyAdd(HMul* mul, HValue* addend) {
+ LOperand* multiplier_op = UseRegisterAtStart(mul->left());
+ LOperand* multiplicand_op = UseRegisterAtStart(mul->right());
+ LOperand* addend_op = UseRegisterAtStart(addend);
+ return DefineSameAsFirst(new(zone()) LMultiplyAddD(addend_op, multiplier_op,
+ multiplicand_op));
+}
+
+
LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
if (instr->representation().IsInteger32()) {
ASSERT(instr->left()->representation().IsInteger32());
@@ -1274,6 +1379,15 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
}
return result;
} else if (instr->representation().IsDouble()) {
+ if (kArchVariant == kMips32r2) {
+ if (instr->left()->IsMul())
+ return DoMultiplyAdd(HMul::cast(instr->left()), instr->right());
+
+ if (instr->right()->IsMul()) {
+ ASSERT(!instr->left()->IsMul());
+ return DoMultiplyAdd(HMul::cast(instr->right()), instr->left());
+ }
+ }
return DoArithmeticD(Token::ADD, instr);
} else {
ASSERT(instr->representation().IsTagged());
@@ -1339,7 +1453,7 @@ LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
LInstruction* LChunkBuilder::DoCompareIDAndBranch(
HCompareIDAndBranch* instr) {
- Representation r = instr->GetInputRepresentation();
+ Representation r = instr->representation();
if (r.IsInteger32()) {
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
@@ -1493,6 +1607,27 @@ LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
}
+LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) {
+ LOperand* string = UseRegister(instr->string());
+ LOperand* index = UseRegister(instr->index());
+ LOperand* value = UseRegister(instr->value());
+ LSeqStringSetChar* result =
+ new(zone()) LSeqStringSetChar(instr->encoding(), string, index, value);
+ return DefineAsRegister(result);
+}
+
+
+LInstruction* LChunkBuilder::DoNumericConstraint(HNumericConstraint* instr) {
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoInductionVariableAnnotation(
+ HInductionVariableAnnotation* instr) {
+ return NULL;
+}
+
+
LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
LOperand* value = UseRegisterOrConstantAtStart(instr->index());
LOperand* length = UseRegister(instr->length());
@@ -1531,6 +1666,7 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
Representation to = instr->to();
if (from.IsTagged()) {
if (to.IsDouble()) {
+ info()->MarkAsDeferredCalling();
LOperand* value = UseRegister(instr->value());
LNumberUntagD* res = new(zone()) LNumberUntagD(value);
return AssignEnvironment(DefineAsRegister(res));
@@ -1544,8 +1680,7 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
LOperand* temp1 = TempRegister();
LOperand* temp2 = instr->CanTruncateToInt32() ? TempRegister()
: NULL;
- LOperand* temp3 = instr->CanTruncateToInt32() ? FixedTemp(f22)
- : NULL;
+ LOperand* temp3 = FixedTemp(f22);
res = DefineSameAsFirst(new(zone()) LTaggedToI(value,
temp1,
temp2,
@@ -1556,6 +1691,7 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
}
} else if (from.IsDouble()) {
if (to.IsTagged()) {
+ info()->MarkAsDeferredCalling();
LOperand* value = UseRegister(instr->value());
LOperand* temp1 = TempRegister();
LOperand* temp2 = TempRegister();
@@ -1575,6 +1711,7 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
return AssignEnvironment(DefineAsRegister(res));
}
} else if (from.IsInteger32()) {
+ info()->MarkAsDeferredCalling();
if (to.IsTagged()) {
HValue* val = instr->value();
LOperand* value = UseRegisterAtStart(val);
@@ -1630,6 +1767,12 @@ LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
}
+LInstruction* LChunkBuilder::DoCheckSmiOrInt32(HCheckSmiOrInt32* instr) {
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return AssignEnvironment(new(zone()) LCheckSmi(value));
+}
+
+
LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
return AssignEnvironment(new(zone()) LCheckFunction(value));
@@ -1813,7 +1956,16 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
(instr->representation().IsDouble() &&
((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
(elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
- LOperand* external_pointer = UseRegister(instr->elements());
+ // float->double conversion on hardware without an FPU requires an extra
+ // scratch register. For convenience, just mark the elements register as
+ // "UseTemp" so that it can be used as a temp during the float->double
+ // conversion once it is no longer needed after the float load.
+ bool needs_temp =
+ !CpuFeatures::IsSupported(FPU) &&
+ (elements_kind == EXTERNAL_FLOAT_ELEMENTS);
+ LOperand* external_pointer = needs_temp
+ ? UseTempRegister(instr->elements())
+ : UseRegister(instr->elements());
result = new(zone()) LLoadKeyed(external_pointer, key);
}
@@ -1838,45 +1990,47 @@ LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
ElementsKind elements_kind = instr->elements_kind();
- bool needs_write_barrier = instr->NeedsWriteBarrier();
- LOperand* key = needs_write_barrier
- ? UseTempRegister(instr->key())
- : UseRegisterOrConstantAtStart(instr->key());
- bool val_is_temp_register =
- elements_kind == EXTERNAL_PIXEL_ELEMENTS ||
- elements_kind == EXTERNAL_FLOAT_ELEMENTS;
- LOperand* val = val_is_temp_register || needs_write_barrier
- ? UseTempRegister(instr->value())
- : UseRegister(instr->value());
- LStoreKeyed* result = NULL;
+
if (!instr->is_external()) {
ASSERT(instr->elements()->representation().IsTagged());
-
+ bool needs_write_barrier = instr->NeedsWriteBarrier();
LOperand* object = NULL;
+ LOperand* val = NULL;
+ LOperand* key = NULL;
+
if (instr->value()->representation().IsDouble()) {
object = UseRegisterAtStart(instr->elements());
+ key = UseRegisterOrConstantAtStart(instr->key());
+ val = UseTempRegister(instr->value());
} else {
ASSERT(instr->value()->representation().IsTagged());
object = UseTempRegister(instr->elements());
+ val = needs_write_barrier ? UseTempRegister(instr->value())
+ : UseRegisterAtStart(instr->value());
+ key = needs_write_barrier ? UseTempRegister(instr->key())
+ : UseRegisterOrConstantAtStart(instr->key());
}
- result = new(zone()) LStoreKeyed(object, key, val);
- } else {
- ASSERT(
- (instr->value()->representation().IsInteger32() &&
- (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
- (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
- (instr->value()->representation().IsDouble() &&
- ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
- (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
- ASSERT(instr->elements()->representation().IsExternal());
-
- LOperand* external_pointer = UseRegister(instr->elements());
- result = new(zone()) LStoreKeyed(external_pointer, key, val);
+ return new(zone()) LStoreKeyed(object, key, val);
}
- ASSERT(result != NULL);
- return result;
+ ASSERT(
+ (instr->value()->representation().IsInteger32() &&
+ (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
+ (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
+ (instr->value()->representation().IsDouble() &&
+ ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
+ (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
+ ASSERT(instr->elements()->representation().IsExternal());
+ bool val_is_temp_register =
+ elements_kind == EXTERNAL_PIXEL_ELEMENTS ||
+ elements_kind == EXTERNAL_FLOAT_ELEMENTS;
+ LOperand* val = val_is_temp_register ? UseTempRegister(instr->value())
+ : UseRegister(instr->value());
+ LOperand* key = UseRegisterOrConstantAtStart(instr->key());
+ LOperand* external_pointer = UseRegister(instr->elements());
+
+ return new(zone()) LStoreKeyed(external_pointer, key, val);
}
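
The restructured DoStoreKeyed separates two operand policies: in-object stores need temp registers for the key and value when a write barrier will run (the barrier clobbers them), while external stores need a temp value only for pixel and float kinds. A compact restatement of those branches under hypothetical names, using plain booleans rather than V8's ElementsKind enum:

enum class ValueUse { Temp, Register, RegisterAtStart };

static ValueUse UseForStoredValue(bool is_external, bool is_pixel_or_float,
                                  bool is_double, bool needs_write_barrier) {
  if (is_external) {
    return is_pixel_or_float ? ValueUse::Temp : ValueUse::Register;
  }
  if (is_double) return ValueUse::Temp;
  return needs_write_barrier ? ValueUse::Temp : ValueUse::RegisterAtStart;
}
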
@@ -1895,14 +2049,16 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
LInstruction* LChunkBuilder::DoTransitionElementsKind(
HTransitionElementsKind* instr) {
- ElementsKind from_kind = instr->original_map()->elements_kind();
- ElementsKind to_kind = instr->transitioned_map()->elements_kind();
- if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
- LOperand* object = UseRegister(instr->object());
+ LOperand* object = UseRegister(instr->object());
+ if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) {
LOperand* new_map_reg = TempRegister();
LTransitionElementsKind* result =
new(zone()) LTransitionElementsKind(object, new_map_reg, NULL);
return DefineSameAsFirst(result);
+ } else if (FLAG_compiled_transitions) {
+ LTransitionElementsKind* result =
+ new(zone()) LTransitionElementsKind(object, NULL, NULL);
+ return AssignPointerMap(result);
} else {
LOperand* object = UseFixed(instr->object(), a0);
LOperand* fixed_object_reg = FixedTemp(a2);
@@ -1911,11 +2067,21 @@ LInstruction* LChunkBuilder::DoTransitionElementsKind(
new(zone()) LTransitionElementsKind(object,
new_map_reg,
fixed_object_reg);
- return MarkAsCall(DefineFixed(result, v0), instr);
+ return MarkAsCall(result, instr);
}
}
+LInstruction* LChunkBuilder::DoTrapAllocationMemento(
+ HTrapAllocationMemento* instr) {
+ LOperand* object = UseRegister(instr->object());
+ LOperand* temp = TempRegister();
+ LTrapAllocationMemento* result =
+ new(zone()) LTrapAllocationMemento(object, temp);
+ return AssignEnvironment(result);
+}
+
+
LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
bool needs_write_barrier = instr->NeedsWriteBarrier();
bool needs_write_barrier_for_map = !instr->transition().is_null() &&
@@ -1982,12 +2148,23 @@ LInstruction* LChunkBuilder::DoStringLength(HStringLength* instr) {
LInstruction* LChunkBuilder::DoAllocateObject(HAllocateObject* instr) {
+ info()->MarkAsDeferredCalling();
LAllocateObject* result =
new(zone()) LAllocateObject(TempRegister(), TempRegister());
return AssignPointerMap(DefineAsRegister(result));
}
+LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
+ info()->MarkAsDeferredCalling();
+ LOperand* size = UseTempRegister(instr->size());
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempRegister();
+ LAllocate* result = new(zone()) LAllocate(size, temp1, temp2);
+ return AssignPointerMap(DefineAsRegister(result));
+}
+
+
LInstruction* LChunkBuilder::DoFastLiteral(HFastLiteral* instr) {
return MarkAsCall(DefineFixed(new(zone()) LFastLiteral, v0), instr);
}
@@ -2030,8 +2207,17 @@ LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
- int spill_index = chunk()->GetParameterStackSlot(instr->index());
- return DefineAsSpilled(new(zone()) LParameter, spill_index);
+ LParameter* result = new(zone()) LParameter;
+ if (instr->kind() == HParameter::STACK_PARAMETER) {
+ int spill_index = chunk()->GetParameterStackSlot(instr->index());
+ return DefineAsSpilled(result, spill_index);
+ } else {
+ ASSERT(info()->IsStub());
+ CodeStubInterfaceDescriptor* descriptor =
+ info()->code_stub()->GetInterfaceDescriptor(info()->isolate());
+ Register reg = descriptor->register_params_[instr->index()];
+ return DefineFixed(result, reg);
+ }
}
@@ -2099,7 +2285,7 @@ LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
env->set_ast_id(instr->ast_id());
env->Drop(instr->pop_count());
- for (int i = 0; i < instr->values()->length(); ++i) {
+ for (int i = instr->values()->length() - 1; i >= 0; --i) {
HValue* value = instr->values()->at(i);
if (instr->HasAssignedIndexAt(i)) {
env->Bind(instr->GetAssignedIndexAt(i), value);
@@ -2143,8 +2329,8 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
instr->arguments_count(),
instr->function(),
undefined,
- instr->call_kind(),
- instr->inlining_kind());
+ instr->inlining_kind(),
+ instr->undefined_receiver());
if (instr->arguments_var() != NULL) {
inner->Bind(instr->arguments_var(), graph()->GetArgumentsObject());
}
diff --git a/src/3rdparty/v8/src/mips/lithium-mips.h b/src/3rdparty/v8/src/mips/lithium-mips.h
index 00e21fd..26340de 100644
--- a/src/3rdparty/v8/src/mips/lithium-mips.h
+++ b/src/3rdparty/v8/src/mips/lithium-mips.h
@@ -50,6 +50,7 @@ class LCodeGen;
V(AccessArgumentsAt) \
V(AddI) \
V(AllocateObject) \
+ V(Allocate) \
V(ApplyArguments) \
V(ArgumentsElements) \
V(ArgumentsLength) \
@@ -67,6 +68,7 @@ class LCodeGen;
V(CallKnownGlobal) \
V(CallNamed) \
V(CallNew) \
+ V(CallNewArray) \
V(CallRuntime) \
V(CallStub) \
V(CheckFunction) \
@@ -93,6 +95,7 @@ class LCodeGen;
V(Deoptimize) \
V(DivI) \
V(DoubleToI) \
+ V(DummyUse) \
V(ElementsKind) \
V(FastLiteral) \
V(FixedArrayBaseLength) \
@@ -106,6 +109,7 @@ class LCodeGen;
V(In) \
V(InstanceOf) \
V(InstanceOfKnownGlobal) \
+ V(InstanceSize) \
V(InstructionGap) \
V(Integer32ToDouble) \
V(Uint32ToDouble) \
@@ -131,9 +135,11 @@ class LCodeGen;
V(LoadNamedFieldPolymorphic) \
V(LoadNamedGeneric) \
V(MapEnumLength) \
+ V(MathExp) \
V(MathMinMax) \
V(ModI) \
V(MulI) \
+ V(MultiplyAddD) \
V(NumberTagD) \
V(NumberTagI) \
V(NumberTagU) \
@@ -147,6 +153,7 @@ class LCodeGen;
V(Random) \
V(RegExpLiteral) \
V(Return) \
+ V(SeqStringSetChar) \
V(ShiftI) \
V(SmiTag) \
V(SmiUntag) \
@@ -169,6 +176,7 @@ class LCodeGen;
V(Throw) \
V(ToFastProperties) \
V(TransitionElementsKind) \
+ V(TrapAllocationMemento) \
V(Typeof) \
V(TypeofIsAndBranch) \
V(UnaryMathOperation) \
@@ -250,6 +258,11 @@ class LInstruction: public ZoneObject {
void MarkAsCall() { is_call_ = true; }
// Interface to the register allocator and iterators.
+ bool ClobbersTemps() const { return is_call_; }
+ bool ClobbersRegisters() const { return is_call_; }
+ bool ClobbersDoubleRegisters() const { return is_call_; }
+
+ // Interface to the register allocator and iterators.
bool IsMarkedAsCall() const { return is_call_; }
virtual bool HasResult() const = 0;
@@ -393,6 +406,15 @@ class LLazyBailout: public LTemplateInstruction<0, 0, 0> {
};
+class LDummyUse: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LDummyUse(LOperand* value) {
+ inputs_[0] = value;
+ }
+ DECLARE_CONCRETE_INSTRUCTION(DummyUse, "dummy-use")
+};
+
+
class LDeoptimize: public LTemplateInstruction<0, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
@@ -601,6 +623,24 @@ class LMulI: public LTemplateInstruction<1, 2, 1> {
};
+// Instruction for computing multiplier * multiplicand + addend.
+class LMultiplyAddD: public LTemplateInstruction<1, 3, 0> {
+ public:
+ LMultiplyAddD(LOperand* addend, LOperand* multiplier,
+ LOperand* multiplicand) {
+ inputs_[0] = addend;
+ inputs_[1] = multiplier;
+ inputs_[2] = multiplicand;
+ }
+
+ LOperand* addend() { return inputs_[0]; }
+ LOperand* multiplier() { return inputs_[1]; }
+ LOperand* multiplicand() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MultiplyAddD, "multiply-add-d")
+};
+
+
class LCmpIDAndBranch: public LControlInstruction<2, 0> {
public:
LCmpIDAndBranch(LOperand* left, LOperand* right) {
@@ -616,7 +656,7 @@ class LCmpIDAndBranch: public LControlInstruction<2, 0> {
Token::Value op() const { return hydrogen()->token(); }
bool is_double() const {
- return hydrogen()->GetInputRepresentation().IsDouble();
+ return hydrogen()->representation().IsDouble();
}
virtual void PrintDataTo(StringStream* stream);
@@ -641,6 +681,30 @@ class LUnaryMathOperation: public LTemplateInstruction<1, 1, 1> {
};
+class LMathExp: public LTemplateInstruction<1, 1, 3> {
+ public:
+ LMathExp(LOperand* value,
+ LOperand* double_temp,
+ LOperand* temp1,
+ LOperand* temp2) {
+ inputs_[0] = value;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ temps_[2] = double_temp;
+ ExternalReference::InitializeMathExpData();
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+ LOperand* double_temp() { return temps_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathExp, "math-exp")
+
+ virtual void PrintDataTo(StringStream* stream);
+};
+
+
class LCmpObjectEqAndBranch: public LControlInstruction<2, 0> {
public:
LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
@@ -897,6 +961,19 @@ class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 1, 1> {
};
+class LInstanceSize: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LInstanceSize(LOperand* object) {
+ inputs_[0] = object;
+ }
+
+ LOperand* object() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(InstanceSize, "instance-size")
+ DECLARE_HYDROGEN_ACCESSOR(InstanceSize)
+};
+
+
class LBoundsCheck: public LTemplateInstruction<0, 2, 0> {
public:
LBoundsCheck(LOperand* index, LOperand* length) {
@@ -1118,6 +1195,30 @@ class LDateField: public LTemplateInstruction<1, 1, 1> {
};
+class LSeqStringSetChar: public LTemplateInstruction<1, 3, 0> {
+ public:
+ LSeqStringSetChar(String::Encoding encoding,
+ LOperand* string,
+ LOperand* index,
+ LOperand* value) : encoding_(encoding) {
+ inputs_[0] = string;
+ inputs_[1] = index;
+ inputs_[2] = value;
+ }
+
+ String::Encoding encoding() { return encoding_; }
+ LOperand* string() { return inputs_[0]; }
+ LOperand* index() { return inputs_[1]; }
+ LOperand* value() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(SeqStringSetChar, "seq-string-set-char")
+ DECLARE_HYDROGEN_ACCESSOR(SeqStringSetChar)
+
+ private:
+ String::Encoding encoding_;
+};
+
+
class LThrow: public LTemplateInstruction<0, 1, 0> {
public:
explicit LThrow(LOperand* value) {
@@ -1352,6 +1453,7 @@ class LLoadKeyed: public LTemplateInstruction<1, 2, 0> {
DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed")
DECLARE_HYDROGEN_ACCESSOR(LoadKeyed)
+ virtual void PrintDataTo(StringStream* stream);
uint32_t additional_index() const { return hydrogen()->index_offset(); }
};
@@ -1498,6 +1600,7 @@ class LThisFunction: public LTemplateInstruction<1, 0, 0> {
class LContext: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(Context, "context")
+ DECLARE_HYDROGEN_ACCESSOR(Context)
};
@@ -1672,6 +1775,23 @@ class LCallNew: public LTemplateInstruction<1, 1, 0> {
};
+class LCallNewArray: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LCallNewArray(LOperand* constructor) {
+ inputs_[0] = constructor;
+ }
+
+ LOperand* constructor() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array")
+ DECLARE_HYDROGEN_ACCESSOR(CallNewArray)
+
+ virtual void PrintDataTo(StringStream* stream);
+
+ int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
class LCallRuntime: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
@@ -1743,6 +1863,7 @@ class LNumberTagD: public LTemplateInstruction<1, 1, 2> {
LOperand* temp2() { return temps_[1]; }
DECLARE_CONCRETE_INSTRUCTION(NumberTagD, "number-tag-d")
+ DECLARE_HYDROGEN_ACCESSOR(Change)
};
@@ -1927,10 +2048,10 @@ class LTransitionElementsKind: public LTemplateInstruction<1, 1, 2> {
public:
LTransitionElementsKind(LOperand* object,
LOperand* new_map_temp,
- LOperand* temp) {
+ LOperand* fixed_object_temp) {
inputs_[0] = object;
temps_[0] = new_map_temp;
- temps_[1] = temp;
+ temps_[1] = fixed_object_temp;
}
LOperand* object() { return inputs_[0]; }
@@ -1945,6 +2066,24 @@ class LTransitionElementsKind: public LTemplateInstruction<1, 1, 2> {
Handle<Map> original_map() { return hydrogen()->original_map(); }
Handle<Map> transitioned_map() { return hydrogen()->transitioned_map(); }
+ ElementsKind from_kind() { return hydrogen()->from_kind(); }
+ ElementsKind to_kind() { return hydrogen()->to_kind(); }
+};
+
+
+class LTrapAllocationMemento : public LTemplateInstruction<0, 1, 1> {
+ public:
+ LTrapAllocationMemento(LOperand* object,
+ LOperand* temp) {
+ inputs_[0] = object;
+ temps_[0] = temp;
+ }
+
+ LOperand* object() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(TrapAllocationMemento,
+ "trap-allocation-memento")
};
@@ -2057,8 +2196,10 @@ class LCheckPrototypeMaps: public LTemplateInstruction<1, 0, 2> {
DECLARE_CONCRETE_INSTRUCTION(CheckPrototypeMaps, "check-prototype-maps")
DECLARE_HYDROGEN_ACCESSOR(CheckPrototypeMaps)
- Handle<JSObject> prototype() const { return hydrogen()->prototype(); }
- Handle<JSObject> holder() const { return hydrogen()->holder(); }
+ ZoneList<Handle<JSObject> >* prototypes() const {
+ return hydrogen()->prototypes();
+ }
+ ZoneList<Handle<Map> >* maps() const { return hydrogen()->maps(); }
};
@@ -2126,7 +2267,7 @@ class LClampTToUint8: public LTemplateInstruction<1, 1, 1> {
};
-class LAllocateObject: public LTemplateInstruction<1, 0, 2> {
+class LAllocateObject: public LTemplateInstruction<1, 1, 2> {
public:
LAllocateObject(LOperand* temp, LOperand* temp2) {
temps_[0] = temp;
@@ -2141,6 +2282,23 @@ class LAllocateObject: public LTemplateInstruction<1, 0, 2> {
};
+class LAllocate: public LTemplateInstruction<1, 2, 2> {
+ public:
+ LAllocate(LOperand* size, LOperand* temp1, LOperand* temp2) {
+ inputs_[1] = size;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ }
+
+ LOperand* size() { return inputs_[1]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Allocate, "allocate")
+ DECLARE_HYDROGEN_ACCESSOR(Allocate)
+};
+
+
class LFastLiteral: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(FastLiteral, "fast-literal")
@@ -2265,8 +2423,9 @@ class LOsrEntry: public LTemplateInstruction<0, 0, 0> {
// slot, i.e., that must also be restored to the spill slot on OSR entry.
// NULL if the register has no assigned spill slot. Indexed by allocation
// index.
- LOperand* register_spills_[Register::kNumAllocatableRegisters];
- LOperand* double_register_spills_[DoubleRegister::kNumAllocatableRegisters];
+ LOperand* register_spills_[Register::kMaxNumAllocatableRegisters];
+ LOperand* double_register_spills_[
+ DoubleRegister::kMaxNumAllocatableRegisters];
};
@@ -2388,6 +2547,8 @@ class LChunkBuilder BASE_EMBEDDED {
HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
#undef DECLARE_DO
+ LInstruction* DoMultiplyAdd(HMul* mul, HValue* addend);
+
private:
enum Status {
UNUSED,
diff --git a/src/3rdparty/v8/src/mips/macro-assembler-mips.cc b/src/3rdparty/v8/src/mips/macro-assembler-mips.cc
index aebfe73..b8eb084 100644
--- a/src/3rdparty/v8/src/mips/macro-assembler-mips.cc
+++ b/src/3rdparty/v8/src/mips/macro-assembler-mips.cc
@@ -118,8 +118,8 @@ void MacroAssembler::PopSafepointRegisters() {
void MacroAssembler::PushSafepointRegistersAndDoubles() {
PushSafepointRegisters();
- Subu(sp, sp, Operand(FPURegister::kNumAllocatableRegisters * kDoubleSize));
- for (int i = 0; i < FPURegister::kNumAllocatableRegisters; i+=2) {
+ Subu(sp, sp, Operand(FPURegister::NumAllocatableRegisters() * kDoubleSize));
+ for (int i = 0; i < FPURegister::NumAllocatableRegisters(); i+=2) {
FPURegister reg = FPURegister::FromAllocationIndex(i);
sdc1(reg, MemOperand(sp, i * kDoubleSize));
}
@@ -127,11 +127,11 @@ void MacroAssembler::PushSafepointRegistersAndDoubles() {
void MacroAssembler::PopSafepointRegistersAndDoubles() {
- for (int i = 0; i < FPURegister::kNumAllocatableRegisters; i+=2) {
+ for (int i = 0; i < FPURegister::NumAllocatableRegisters(); i+=2) {
FPURegister reg = FPURegister::FromAllocationIndex(i);
ldc1(reg, MemOperand(sp, i * kDoubleSize));
}
- Addu(sp, sp, Operand(FPURegister::kNumAllocatableRegisters * kDoubleSize));
+ Addu(sp, sp, Operand(FPURegister::NumAllocatableRegisters() * kDoubleSize));
PopSafepointRegisters();
}
@@ -167,7 +167,7 @@ MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
UNIMPLEMENTED_MIPS();
// General purpose registers are pushed last on the stack.
- int doubles_size = FPURegister::kNumAllocatableRegisters * kDoubleSize;
+ int doubles_size = FPURegister::NumAllocatableRegisters() * kDoubleSize;
int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
return MemOperand(sp, doubles_size + register_offset);
}
@@ -1395,49 +1395,68 @@ void MacroAssembler::ConvertToInt32(Register source,
void MacroAssembler::EmitFPUTruncate(FPURoundingMode rounding_mode,
- FPURegister result,
+ Register result,
DoubleRegister double_input,
- Register scratch1,
+ Register scratch,
+ DoubleRegister double_scratch,
Register except_flag,
CheckForInexactConversion check_inexact) {
+ ASSERT(!result.is(scratch));
+ ASSERT(!double_input.is(double_scratch));
+ ASSERT(!except_flag.is(scratch));
+
ASSERT(CpuFeatures::IsSupported(FPU));
CpuFeatures::Scope scope(FPU);
+ Label done;
+
+ // Clear the except flag (0 = no exception).
+ mov(except_flag, zero_reg);
+
+ // Test for values that can be exactly represented as a signed 32-bit integer.
+ cvt_w_d(double_scratch, double_input);
+ mfc1(result, double_scratch);
+ cvt_d_w(double_scratch, double_scratch);
+ BranchF(&done, NULL, eq, double_input, double_scratch);
int32_t except_mask = kFCSRFlagMask; // Assume interested in all exceptions.
if (check_inexact == kDontCheckForInexactConversion) {
- // Ingore inexact exceptions.
+ // Ignore inexact exceptions.
except_mask &= ~kFCSRInexactFlagMask;
}
// Save FCSR.
- cfc1(scratch1, FCSR);
+ cfc1(scratch, FCSR);
// Disable FPU exceptions.
ctc1(zero_reg, FCSR);
// Do operation based on rounding mode.
switch (rounding_mode) {
case kRoundToNearest:
- Round_w_d(result, double_input);
+ Round_w_d(double_scratch, double_input);
break;
case kRoundToZero:
- Trunc_w_d(result, double_input);
+ Trunc_w_d(double_scratch, double_input);
break;
case kRoundToPlusInf:
- Ceil_w_d(result, double_input);
+ Ceil_w_d(double_scratch, double_input);
break;
case kRoundToMinusInf:
- Floor_w_d(result, double_input);
+ Floor_w_d(double_scratch, double_input);
break;
} // End of switch-statement.
// Retrieve FCSR.
cfc1(except_flag, FCSR);
// Restore FCSR.
- ctc1(scratch1, FCSR);
+ ctc1(scratch, FCSR);
+ // Move the converted value into the result register.
+ mfc1(result, double_scratch);
// Check for fpu exceptions.
And(except_flag, except_flag, Operand(except_mask));
+
+ bind(&done);
}
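
The new fast path in EmitFPUTruncate round-trips the input (cvt.w.d then cvt.d.w) and exits early when the double is exactly a signed 32-bit integer, skipping the FCSR save/restore entirely. A host-side sketch of the same test with an illustrative helper name; note the C++ cast truncates (and is undefined for NaN or out-of-range inputs), whereas cvt.w.d honours the FCSR rounding mode:

#include <cstdint>

// True (and *out valid) if input is exactly representable as int32,
// detected by converting back and comparing.
static bool FitsInt32Exactly(double input, int32_t* out) {
  int32_t as_int = static_cast<int32_t>(input);  // stands in for cvt.w.d
  if (static_cast<double>(as_int) == input) {    // stands in for cvt.d.w
    *out = as_int;
    return true;
  }
  return false;
}
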
@@ -2736,7 +2755,7 @@ void MacroAssembler::DebugBreak() {
PrepareCEntryFunction(ExternalReference(Runtime::kDebugBreak, isolate()));
CEntryStub ces(1);
ASSERT(AllowThisStubCall(&ces));
- Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
+ Call(ces.GetCode(isolate()), RelocInfo::DEBUG_BREAK);
}
#endif // ENABLE_DEBUGGER_SUPPORT
@@ -3109,9 +3128,9 @@ void MacroAssembler::AllocateAsciiString(Register result,
Label* gc_required) {
// Calculate the number of bytes needed for the characters in the string
// while observing object alignment.
- ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
+ ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
ASSERT(kCharSize == 1);
- addiu(scratch1, length, kObjectAlignmentMask + SeqAsciiString::kHeaderSize);
+ addiu(scratch1, length, kObjectAlignmentMask + SeqOneByteString::kHeaderSize);
And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
// Allocate ASCII string in new space.
@@ -3215,7 +3234,8 @@ void MacroAssembler::AllocateHeapNumber(Register result,
Register scratch1,
Register scratch2,
Register heap_number_map,
- Label* need_gc) {
+ Label* need_gc,
+ TaggingMode tagging_mode) {
// Allocate an object in the heap for the heap number and tag it as a heap
// object.
AllocateInNewSpace(HeapNumber::kSize,
@@ -3223,11 +3243,16 @@ void MacroAssembler::AllocateHeapNumber(Register result,
scratch1,
scratch2,
need_gc,
- TAG_OBJECT);
+ tagging_mode == TAG_RESULT ? TAG_OBJECT :
+ NO_ALLOCATION_FLAGS);
// Store heap number map in the allocated object.
AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- sw(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
+ if (tagging_mode == TAG_RESULT) {
+ sw(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
+ } else {
+ sw(heap_number_map, MemOperand(result, HeapObject::kMapOffset));
+ }
}
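
The TAG_RESULT branch above writes through FieldMemOperand while the untagged branch uses a plain MemOperand; the only difference is compensating for the heap-object tag bit in the pointer. A one-line model, assuming V8's kHeapObjectTag value of 1:

static const int kHeapObjectTag = 1;

// FieldMemOperand(obj, offset) is MemOperand(obj, offset - kHeapObjectTag):
// tagged pointers point one byte past the object's true start.
static int FieldOffset(int untagged_offset) {
  return untagged_offset - kHeapObjectTag;
}
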
@@ -3380,13 +3405,13 @@ void MacroAssembler::CheckFastSmiElements(Register map,
void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
Register key_reg,
- Register receiver_reg,
Register elements_reg,
Register scratch1,
Register scratch2,
Register scratch3,
Register scratch4,
- Label* fail) {
+ Label* fail,
+ int elements_offset) {
Label smi_value, maybe_nan, have_double_value, is_nan, done;
Register mantissa_reg = scratch2;
Register exponent_reg = scratch3;
@@ -3412,8 +3437,10 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
bind(&have_double_value);
sll(scratch1, key_reg, kDoubleSizeLog2 - kSmiTagSize);
Addu(scratch1, scratch1, elements_reg);
- sw(mantissa_reg, FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize));
- uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
+ sw(mantissa_reg, FieldMemOperand(
+ scratch1, FixedDoubleArray::kHeaderSize - elements_offset));
+ uint32_t offset = FixedDoubleArray::kHeaderSize - elements_offset +
+ sizeof(kHoleNanLower32);
sw(exponent_reg, FieldMemOperand(scratch1, offset));
jmp(&done);
@@ -3433,7 +3460,8 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
bind(&smi_value);
Addu(scratch1, elements_reg,
- Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
+ Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag -
+ elements_offset));
sll(scratch2, key_reg, kDoubleSizeLog2 - kSmiTagSize);
Addu(scratch1, scratch1, scratch2);
// scratch1 is now effective address of the double element
@@ -3907,19 +3935,20 @@ void MacroAssembler::GetObjectType(Register object,
// Runtime calls.
void MacroAssembler::CallStub(CodeStub* stub,
+ TypeFeedbackId ast_id,
Condition cond,
Register r1,
const Operand& r2,
BranchDelaySlot bd) {
ASSERT(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
- Call(stub->GetCode(), RelocInfo::CODE_TARGET, TypeFeedbackId::None(),
+ Call(stub->GetCode(isolate()), RelocInfo::CODE_TARGET, ast_id,
cond, r1, r2, bd);
}
void MacroAssembler::TailCallStub(CodeStub* stub) {
ASSERT(allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe());
- Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
+ Jump(stub->GetCode(isolate()), RelocInfo::CODE_TARGET);
}
@@ -3931,13 +3960,13 @@ static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function,
int stack_space) {
ExternalReference next_address =
- ExternalReference::handle_scope_next_address();
+ ExternalReference::handle_scope_next_address(isolate());
const int kNextOffset = 0;
const int kLimitOffset = AddressOffset(
- ExternalReference::handle_scope_limit_address(),
+ ExternalReference::handle_scope_limit_address(isolate()),
next_address);
const int kLevelOffset = AddressOffset(
- ExternalReference::handle_scope_level_address(),
+ ExternalReference::handle_scope_level_address(isolate()),
next_address);
// Allocate HandleScope in callee-save registers.
@@ -3948,6 +3977,14 @@ void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function,
Addu(s2, s2, Operand(1));
sw(s2, MemOperand(s3, kLevelOffset));
+ if (FLAG_log_timer_events) {
+ FrameScope frame(this, StackFrame::MANUAL);
+ PushSafepointRegisters();
+ PrepareCallCFunction(0, a0);
+ CallCFunction(ExternalReference::log_enter_external_function(isolate()), 0);
+ PopSafepointRegisters();
+ }
+
// The O32 ABI requires us to pass a pointer in a0 where the returned struct
// (4 bytes) will be placed. This is also built into the Simulator.
// Set up the pointer to the returned value (a0). It was allocated in
@@ -3960,6 +3997,14 @@ void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function,
DirectCEntryStub stub;
stub.GenerateCall(this, function);
+ if (FLAG_log_timer_events) {
+ FrameScope frame(this, StackFrame::MANUAL);
+ PushSafepointRegisters();
+ PrepareCallCFunction(0, a0);
+ CallCFunction(ExternalReference::log_leave_external_function(isolate()), 0);
+ PopSafepointRegisters();
+ }
+
// As mentioned above, on MIPS a pointer is returned - we need to dereference
// it to get the actual return value (which is also a pointer).
lw(v0, MemOperand(v0));
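
As the comments above say, under O32 a function returning a small struct receives a hidden pointer to caller-provided storage in the first argument register, and the caller dereferences that slot afterwards. A host-side model with illustrative names:

// Stand-in for v8::Handle<v8::Value>: a one-pointer struct.
struct HandleValue { void* location; };

static void Callee(HandleValue* sret, void* payload) {
  sret->location = payload;  // callee writes through the hidden pointer
}

static void* CallAndDeref(void* payload) {
  HandleValue result;        // caller-allocated return slot ("a0")
  Callee(&result, payload);
  return result.location;    // the lw(v0, MemOperand(v0)) step
}
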
@@ -4205,7 +4250,10 @@ void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
const Runtime::Function* function = Runtime::FunctionForId(id);
PrepareCEntryArgs(function->nargs);
PrepareCEntryFunction(ExternalReference(function, isolate()));
- CEntryStub stub(1, kSaveFPRegs);
+ SaveFPRegsMode mode = CpuFeatures::IsSupported(FPU)
+ ? kSaveFPRegs
+ : kDontSaveFPRegs;
+ CEntryStub stub(1, mode);
CallStub(&stub);
}
@@ -4222,7 +4270,7 @@ void MacroAssembler::CallExternalReference(const ExternalReference& ext,
PrepareCEntryFunction(ext);
CEntryStub stub(1);
- CallStub(&stub, al, zero_reg, Operand(zero_reg), bd);
+ CallStub(&stub, TypeFeedbackId::None(), al, zero_reg, Operand(zero_reg), bd);
}
@@ -4251,7 +4299,7 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
BranchDelaySlot bd) {
PrepareCEntryFunction(builtin);
CEntryStub stub(1);
- Jump(stub.GetCode(),
+ Jump(stub.GetCode(isolate()),
RelocInfo::CODE_TARGET,
al,
zero_reg,
@@ -4509,6 +4557,19 @@ void MacroAssembler::LoadGlobalFunction(int index, Register function) {
}
+void MacroAssembler::LoadArrayFunction(Register function) {
+ // Load the global or builtins object from the current context.
+ lw(function,
+ MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ // Load the global context from the global or builtins object.
+ lw(function,
+ FieldMemOperand(function, GlobalObject::kGlobalContextOffset));
+ // Load the array function from the native context.
+ lw(function,
+ MemOperand(function, Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
+}
+
+
void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
Register map,
Register scratch) {
@@ -4584,16 +4645,17 @@ void MacroAssembler::EnterExitFrame(bool save_doubles,
const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
if (save_doubles) {
+ CpuFeatures::Scope scope(FPU);
// The stack must be aligned to 0 modulo 8 for stores with sdc1.
ASSERT(kDoubleSize == frame_alignment);
if (frame_alignment > 0) {
ASSERT(IsPowerOf2(frame_alignment));
And(sp, sp, Operand(-frame_alignment)); // Align stack.
}
- int space = FPURegister::kNumRegisters * kDoubleSize;
+ int space = FPURegister::kMaxNumRegisters * kDoubleSize;
Subu(sp, sp, Operand(space));
// Remember: we only need to save every 2nd double FPU value.
- for (int i = 0; i < FPURegister::kNumRegisters; i+=2) {
+ for (int i = 0; i < FPURegister::kMaxNumRegisters; i+=2) {
FPURegister reg = FPURegister::from_code(i);
sdc1(reg, MemOperand(sp, i * kDoubleSize));
}
@@ -4621,9 +4683,10 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles,
bool do_return) {
// Optionally restore all double registers.
if (save_doubles) {
+ CpuFeatures::Scope scope(FPU);
// Remember: we only need to restore every 2nd double FPU value.
lw(t8, MemOperand(fp, ExitFrameConstants::kSPOffset));
- for (int i = 0; i < FPURegister::kNumRegisters; i+=2) {
+ for (int i = 0; i < FPURegister::kMaxNumRegisters; i+=2) {
FPURegister reg = FPURegister::from_code(i);
ldc1(reg, MemOperand(t8, i * kDoubleSize + kPointerSize));
}
@@ -5397,6 +5460,29 @@ void MacroAssembler::ClampDoubleToUint8(Register result_reg,
}
+void MacroAssembler::TestJSArrayForAllocationSiteInfo(
+ Register receiver_reg,
+ Register scratch_reg,
+ Condition cond,
+ Label* allocation_info_present) {
+ Label no_info_available;
+ ExternalReference new_space_start =
+ ExternalReference::new_space_start(isolate());
+ ExternalReference new_space_allocation_top =
+ ExternalReference::new_space_allocation_top_address(isolate());
+ Addu(scratch_reg, receiver_reg,
+ Operand(JSArray::kSize + AllocationSiteInfo::kSize - kHeapObjectTag));
+ Branch(&no_info_available, lt, scratch_reg, Operand(new_space_start));
+ li(at, Operand(new_space_allocation_top));
+ lw(at, MemOperand(at));
+ Branch(&no_info_available, gt, scratch_reg, Operand(at));
+ lw(scratch_reg, MemOperand(scratch_reg, -AllocationSiteInfo::kSize));
+ Branch(allocation_info_present, cond, scratch_reg,
+ Operand(Handle<Map>(isolate()->heap()->allocation_site_info_map())));
+ bind(&no_info_available);
+}
+
+
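A note on TestJSArrayForAllocationSiteInfo above: it looks for an AllocationSiteInfo object laid out immediately after the JSArray by forming the address just past a would-be info object, rejecting candidates outside [new_space_start, allocation_top], and then comparing the candidate's map word. A flat-memory sketch; all names and sizes here are hypothetical, not V8's real layout constants:

#include <cstdint>

static const uintptr_t kJSArraySize = 16;   // illustrative only
static const uintptr_t kSiteInfoSize = 8;   // illustrative only

static bool HasAllocationSiteInfo(const uint32_t* heap, uintptr_t receiver,
                                  uintptr_t space_start, uintptr_t space_top,
                                  uint32_t site_info_map) {
  uintptr_t candidate_end = receiver + kJSArraySize + kSiteInfoSize;
  if (candidate_end < space_start || candidate_end > space_top) return false;
  // The candidate object's map word sits kSiteInfoSize bytes back;
  // addresses index the flat "heap" array in this model.
  uintptr_t map_addr = candidate_end - kSiteInfoSize;
  return heap[map_addr / sizeof(uint32_t)] == site_info_map;
}
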
bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
if (r1.is(r2)) return true;
if (r1.is(r3)) return true;
diff --git a/src/3rdparty/v8/src/mips/macro-assembler-mips.h b/src/3rdparty/v8/src/mips/macro-assembler-mips.h
index 8b7d7c1..11ebc86 100644
--- a/src/3rdparty/v8/src/mips/macro-assembler-mips.h
+++ b/src/3rdparty/v8/src/mips/macro-assembler-mips.h
@@ -65,6 +65,14 @@ enum AllocationFlags {
SIZE_IN_WORDS = 1 << 2
};
+// Flags used for AllocateHeapNumber.
+enum TaggingMode {
+ // Tag the result.
+ TAG_RESULT,
+ // Don't tag the result.
+ DONT_TAG_RESULT
+};
+
// Flags used for the ObjectToDoubleFPURegister function.
enum ObjectToDoubleFlags {
// No special flags.
@@ -541,7 +549,8 @@ class MacroAssembler: public Assembler {
Register scratch1,
Register scratch2,
Register heap_number_map,
- Label* gc_required);
+ Label* gc_required,
+ TaggingMode tagging_mode = TAG_RESULT);
void AllocateHeapNumberWithValue(Register result,
FPURegister value,
Register scratch1,
@@ -625,6 +634,7 @@ class MacroAssembler: public Assembler {
// Push a handle.
void Push(Handle<Object> handle);
+ void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); }
// Push two registers. Pushes leftmost register first (to highest address).
void Push(Register src1, Register src2) {
@@ -757,14 +767,16 @@ class MacroAssembler: public Assembler {
FPURegister double_scratch,
Label *not_int32);
- // Truncates a double using a specific rounding mode.
+ // Truncates a double using a specific rounding mode, and writes the value
+ // to the result register.
// The except_flag will contain any exceptions caused by the instruction.
- // If check_inexact is kDontCheckForInexactConversion, then the inexacat
+ // If check_inexact is kDontCheckForInexactConversion, then the inexact
// exception is masked.
void EmitFPUTruncate(FPURoundingMode rounding_mode,
- FPURegister result,
+ Register result,
DoubleRegister double_input,
- Register scratch1,
+ Register scratch,
+ DoubleRegister double_scratch,
Register except_flag,
CheckForInexactConversion check_inexact
= kDontCheckForInexactConversion);
@@ -828,6 +840,7 @@ class MacroAssembler: public Assembler {
bool can_have_holes);
void LoadGlobalFunction(int index, Register function);
+ void LoadArrayFunction(Register function);
// Load the initial map from the global function. The registers
// function and map can be the same, function is then overwritten.
@@ -977,14 +990,14 @@ class MacroAssembler: public Assembler {
// case scratch2, scratch3 and scratch4 are unmodified.
void StoreNumberToDoubleElements(Register value_reg,
Register key_reg,
- Register receiver_reg,
// All regs below here overwritten.
Register elements_reg,
Register scratch1,
Register scratch2,
Register scratch3,
Register scratch4,
- Label* fail);
+ Label* fail,
+ int elements_offset = 0);
// Compare an object's map with the specified map and its transitioned
// elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Jumps to
@@ -1134,6 +1147,7 @@ class MacroAssembler: public Assembler {
// Call a code stub.
void CallStub(CodeStub* stub,
+ TypeFeedbackId ast_id = TypeFeedbackId::None(),
Condition cond = cc_always,
Register r1 = zero_reg,
const Operand& r2 = Operand(zero_reg),
@@ -1432,6 +1446,17 @@ class MacroAssembler: public Assembler {
// in a0. Assumes that any other register can be used as a scratch.
void CheckEnumCache(Register null_value, Label* call_runtime);
+ // AllocationSiteInfo support. Arrays may have an associated
+ // AllocationSiteInfo object that can be checked for in order to pretransition
+ // to another type.
+ // On entry, receiver_reg should point to the array object.
+ // scratch_reg gets clobbered.
+ // If allocation info is present, jump to allocation_info_present.
+ void TestJSArrayForAllocationSiteInfo(Register receiver_reg,
+ Register scratch_reg,
+ Condition cond,
+ Label* allocation_info_present);
+
private:
void CallCFunctionHelper(Register function,
int num_reg_arguments,
@@ -1506,9 +1531,9 @@ class MacroAssembler: public Assembler {
// This handle will be patched with the code object on installation.
Handle<Object> code_object_;
- // Needs access to SafepointRegisterStackIndex for optimized frame
+ // Needs access to SafepointRegisterStackIndex for compiled frame
// traversal.
- friend class OptimizedFrame;
+ friend class StandardFrame;
};
diff --git a/src/3rdparty/v8/src/mips/regexp-macro-assembler-mips.cc b/src/3rdparty/v8/src/mips/regexp-macro-assembler-mips.cc
index 672ba0e..1ae2a7a 100644
--- a/src/3rdparty/v8/src/mips/regexp-macro-assembler-mips.cc
+++ b/src/3rdparty/v8/src/mips/regexp-macro-assembler-mips.cc
@@ -262,7 +262,7 @@ void RegExpMacroAssemblerMIPS::CheckCharacters(Vector<const uc16> str,
if (mode_ == ASCII) {
__ lbu(a1, MemOperand(a0, 0));
__ addiu(a0, a0, char_size());
- ASSERT(str[i] <= String::kMaxAsciiCharCode);
+ ASSERT(str[i] <= String::kMaxOneByteCharCode);
BranchOrBacktrack(on_failure, ne, a1, Operand(str[i]));
} else {
__ lhu(a1, MemOperand(a0, 0));
@@ -341,7 +341,17 @@ void RegExpMacroAssemblerMIPS::CheckNotBackReferenceIgnoreCase(
__ Or(t0, t0, Operand(0x20)); // Also convert input character.
__ Branch(&fail, ne, t0, Operand(a3));
__ Subu(a3, a3, Operand('a'));
+#ifndef ENABLE_LATIN_1
__ Branch(&fail, hi, a3, Operand('z' - 'a')); // Is a3 a lowercase letter?
+#else
+ __ Branch(&loop_check, ls, a3, Operand('z' - 'a'));
+ // Latin-1: Check for values in range [224,254] but not 247.
+ __ Subu(a3, a3, Operand(224 - 'a'));
+ // Values above the range are not Latin-1 letters.
+ __ Branch(&fail, hi, a3, Operand(254 - 224));
+ // Check for 247.
+ __ Branch(&fail, eq, a3, Operand(247 - 224));
+#endif
__ bind(&loop_check);
__ Branch(&loop, lt, a0, Operand(a1));
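
The Latin-1 branch above accepts, after lowercasing, either an ASCII letter or a byte in [224, 254] excluding 247 (the division sign). A scalar restatement; the helper name is illustrative:

#include <cstdint>

static bool IsLatin1LowercaseLetter(uint8_t c) {
  if (c >= 'a' && c <= 'z') return true;
  return c >= 224 && c <= 254 && c != 247;  // 0xE0..0xFE minus 0xF7 (÷)
}
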
@@ -511,7 +521,7 @@ void RegExpMacroAssemblerMIPS::CheckBitInTable(
Handle<ByteArray> table,
Label* on_bit_set) {
__ li(a0, Operand(table));
- if (mode_ != ASCII || kTableMask != String::kMaxAsciiCharCode) {
+ if (mode_ != ASCII || kTableMask != String::kMaxOneByteCharCode) {
__ And(a1, current_character(), Operand(kTableSize - 1));
__ Addu(a0, a0, a1);
} else {
@@ -1155,7 +1165,7 @@ int RegExpMacroAssemblerMIPS::CheckStackGuardState(Address* return_address,
Handle<String> subject(frame_entry<String*>(re_frame, kInputString));
// Current string.
- bool is_ascii = subject->IsAsciiRepresentationUnderneath();
+ bool is_ascii = subject->IsOneByteRepresentationUnderneath();
ASSERT(re_code->instruction_start() <= *return_address);
ASSERT(*return_address <=
@@ -1186,7 +1196,7 @@ int RegExpMacroAssemblerMIPS::CheckStackGuardState(Address* return_address,
}
// String might have changed.
- if (subject_tmp->IsAsciiRepresentation() != is_ascii) {
+ if (subject_tmp->IsOneByteRepresentation() != is_ascii) {
// If we changed between an ASCII and a UC16 string, the specialized
// code cannot be used, and we need to restart regexp matching from
// scratch (including, potentially, compiling a new version of the code).
diff --git a/src/3rdparty/v8/src/mips/simulator-mips.cc b/src/3rdparty/v8/src/mips/simulator-mips.cc
index cf87f93..be9f369 100644
--- a/src/3rdparty/v8/src/mips/simulator-mips.cc
+++ b/src/3rdparty/v8/src/mips/simulator-mips.cc
@@ -1016,6 +1016,13 @@ void Simulator::set_register(int reg, int32_t value) {
}
+void Simulator::set_dw_register(int reg, const int* dbl) {
+ ASSERT((reg >= 0) && (reg < kNumSimuRegisters));
+ registers_[reg] = dbl[0];
+ registers_[reg + 1] = dbl[1];
+}
+
+
void Simulator::set_fpu_register(int fpureg, int32_t value) {
ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters));
FPUregisters_[fpureg] = value;
@@ -1045,6 +1052,19 @@ int32_t Simulator::get_register(int reg) const {
}
+double Simulator::get_double_from_register_pair(int reg) {
+ ASSERT((reg >= 0) && (reg < kNumSimuRegisters) && ((reg % 2) == 0));
+
+ double dm_val = 0.0;
+ // Read the bits from the unsigned integer register_[] array
+ // into the double precision floating point value and return it.
+ char buffer[2 * sizeof(registers_[0])];
+ memcpy(buffer, &registers_[reg], 2 * sizeof(registers_[0]));
+ memcpy(&dm_val, buffer, 2 * sizeof(registers_[0]));
+ return dm_val;
+}
+
+
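get_double_from_register_pair above reassembles a double from two consecutive 32-bit GPRs with memcpy, the strict-aliasing-safe way to reinterpret bits in C++. The same trick in isolation, assuming the little-endian word order the simulator stores:

#include <cstdint>
#include <cstring>

static double DoubleFromHalves(uint32_t lo, uint32_t hi) {
  uint32_t halves[2] = { lo, hi };
  double result;
  std::memcpy(&result, halves, sizeof(result));
  return result;
}
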
int32_t Simulator::get_fpu_register(int fpureg) const {
ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters));
return FPUregisters_[fpureg];
@@ -1525,7 +1545,7 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
FUNCTION_ADDR(target), arg1);
}
v8::Handle<v8::Value> result = target(arg1);
- *(reinterpret_cast<int*>(arg0)) = (int32_t) *result;
+ *(reinterpret_cast<int*>(arg0)) = reinterpret_cast<int32_t>(*result);
set_register(v0, arg0);
} else if (redirection->type() == ExternalReference::DIRECT_GETTER_CALL) {
// See DirectCEntryStub::GenerateCall for explanation of register usage.
@@ -1536,7 +1556,7 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
FUNCTION_ADDR(target), arg1, arg2);
}
v8::Handle<v8::Value> result = target(arg1, arg2);
- *(reinterpret_cast<int*>(arg0)) = (int32_t) *result;
+ *(reinterpret_cast<int*>(arg0)) = reinterpret_cast<int32_t>(*result);
set_register(v0, arg0);
} else {
SimulatorRuntimeCall target =
@@ -1740,6 +1760,8 @@ void Simulator::ConfigureTypeRegister(Instruction* instr,
UNIMPLEMENTED_MIPS();
};
break;
+ case COP1X:
+ break;
case SPECIAL:
switch (instr->FunctionFieldRaw()) {
case JR:
@@ -1929,6 +1951,7 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
const uint32_t rt_u = static_cast<uint32_t>(rt);
const int32_t rd_reg = instr->RdValue();
+ const int32_t fr_reg = instr->FrValue();
const int32_t fs_reg = instr->FsValue();
const int32_t ft_reg = instr->FtValue();
const int32_t fd_reg = instr->FdValue();
@@ -2173,8 +2196,8 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
case CVT_D_L: // Mips32r2 instruction.
// Watch the signs here, we want 2 32-bit vals
// to make a signed 64-bit value.
- i64 = (uint32_t) get_fpu_register(fs_reg);
- i64 |= ((int64_t) get_fpu_register(fs_reg + 1) << 32);
+ i64 = static_cast<uint32_t>(get_fpu_register(fs_reg));
+ i64 |= static_cast<int64_t>(get_fpu_register(fs_reg + 1)) << 32;
set_fpu_register_double(fd_reg, static_cast<double>(i64));
break;
case CVT_S_L:
@@ -2190,6 +2213,19 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
UNREACHABLE();
};
break;
+ case COP1X:
+ switch (instr->FunctionFieldRaw()) {
+ case MADD_D:
+ double fr, ft, fs;
+ fr = get_fpu_register_double(fr_reg);
+ fs = get_fpu_register_double(fs_reg);
+ ft = get_fpu_register_double(ft_reg);
+ set_fpu_register_double(fd_reg, fs * ft + fr);
+ break;
+ default:
+ UNREACHABLE();
+ };
+ break;
case SPECIAL:
switch (instr->FunctionFieldRaw()) {
case JR: {
@@ -2219,10 +2255,10 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
set_register(HI, static_cast<int32_t>(u64hilo >> 32));
break;
case DIV:
- // Divide by zero was not checked in the configuration step - div and
- // divu do not raise exceptions. On division by 0, the result will
- // be UNPREDICTABLE.
- if (rt != 0) {
+ // Divide by zero and overflow was not checked in the configuration
+ // step - div and divu do not raise exceptions. On division by 0 and
+ // on overflow (INT_MIN/-1), the result will be UNPREDICTABLE.
+ if (rt != 0 && !(rs == INT_MIN && rt == -1)) {
set_register(LO, rs / rt);
set_register(HI, rs % rt);
}
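
The widened guard matters on the host too: in C++, INT_MIN / -1 (and the matching %) overflow and are undefined behaviour, typically trapping on x86, so the simulator must leave LO/HI untouched in that case just as it already did for division by zero. The predicate in isolation:

#include <cstdint>
#include <climits>

static bool DivisionIsDefined(int32_t rs, int32_t rt) {
  return rt != 0 && !(rs == INT_MIN && rt == -1);
}
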
@@ -2718,34 +2754,7 @@ void Simulator::Execute() {
}
-int32_t Simulator::Call(byte* entry, int argument_count, ...) {
- va_list parameters;
- va_start(parameters, argument_count);
- // Set up arguments.
-
- // First four arguments passed in registers.
- ASSERT(argument_count >= 4);
- set_register(a0, va_arg(parameters, int32_t));
- set_register(a1, va_arg(parameters, int32_t));
- set_register(a2, va_arg(parameters, int32_t));
- set_register(a3, va_arg(parameters, int32_t));
-
- // Remaining arguments passed on stack.
- int original_stack = get_register(sp);
- // Compute position of stack on entry to generated code.
- int entry_stack = (original_stack - (argument_count - 4) * sizeof(int32_t)
- - kCArgsSlotsSize);
- if (OS::ActivationFrameAlignment() != 0) {
- entry_stack &= -OS::ActivationFrameAlignment();
- }
- // Store remaining arguments on stack, from low to high memory.
- intptr_t* stack_argument = reinterpret_cast<intptr_t*>(entry_stack);
- for (int i = 4; i < argument_count; i++) {
- stack_argument[i - 4 + kCArgSlotCount] = va_arg(parameters, int32_t);
- }
- va_end(parameters);
- set_register(sp, entry_stack);
-
+void Simulator::CallInternal(byte* entry) {
// Prepare to execute the code at entry.
set_register(pc, reinterpret_cast<int32_t>(entry));
// Put down marker for end of simulation. The simulator will stop simulation
@@ -2809,6 +2818,38 @@ int32_t Simulator::Call(byte* entry, int argument_count, ...) {
set_register(gp, gp_val);
set_register(sp, sp_val);
set_register(fp, fp_val);
+}
+
+
+int32_t Simulator::Call(byte* entry, int argument_count, ...) {
+ va_list parameters;
+ va_start(parameters, argument_count);
+ // Set up arguments.
+
+ // First four arguments passed in registers.
+ ASSERT(argument_count >= 4);
+ set_register(a0, va_arg(parameters, int32_t));
+ set_register(a1, va_arg(parameters, int32_t));
+ set_register(a2, va_arg(parameters, int32_t));
+ set_register(a3, va_arg(parameters, int32_t));
+
+ // Remaining arguments passed on stack.
+ int original_stack = get_register(sp);
+ // Compute position of stack on entry to generated code.
+ int entry_stack = (original_stack - (argument_count - 4) * sizeof(int32_t)
+ - kCArgsSlotsSize);
+ if (OS::ActivationFrameAlignment() != 0) {
+ entry_stack &= -OS::ActivationFrameAlignment();
+ }
+ // Store remaining arguments on stack, from low to high memory.
+ intptr_t* stack_argument = reinterpret_cast<intptr_t*>(entry_stack);
+ for (int i = 4; i < argument_count; i++) {
+ stack_argument[i - 4 + kCArgSlotCount] = va_arg(parameters, int32_t);
+ }
+ va_end(parameters);
+ set_register(sp, entry_stack);
+
+ CallInternal(entry);
// Pop stack passed arguments.
CHECK_EQ(entry_stack, get_register(sp));
@@ -2819,6 +2860,27 @@ int32_t Simulator::Call(byte* entry, int argument_count, ...) {
}
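The argument set-up that stays in Call() reduces to reserving stack slots for arguments beyond a0-a3 and rounding the entry stack pointer down to the activation-frame alignment; `entry_stack &= -alignment` is the usual power-of-two round-down. A toy model, with stand-in constants for kCArgsSlotsSize and OS::ActivationFrameAlignment():

#include <cstdint>
#include <cstdio>

// Compute the stack pointer on entry to generated code: make room for the
// stack-passed arguments plus the C argument slots, then round down to the
// required alignment (assumed to be a power of two).
static uintptr_t AlignEntryStack(uintptr_t original_sp, int argument_count,
                                 int alignment, int arg_slots_size) {
  uintptr_t entry_sp =
      original_sp - (argument_count - 4) * sizeof(int32_t) - arg_slots_size;
  if (alignment != 0) {
    entry_sp &= ~(static_cast<uintptr_t>(alignment) - 1);
  }
  return entry_sp;
}

int main() {
  // Six arguments, 8-byte alignment, 16 bytes of O32 argument slots.
  printf("%#lx\n",
         static_cast<unsigned long>(AlignEntryStack(0x7fff1004u, 6, 8, 16)));
  return 0;
}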
+double Simulator::CallFP(byte* entry, double d0, double d1) {
+ if (!IsMipsSoftFloatABI) {
+ set_fpu_register_double(f12, d0);
+ set_fpu_register_double(f14, d1);
+ } else {
+ int buffer[2];
+ ASSERT(sizeof(buffer[0]) * 2 == sizeof(d0));
+ memcpy(buffer, &d0, sizeof(d0));
+ set_dw_register(a0, buffer);
+ memcpy(buffer, &d1, sizeof(d1));
+ set_dw_register(a2, buffer);
+ }
+ CallInternal(entry);
+ if (!IsMipsSoftFloatABI) {
+ return get_fpu_register_double(f0);
+ } else {
+ return get_double_from_register_pair(v0);
+ }
+}
+
+
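The soft-float branch of CallFP relies on the O32 rule that a double argument travels in an integer register pair (a0/a1 or a2/a3), so its bits are split with memcpy. An equivalent host-side sketch (illustrative names):

#include <cstdint>
#include <cstring>
#include <cstdio>

// Split a double into the two 32-bit words a soft-float ABI passes in an
// integer register pair; sizeof(double) == 2 * sizeof(int32_t).
static void SplitDouble(double d, int32_t pair[2]) {
  memcpy(pair, &d, sizeof(d));
}

int main() {
  int32_t pair[2];
  SplitDouble(3.25, pair);
  printf("%#010x %#010x\n",
         static_cast<unsigned>(pair[0]), static_cast<unsigned>(pair[1]));
  return 0;
}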
uintptr_t Simulator::PushAddress(uintptr_t address) {
int new_sp = get_register(sp) - sizeof(uintptr_t);
uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(new_sp);
diff --git a/src/3rdparty/v8/src/mips/simulator-mips.h b/src/3rdparty/v8/src/mips/simulator-mips.h
index 776badc..67f5953 100644
--- a/src/3rdparty/v8/src/mips/simulator-mips.h
+++ b/src/3rdparty/v8/src/mips/simulator-mips.h
@@ -184,7 +184,9 @@ class Simulator {
// architecture specification and is off by 8 from the currently executing
// instruction.
void set_register(int reg, int32_t value);
+ void set_dw_register(int dreg, const int* dbl);
int32_t get_register(int reg) const;
+ double get_double_from_register_pair(int reg);
// Same for FPURegisters.
void set_fpu_register(int fpureg, int32_t value);
void set_fpu_register_float(int fpureg, float value);
@@ -214,6 +216,8 @@ class Simulator {
// generated RegExp code with 7 parameters. This is a convenience function,
// which sets up the simulator state and grabs the result on return.
int32_t Call(byte* entry, int argument_count, ...);
+ // Alternative to Call(): invoke code taking two double arguments.
+ double CallFP(byte* entry, double d0, double d1);
// Push an address onto the JS stack.
uintptr_t PushAddress(uintptr_t address);
@@ -353,6 +357,7 @@ class Simulator {
void GetFpArgs(double* x, int32_t* y);
void SetFpResult(const double& result);
+ void CallInternal(byte* entry);
// Architecture state.
// Registers.
diff --git a/src/3rdparty/v8/src/mips/stub-cache-mips.cc b/src/3rdparty/v8/src/mips/stub-cache-mips.cc
index bd15775..fd467fa 100644
--- a/src/3rdparty/v8/src/mips/stub-cache-mips.cc
+++ b/src/3rdparty/v8/src/mips/stub-cache-mips.cc
@@ -121,14 +121,14 @@ static void ProbeTable(Isolate* isolate,
// the property. This function may return false negatives, so the code at
// miss_label must always fall back to a complete property check.
// This function is safe to call if the receiver has fast properties.
-// Name must be a symbol and receiver must be a heap object.
+// Name must be internalized and receiver must be a heap object.
static void GenerateDictionaryNegativeLookup(MacroAssembler* masm,
Label* miss_label,
Register receiver,
Handle<String> name,
Register scratch0,
Register scratch1) {
- ASSERT(name->IsSymbol());
+ ASSERT(name->IsInternalizedString());
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->negative_lookups(), 1, scratch0, scratch1);
__ IncrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
@@ -314,19 +314,25 @@ void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
Register dst,
Register src,
Handle<JSObject> holder,
- int index) {
- // Adjust for the number of properties stored in the holder.
- index -= holder->map()->inobject_properties();
- if (index < 0) {
- // Get the property straight out of the holder.
- int offset = holder->map()->instance_size() + (index * kPointerSize);
- __ lw(dst, FieldMemOperand(src, offset));
- } else {
+ PropertyIndex index) {
+ DoGenerateFastPropertyLoad(
+ masm, dst, src, index.is_inobject(holder), index.translate(holder));
+}
+
+
+void StubCompiler::DoGenerateFastPropertyLoad(MacroAssembler* masm,
+ Register dst,
+ Register src,
+ bool inobject,
+ int index) {
+ int offset = index * kPointerSize;
+ if (!inobject) {
// Calculate the offset into the properties array.
- int offset = index * kPointerSize + FixedArray::kHeaderSize;
+ offset = offset + FixedArray::kHeaderSize;
__ lw(dst, FieldMemOperand(src, JSObject::kPropertiesOffset));
- __ lw(dst, FieldMemOperand(dst, offset));
+ src = dst;
}
+ __ lw(dst, FieldMemOperand(src, offset));
}
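The split into GenerateFastPropertyLoad/DoGenerateFastPropertyLoad separates where a property lives (in-object or in the properties array) from its slot index. Conceptually, with stand-in constants for V8's:

#include <cstdio>

const int kPointerSize = 4;           // 32-bit target
const int kFixedArrayHeaderSize = 8;  // stand-in for FixedArray::kHeaderSize

// An in-object property is one load at a fixed offset; an out-of-object
// property needs an extra indirection through the properties array, whose
// header must be skipped.
static int PropertyLoadOffset(bool inobject, int index) {
  int offset = index * kPointerSize;
  if (!inobject) offset += kFixedArrayHeaderSize;
  return offset;
}

int main() {
  printf("in-object: %d, properties array: %d\n",
         PropertyLoadOffset(true, 2), PropertyLoadOffset(false, 2));
  return 0;
}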
@@ -574,6 +580,15 @@ void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) {
}
+void StubCompiler::GenerateStoreMiss(MacroAssembler* masm, Code::Kind kind) {
+ ASSERT(kind == Code::STORE_IC || kind == Code::KEYED_STORE_IC);
+ Handle<Code> code = (kind == Code::STORE_IC)
+ ? masm->isolate()->builtins()->StoreIC_Miss()
+ : masm->isolate()->builtins()->KeyedStoreIC_Miss();
+ __ Jump(code, RelocInfo::CODE_TARGET);
+}
+
+
static void GenerateCallFunction(MacroAssembler* masm,
Handle<Object> object,
const ParameterCount& arguments,
@@ -682,7 +697,7 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm,
// Pass the additional arguments.
Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
- Handle<Object> call_data(api_call_info->data());
+ Handle<Object> call_data(api_call_info->data(), masm->isolate());
if (masm->isolate()->heap()->InNewSpace(*call_data)) {
__ li(a0, api_call_info);
__ lw(t2, FieldMemOperand(a0, CallHandlerInfo::kDataOffset));
@@ -1049,50 +1064,15 @@ static void StoreIntAsFloat(MacroAssembler* masm,
}
-// Convert unsigned integer with specified number of leading zeroes in binary
-// representation to IEEE 754 double.
-// Integer to convert is passed in register hiword.
-// Resulting double is returned in registers hiword:loword.
-// This functions does not work correctly for 0.
-static void GenerateUInt2Double(MacroAssembler* masm,
- Register hiword,
- Register loword,
- Register scratch,
- int leading_zeroes) {
- const int meaningful_bits = kBitsPerInt - leading_zeroes - 1;
- const int biased_exponent = HeapNumber::kExponentBias + meaningful_bits;
-
- const int mantissa_shift_for_hi_word =
- meaningful_bits - HeapNumber::kMantissaBitsInTopWord;
-
- const int mantissa_shift_for_lo_word =
- kBitsPerInt - mantissa_shift_for_hi_word;
-
- __ li(scratch, biased_exponent << HeapNumber::kExponentShift);
- if (mantissa_shift_for_hi_word > 0) {
- __ sll(loword, hiword, mantissa_shift_for_lo_word);
- __ srl(hiword, hiword, mantissa_shift_for_hi_word);
- __ or_(hiword, scratch, hiword);
- } else {
- __ mov(loword, zero_reg);
- __ sll(hiword, hiword, mantissa_shift_for_hi_word);
- __ or_(hiword, scratch, hiword);
- }
-
- // If least significant bit of biased exponent was not 1 it was corrupted
- // by most significant bit of mantissa so we should fix that.
- if (!(biased_exponent & 1)) {
- __ li(scratch, 1 << HeapNumber::kExponentShift);
- __ nor(scratch, scratch, scratch);
- __ and_(hiword, hiword, scratch);
- }
-}
-
-
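For reference, the bit construction performed by the deleted GenerateUInt2Double (building an IEEE 754 double from a non-zero uint32 with a known leading-zero count) can be written on the host as below; like the original, it is not valid for 0, and checking against the compiler's own conversion is the easiest way to see it is right:

#include <cstdint>
#include <cstring>
#include <cassert>
#include <cstdio>

// Build an IEEE 754 double from a non-zero uint32 whose leading-zero count
// is known, as the removed assembly helper did. Not valid for value == 0.
static double UInt2Double(uint32_t value, int leading_zeroes) {
  const int meaningful_bits = 32 - leading_zeroes - 1;
  const uint64_t biased_exponent =
      static_cast<uint64_t>(1023 + meaningful_bits);
  // Left-align the value in the 52-bit mantissa and mask off the implicit
  // leading 1 bit (the source of the exponent fix-up in the original code).
  const uint64_t mantissa =
      (static_cast<uint64_t>(value) << (52 - meaningful_bits)) &
      ((static_cast<uint64_t>(1) << 52) - 1);
  const uint64_t bits = (biased_exponent << 52) | mantissa;
  double result;
  memcpy(&result, &bits, sizeof(result));
  return result;
}

int main() {
  assert(UInt2Double(0x40000000u, 1) == static_cast<double>(0x40000000u));
  assert(UInt2Double(5u, 29) == 5.0);
  printf("ok\n");
  return 0;
}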
#undef __
#define __ ACCESS_MASM(masm())
+void StubCompiler::GenerateTailCall(Handle<Code> code) {
+ __ Jump(code, RelocInfo::CODE_TARGET);
+}
+
+
Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
Register object_reg,
Handle<JSObject> holder,
@@ -1101,7 +1081,9 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
Register scratch2,
Handle<String> name,
int save_at_depth,
- Label* miss) {
+ Label* miss,
+ PrototypeCheckType check) {
+ Handle<JSObject> first = object;
// Make sure there's no overlap between holder and object registers.
ASSERT(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
ASSERT(!scratch2.is(object_reg) && !scratch2.is(holder_reg)
@@ -1129,8 +1111,8 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
if (!current->HasFastProperties() &&
!current->IsJSGlobalObject() &&
!current->IsJSGlobalProxy()) {
- if (!name->IsSymbol()) {
- name = factory()->LookupSymbol(name);
+ if (!name->IsInternalizedString()) {
+ name = factory()->InternalizeString(name);
}
ASSERT(current->property_dictionary()->FindEntry(*name) ==
StringDictionary::kNotFound);
@@ -1142,9 +1124,15 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
reg = holder_reg; // From now on the object will be in holder_reg.
__ lw(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
} else {
- Handle<Map> current_map(current->map());
- __ CheckMap(reg, scratch1, current_map, miss, DONT_DO_SMI_CHECK,
- ALLOW_ELEMENT_TRANSITION_MAPS);
+ Register map_reg = scratch1;
+ if (!current.is_identical_to(first) || check == CHECK_ALL_MAPS) {
+ Handle<Map> current_map(current->map());
+ // CheckMap implicitly loads the map of |reg| into |map_reg|.
+ __ CheckMap(reg, map_reg, current_map, miss, DONT_DO_SMI_CHECK,
+ ALLOW_ELEMENT_TRANSITION_MAPS);
+ } else {
+ __ lw(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
+ }
// Check access rights to the global object. This has to happen after
// the map check so that we know that the object is actually a global
// object.
@@ -1156,7 +1144,7 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
if (heap()->InNewSpace(*prototype)) {
// The prototype is in new space; we cannot store a reference to it
// in the code. Load it from the map.
- __ lw(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
+ __ lw(reg, FieldMemOperand(map_reg, Map::kPrototypeOffset));
} else {
// The prototype is in old space; load it directly.
__ li(reg, Operand(prototype));
@@ -1174,9 +1162,11 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
// Log the check depth.
LOG(masm()->isolate(), IntEvent("check-maps-depth", depth + 1));
- // Check the holder map.
- __ CheckMap(reg, scratch1, Handle<Map>(current->map()), miss,
- DONT_DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
+ if (!holder.is_identical_to(first) || check == CHECK_ALL_MAPS) {
+ // Check the holder map.
+ __ CheckMap(reg, scratch1, Handle<Map>(holder->map()), miss,
+ DONT_DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
+ }
// Perform security check for access to the global object.
ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
@@ -1194,127 +1184,127 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
}
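The net effect of the new PrototypeCheckType parameter is that the receiver's own map check can be skipped when the caller has already dispatched on that map; the rest of the chain, and the holder under CHECK_ALL_MAPS, still get checked. A hypothetical interpreted version of the walk (not V8 API):

#include <cstdio>

struct Map {};
struct Obj { const Map* map; const Obj* prototype; };

// Walk from |object| to |holder|, comparing each object's map against the
// map recorded at stub-compilation time. The first (receiver) check is
// skipped unless check_all_maps is set, mirroring CHECK_ALL_MAPS above.
static bool CheckPrototypeChain(const Obj* object, const Obj* holder,
                                const Map* const* expected_maps,
                                bool check_all_maps) {
  const Obj* current = object;
  for (int depth = 0; ; ++depth) {
    const bool is_receiver = (depth == 0);
    if ((!is_receiver || check_all_maps) &&
        current->map != expected_maps[depth]) {
      return false;  // shape changed since compilation -> miss
    }
    if (current == holder) return true;
    current = current->prototype;
  }
}

int main() {
  Map m0, m1;
  Obj holder = { &m1, 0 };
  Obj receiver = { &m0, &holder };
  const Map* maps[] = { &m0, &m1 };
  printf("%d\n", CheckPrototypeChain(&receiver, &holder, maps, false));  // 1
  return 0;
}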
-void StubCompiler::GenerateLoadField(Handle<JSObject> object,
- Handle<JSObject> holder,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- int index,
- Handle<String> name,
- Label* miss) {
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, miss);
-
- // Check that the maps haven't changed.
- Register reg = CheckPrototypes(
- object, receiver, holder, scratch1, scratch2, scratch3, name, miss);
- GenerateFastPropertyLoad(masm(), v0, reg, holder, index);
- __ Ret();
+void BaseLoadStubCompiler::HandlerFrontendFooter(Label* success,
+ Label* miss) {
+ if (!miss->is_unused()) {
+ __ Branch(success);
+ __ bind(miss);
+ GenerateLoadMiss(masm(), kind());
+ }
}
-void StubCompiler::GenerateLoadConstant(Handle<JSObject> object,
- Handle<JSObject> holder,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Handle<JSFunction> value,
- Handle<String> name,
- Label* miss) {
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, miss, scratch1);
+Register BaseLoadStubCompiler::CallbackHandlerFrontend(
+ Handle<JSObject> object,
+ Register object_reg,
+ Handle<JSObject> holder,
+ Handle<String> name,
+ Label* success,
+ Handle<ExecutableAccessorInfo> callback) {
+ Label miss;
- // Check that the maps haven't changed.
- CheckPrototypes(object, receiver, holder,
- scratch1, scratch2, scratch3, name, miss);
+ Register reg = HandlerFrontendHeader(object, object_reg, holder, name, &miss);
- // Return the constant value.
- __ LoadHeapObject(v0, value);
- __ Ret();
+ if (!holder->HasFastProperties() && !holder->IsJSGlobalObject()) {
+ ASSERT(!reg.is(scratch2()));
+ ASSERT(!reg.is(scratch3()));
+ ASSERT(!reg.is(scratch4()));
+
+ // Load the properties dictionary.
+ Register dictionary = scratch4();
+ __ lw(dictionary, FieldMemOperand(reg, JSObject::kPropertiesOffset));
+
+ // Probe the dictionary.
+ Label probe_done;
+ StringDictionaryLookupStub::GeneratePositiveLookup(masm(),
+ &miss,
+ &probe_done,
+ dictionary,
+ this->name(),
+ scratch2(),
+ scratch3());
+ __ bind(&probe_done);
+
+ // If probing finds an entry in the dictionary, scratch3 contains the
+ // pointer into the dictionary. Check that the value is the callback.
+ Register pointer = scratch3();
+ const int kElementsStartOffset = StringDictionary::kHeaderSize +
+ StringDictionary::kElementsStartIndex * kPointerSize;
+ const int kValueOffset = kElementsStartOffset + kPointerSize;
+ __ lw(scratch2(), FieldMemOperand(pointer, kValueOffset));
+ __ Branch(&miss, ne, scratch2(), Operand(callback));
+ }
+
+ HandlerFrontendFooter(success, &miss);
+ return reg;
}
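For holders with slow (dictionary) properties, the frontend probes the dictionary and then insists the stored value is the exact callback the stub was compiled for. A rough analogue, with std::map standing in for StringDictionary:

#include <map>
#include <string>
#include <cstdio>

// Probe a name->value dictionary and verify that the stored value is the
// very callback this stub was specialized for; anything else is a miss.
static bool ValueIsExpectedCallback(
    const std::map<std::string, const void*>& dictionary,
    const std::string& name,
    const void* expected_callback) {
  std::map<std::string, const void*>::const_iterator it =
      dictionary.find(name);
  return it != dictionary.end() && it->second == expected_callback;
}

int main() {
  int callback_tag = 0;
  std::map<std::string, const void*> dict;
  dict["length"] = &callback_tag;
  printf("%d\n", ValueIsExpectedCallback(dict, "length", &callback_tag));  // 1
  return 0;
}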
-void StubCompiler::GenerateDictionaryLoadCallback(Register receiver,
- Register name_reg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Handle<AccessorInfo> callback,
- Handle<String> name,
- Label* miss) {
- ASSERT(!receiver.is(scratch1));
- ASSERT(!receiver.is(scratch2));
- ASSERT(!receiver.is(scratch3));
-
- // Load the properties dictionary.
- Register dictionary = scratch1;
- __ lw(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
-
- // Probe the dictionary.
- Label probe_done;
- StringDictionaryLookupStub::GeneratePositiveLookup(masm(),
- miss,
- &probe_done,
- dictionary,
- name_reg,
- scratch2,
- scratch3);
- __ bind(&probe_done);
-
- // If probing finds an entry in the dictionary, scratch3 contains the
- // pointer into the dictionary. Check that the value is the callback.
- Register pointer = scratch3;
- const int kElementsStartOffset = StringDictionary::kHeaderSize +
- StringDictionary::kElementsStartIndex * kPointerSize;
- const int kValueOffset = kElementsStartOffset + kPointerSize;
- __ lw(scratch2, FieldMemOperand(pointer, kValueOffset));
- __ Branch(miss, ne, scratch2, Operand(callback));
+void BaseLoadStubCompiler::NonexistentHandlerFrontend(
+ Handle<JSObject> object,
+ Handle<JSObject> last,
+ Handle<String> name,
+ Label* success,
+ Handle<GlobalObject> global) {
+ Label miss;
+
+ Register reg = HandlerFrontendHeader(object, receiver(), last, name, &miss);
+
+ // If the last object in the prototype chain is a global object,
+ // check that the global property cell is empty.
+ if (!global.is_null()) {
+ GenerateCheckPropertyCell(masm(), global, name, scratch2(), &miss);
+ }
+
+ if (!last->HasFastProperties()) {
+ __ lw(scratch2(), FieldMemOperand(reg, HeapObject::kMapOffset));
+ __ lw(scratch2(), FieldMemOperand(scratch2(), Map::kPrototypeOffset));
+ __ Branch(&miss, ne, scratch2(),
+ Operand(isolate()->factory()->null_value()));
+ }
+
+ HandlerFrontendFooter(success, &miss);
}
-void StubCompiler::GenerateLoadCallback(Handle<JSObject> object,
- Handle<JSObject> holder,
- Register receiver,
- Register name_reg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Handle<AccessorInfo> callback,
- Handle<String> name,
- Label* miss) {
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, miss, scratch1);
+void BaseLoadStubCompiler::GenerateLoadField(Register reg,
+ Handle<JSObject> holder,
+ PropertyIndex index) {
+ GenerateFastPropertyLoad(masm(), v0, reg, holder, index);
+ __ Ret();
+}
- // Check that the maps haven't changed.
- Register reg = CheckPrototypes(object, receiver, holder, scratch1,
- scratch2, scratch3, name, miss);
- if (!holder->HasFastProperties() && !holder->IsJSGlobalObject()) {
- GenerateDictionaryLoadCallback(
- reg, name_reg, scratch2, scratch3, scratch4, callback, name, miss);
- }
+void BaseLoadStubCompiler::GenerateLoadConstant(Handle<JSFunction> value) {
+ // Return the constant value.
+ __ LoadHeapObject(v0, value);
+ __ Ret();
+}
+
+void BaseLoadStubCompiler::GenerateLoadCallback(
+ Register reg,
+ Handle<ExecutableAccessorInfo> callback) {
// Build AccessorInfo::args_ list on the stack and push property name below
// the exit frame to make GC aware of them and store pointers to them.
- __ push(receiver);
- __ mov(scratch2, sp); // scratch2 = AccessorInfo::args_
+ __ push(receiver());
+ __ mov(scratch2(), sp); // scratch2 = AccessorInfo::args_
if (heap()->InNewSpace(callback->data())) {
- __ li(scratch3, callback);
- __ lw(scratch3, FieldMemOperand(scratch3, AccessorInfo::kDataOffset));
+ __ li(scratch3(), callback);
+ __ lw(scratch3(), FieldMemOperand(scratch3(),
+ ExecutableAccessorInfo::kDataOffset));
} else {
- __ li(scratch3, Handle<Object>(callback->data()));
+ __ li(scratch3(), Handle<Object>(callback->data(),
+ callback->GetIsolate()));
}
__ Subu(sp, sp, 4 * kPointerSize);
__ sw(reg, MemOperand(sp, 3 * kPointerSize));
- __ sw(scratch3, MemOperand(sp, 2 * kPointerSize));
- __ li(scratch3, Operand(ExternalReference::isolate_address()));
- __ sw(scratch3, MemOperand(sp, 1 * kPointerSize));
- __ sw(name_reg, MemOperand(sp, 0 * kPointerSize));
+ __ sw(scratch3(), MemOperand(sp, 2 * kPointerSize));
+ __ li(scratch3(), Operand(ExternalReference::isolate_address()));
+ __ sw(scratch3(), MemOperand(sp, 1 * kPointerSize));
+ __ sw(name(), MemOperand(sp, 0 * kPointerSize));
- __ mov(a2, scratch2); // Saved in case scratch2 == a1.
+ __ mov(a2, scratch2()); // Saved in case scratch2 == a1.
__ mov(a1, sp); // a1 (first argument - see note below) = Handle<String>
// NOTE: the O32 ABI requires a0 to hold a special pointer when returning a
@@ -1343,22 +1333,15 @@ void StubCompiler::GenerateLoadCallback(Handle<JSObject> object,
}
-void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
- Handle<JSObject> interceptor_holder,
- LookupResult* lookup,
- Register receiver,
- Register name_reg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Handle<String> name,
- Label* miss) {
+void BaseLoadStubCompiler::GenerateLoadInterceptor(
+ Register holder_reg,
+ Handle<JSObject> object,
+ Handle<JSObject> interceptor_holder,
+ LookupResult* lookup,
+ Handle<String> name) {
ASSERT(interceptor_holder->HasNamedInterceptor());
ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined());
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, miss);
-
// So far the most popular follow-ups for interceptor loads are FIELD
// and CALLBACKS, so inline only them; other cases may be added
// later.
@@ -1367,8 +1350,9 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
if (lookup->IsField()) {
compile_followup_inline = true;
} else if (lookup->type() == CALLBACKS &&
- lookup->GetCallbackObject()->IsAccessorInfo()) {
- AccessorInfo* callback = AccessorInfo::cast(lookup->GetCallbackObject());
+ lookup->GetCallbackObject()->IsExecutableAccessorInfo()) {
+ ExecutableAccessorInfo* callback =
+ ExecutableAccessorInfo::cast(lookup->GetCallbackObject());
compile_followup_inline = callback->getter() != NULL &&
callback->IsCompatibleReceiver(*object);
}
@@ -1378,17 +1362,14 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
// Compile the interceptor call, followed by inline code to load the
// property from further up the prototype chain if the call fails.
// Check that the maps haven't changed.
- Register holder_reg = CheckPrototypes(object, receiver, interceptor_holder,
- scratch1, scratch2, scratch3,
- name, miss);
- ASSERT(holder_reg.is(receiver) || holder_reg.is(scratch1));
+ ASSERT(holder_reg.is(receiver()) || holder_reg.is(scratch1()));
// Preserve the receiver register explicitly whenever it is different from
// the holder and it is needed should the interceptor return without any
// result. The CALLBACKS case needs the receiver to be passed into C++ code;
// the FIELD case might cause a miss during the prototype check.
bool must_perfrom_prototype_check = *interceptor_holder != lookup->holder();
- bool must_preserve_receiver_reg = !receiver.is(holder_reg) &&
+ bool must_preserve_receiver_reg = !receiver().is(holder_reg) &&
(lookup->type() == CALLBACKS || must_perfrom_prototype_check);
// Save necessary data before invoking an interceptor.
@@ -1396,86 +1377,40 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
{
FrameScope frame_scope(masm(), StackFrame::INTERNAL);
if (must_preserve_receiver_reg) {
- __ Push(receiver, holder_reg, name_reg);
+ __ Push(receiver(), holder_reg, this->name());
} else {
- __ Push(holder_reg, name_reg);
+ __ Push(holder_reg, this->name());
}
// Invoke an interceptor. Note: map checks from receiver to
// interceptor's holder have been compiled before (see a caller
// of this method).
CompileCallLoadPropertyWithInterceptor(masm(),
- receiver,
+ receiver(),
holder_reg,
- name_reg,
+ this->name(),
interceptor_holder);
// Check if interceptor provided a value for property. If it's
// the case, return immediately.
Label interceptor_failed;
- __ LoadRoot(scratch1, Heap::kNoInterceptorResultSentinelRootIndex);
- __ Branch(&interceptor_failed, eq, v0, Operand(scratch1));
+ __ LoadRoot(scratch1(), Heap::kNoInterceptorResultSentinelRootIndex);
+ __ Branch(&interceptor_failed, eq, v0, Operand(scratch1()));
frame_scope.GenerateLeaveFrame();
__ Ret();
__ bind(&interceptor_failed);
- __ pop(name_reg);
+ __ pop(this->name());
__ pop(holder_reg);
if (must_preserve_receiver_reg) {
- __ pop(receiver);
+ __ pop(receiver());
}
// Leave the internal frame.
}
- // Check that the maps from interceptor's holder to lookup's holder
- // haven't changed. And load lookup's holder into |holder| register.
- if (must_perfrom_prototype_check) {
- holder_reg = CheckPrototypes(interceptor_holder,
- holder_reg,
- Handle<JSObject>(lookup->holder()),
- scratch1,
- scratch2,
- scratch3,
- name,
- miss);
- }
-
- if (lookup->IsField()) {
- // We found FIELD property in prototype chain of interceptor's holder.
- // Retrieve a field from field's holder.
- GenerateFastPropertyLoad(masm(), v0, holder_reg,
- Handle<JSObject>(lookup->holder()),
- lookup->GetFieldIndex());
- __ Ret();
- } else {
- // We found CALLBACKS property in prototype chain of interceptor's
- // holder.
- ASSERT(lookup->type() == CALLBACKS);
- Handle<AccessorInfo> callback(
- AccessorInfo::cast(lookup->GetCallbackObject()));
- ASSERT(callback->getter() != NULL);
-
- // Tail call to runtime.
- // Important invariant in CALLBACKS case: the code above must be
- // structured to never clobber |receiver| register.
- __ li(scratch2, callback);
-
- __ Push(receiver, holder_reg);
- __ lw(scratch3,
- FieldMemOperand(scratch2, AccessorInfo::kDataOffset));
- __ li(scratch1, Operand(ExternalReference::isolate_address()));
- __ Push(scratch3, scratch1, scratch2, name_reg);
-
- ExternalReference ref =
- ExternalReference(IC_Utility(IC::kLoadCallbackProperty),
- masm()->isolate());
- __ TailCallExternalReference(ref, 6, 1);
- }
+ GenerateLoadPostInterceptor(holder_reg, interceptor_holder, name, lookup);
} else { // !compile_followup_inline
// Call the runtime system to load the interceptor.
// Check that the maps haven't changed.
- Register holder_reg = CheckPrototypes(object, receiver, interceptor_holder,
- scratch1, scratch2, scratch3,
- name, miss);
- PushInterceptorArguments(masm(), receiver, holder_reg,
- name_reg, interceptor_holder);
+ PushInterceptorArguments(masm(), receiver(), holder_reg,
+ this->name(), interceptor_holder);
ExternalReference ref = ExternalReference(
IC_Utility(IC::kLoadPropertyWithInterceptorForLoad), masm()->isolate());
@@ -1549,7 +1484,7 @@ void CallStubCompiler::GenerateMissBranch() {
Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
Handle<JSObject> holder,
- int index,
+ PropertyIndex index,
Handle<String> name) {
// ----------- S t a t e -------------
// -- a2 : name
@@ -1623,7 +1558,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
} else {
Label call_builtin;
if (argc == 1) { // Otherwise fall through to call the builtin.
- Label attempt_to_grow_elements;
+ Label attempt_to_grow_elements, with_write_barrier, check_double;
Register elements = t2;
Register end_elements = t1;
@@ -1634,7 +1569,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
__ CheckMap(elements,
v0,
Heap::kFixedArrayMapRootIndex,
- &call_builtin,
+ &check_double,
DONT_DO_SMI_CHECK);
// Get the array's length into v0 and calculate new length.
@@ -1650,7 +1585,6 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
__ Branch(&attempt_to_grow_elements, gt, v0, Operand(t0));
// Check if value is a smi.
- Label with_write_barrier;
__ lw(t0, MemOperand(sp, (argc - 1) * kPointerSize));
__ JumpIfNotSmi(t0, &with_write_barrier);
@@ -1671,6 +1605,39 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
__ Drop(argc + 1);
__ Ret();
+ __ bind(&check_double);
+
+ // Check that the elements are in fast mode and writable.
+ __ CheckMap(elements,
+ a0,
+ Heap::kFixedDoubleArrayMapRootIndex,
+ &call_builtin,
+ DONT_DO_SMI_CHECK);
+
+ // Get the array's length into a0 and calculate new length.
+ __ lw(a0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ STATIC_ASSERT(kSmiTagSize == 1);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ Addu(a0, a0, Operand(Smi::FromInt(argc)));
+
+ // Get the elements' length.
+ __ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
+
+ // Check if we could survive without allocation.
+ __ Branch(&call_builtin, gt, a0, Operand(t0));
+
+ __ lw(t0, MemOperand(sp, (argc - 1) * kPointerSize));
+ __ StoreNumberToDoubleElements(
+ t0, a0, elements, a3, t1, a2, t5,
+ &call_builtin, argc * kDoubleSize);
+
+ // Save new length.
+ __ sw(a0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+
+ // Drop the arguments and return the new length.
+ __ Drop(argc + 1);
+ __ Ret();
+
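The added check_double path mirrors the smi path: bump the length, bail out to the builtin when capacity is exhausted, store the number unboxed, write the length back. In plain C++ with an illustrative object layout (not V8's heap objects):

#include <cstdio>

struct DoubleArrayObj {
  int length;
  int capacity;
  double elements[8];  // illustrative fixed backing store
};

// Fast Array.push for a double-elements array: returns false when the
// builtin must take over (no capacity left), true on the fast path.
static bool FastPushDouble(DoubleArrayObj* a, double value) {
  const int new_length = a->length + 1;
  if (new_length > a->capacity) return false;  // grow via the builtin
  a->elements[a->length] = value;              // unboxed double store
  a->length = new_length;                      // save new length
  return true;
}

int main() {
  DoubleArrayObj a = { 0, 8, { 0 } };
  printf("%d %d\n", FastPushDouble(&a, 1.5), a.length);  // 1 1
  return 0;
}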
__ bind(&with_write_barrier);
__ lw(a3, FieldMemOperand(receiver, HeapObject::kMapOffset));
@@ -1682,8 +1649,12 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
// In case of fast smi-only, convert to fast object, otherwise bail out.
__ bind(&not_fast_object);
__ CheckFastSmiElements(a3, t3, &call_builtin);
+
+ __ lw(t3, FieldMemOperand(t0, HeapObject::kMapOffset));
+ __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
+ __ Branch(&call_builtin, eq, t3, Operand(at));
// a1: receiver
- // r3: map
+ // a3: map
Label try_holey_map;
__ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
FAST_ELEMENTS,
@@ -1692,7 +1663,9 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
&try_holey_map);
__ mov(a2, receiver);
ElementsTransitionGenerator::
- GenerateMapChangeElementsTransition(masm());
+ GenerateMapChangeElementsTransition(masm(),
+ DONT_TRACK_ALLOCATION_SITE,
+ NULL);
__ jmp(&fast_object);
__ bind(&try_holey_map);
@@ -1703,7 +1676,9 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
&call_builtin);
__ mov(a2, receiver);
ElementsTransitionGenerator::
- GenerateMapChangeElementsTransition(masm());
+ GenerateMapChangeElementsTransition(masm(),
+ DONT_TRACK_ALLOCATION_SITE,
+ NULL);
__ bind(&fast_object);
} else {
__ CheckFastObjectElements(a3, a3, &call_builtin);
@@ -1928,8 +1903,9 @@ Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall(
v0,
&miss);
ASSERT(!object.is_identical_to(holder));
- CheckPrototypes(Handle<JSObject>(JSObject::cast(object->GetPrototype())),
- v0, holder, a1, a3, t0, name, &miss);
+ CheckPrototypes(
+ Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
+ v0, holder, a1, a3, t0, name, &miss);
Register receiver = a1;
Register index = t1;
@@ -2008,8 +1984,9 @@ Handle<Code> CallStubCompiler::CompileStringCharAtCall(
v0,
&miss);
ASSERT(!object.is_identical_to(holder));
- CheckPrototypes(Handle<JSObject>(JSObject::cast(object->GetPrototype())),
- v0, holder, a1, a3, t0, name, &miss);
+ CheckPrototypes(
+ Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
+ v0, holder, a1, a3, t0, name, &miss);
Register receiver = v0;
Register index = t1;
@@ -2039,7 +2016,7 @@ Handle<Code> CallStubCompiler::CompileStringCharAtCall(
if (index_out_of_range.is_linked()) {
__ bind(&index_out_of_range);
- __ LoadRoot(v0, Heap::kEmptyStringRootIndex);
+ __ LoadRoot(v0, Heap::kempty_stringRootIndex);
__ Drop(argc + 1);
__ Ret();
}
@@ -2416,25 +2393,16 @@ Handle<Code> CallStubCompiler::CompileFastApiCall(
}
-Handle<Code> CallStubCompiler::CompileCallConstant(Handle<Object> object,
- Handle<JSObject> holder,
- Handle<JSFunction> function,
- Handle<String> name,
- CheckType check) {
+void CallStubCompiler::CompileHandlerFrontend(Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<String> name,
+ CheckType check,
+ Label* success) {
// ----------- S t a t e -------------
// -- a2 : name
// -- ra : return address
// -----------------------------------
- if (HasCustomCallGenerator(function)) {
- Handle<Code> code = CompileCustomCall(object, holder,
- Handle<JSGlobalPropertyCell>::null(),
- function, name);
- // A null handle means bail out to the regular compiler code below.
- if (!code.is_null()) return code;
- }
-
Label miss;
-
GenerateNameCheck(name, &miss);
// Get the receiver from the stack.
@@ -2467,77 +2435,93 @@ Handle<Code> CallStubCompiler::CompileCallConstant(Handle<Object> object,
break;
case STRING_CHECK:
- if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
- // Check that the object is a two-byte string or a symbol.
- __ GetObjectType(a1, a3, a3);
- __ Branch(&miss, Ugreater_equal, a3, Operand(FIRST_NONSTRING_TYPE));
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::STRING_FUNCTION_INDEX, a0, &miss);
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype())),
- a0, holder, a3, a1, t0, name, &miss);
- } else {
- // Calling non-strict non-builtins with a value as the receiver
- // requires boxing.
- __ jmp(&miss);
- }
+ // Check that the object is a string.
+ __ GetObjectType(a1, a3, a3);
+ __ Branch(&miss, Ugreater_equal, a3, Operand(FIRST_NONSTRING_TYPE));
+ // Check that the maps starting from the prototype haven't changed.
+ GenerateDirectLoadGlobalFunctionPrototype(
+ masm(), Context::STRING_FUNCTION_INDEX, a0, &miss);
+ CheckPrototypes(
+ Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
+ a0, holder, a3, a1, t0, name, &miss);
break;
- case NUMBER_CHECK:
- if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
- Label fast;
- // Check that the object is a smi or a heap number.
- __ JumpIfSmi(a1, &fast);
- __ GetObjectType(a1, a0, a0);
- __ Branch(&miss, ne, a0, Operand(HEAP_NUMBER_TYPE));
- __ bind(&fast);
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::NUMBER_FUNCTION_INDEX, a0, &miss);
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype())),
- a0, holder, a3, a1, t0, name, &miss);
- } else {
- // Calling non-strict non-builtins with a value as the receiver
- // requires boxing.
- __ jmp(&miss);
- }
+ case SYMBOL_CHECK:
+ // Check that the object is a symbol.
+ __ GetObjectType(a1, a1, a3);
+ __ Branch(&miss, ne, a3, Operand(SYMBOL_TYPE));
break;
- case BOOLEAN_CHECK:
- if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
- Label fast;
- // Check that the object is a boolean.
- __ LoadRoot(t0, Heap::kTrueValueRootIndex);
- __ Branch(&fast, eq, a1, Operand(t0));
- __ LoadRoot(t0, Heap::kFalseValueRootIndex);
- __ Branch(&miss, ne, a1, Operand(t0));
- __ bind(&fast);
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::BOOLEAN_FUNCTION_INDEX, a0, &miss);
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype())),
- a0, holder, a3, a1, t0, name, &miss);
- } else {
- // Calling non-strict non-builtins with a value as the receiver
- // requires boxing.
- __ jmp(&miss);
- }
+ case NUMBER_CHECK: {
+ Label fast;
+ // Check that the object is a smi or a heap number.
+ __ JumpIfSmi(a1, &fast);
+ __ GetObjectType(a1, a0, a0);
+ __ Branch(&miss, ne, a0, Operand(HEAP_NUMBER_TYPE));
+ __ bind(&fast);
+ // Check that the maps starting from the prototype haven't changed.
+ GenerateDirectLoadGlobalFunctionPrototype(
+ masm(), Context::NUMBER_FUNCTION_INDEX, a0, &miss);
+ CheckPrototypes(
+ Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
+ a0, holder, a3, a1, t0, name, &miss);
break;
}
+ case BOOLEAN_CHECK: {
+ Label fast;
+ // Check that the object is a boolean.
+ __ LoadRoot(t0, Heap::kTrueValueRootIndex);
+ __ Branch(&fast, eq, a1, Operand(t0));
+ __ LoadRoot(t0, Heap::kFalseValueRootIndex);
+ __ Branch(&miss, ne, a1, Operand(t0));
+ __ bind(&fast);
+ // Check that the maps starting from the prototype haven't changed.
+ GenerateDirectLoadGlobalFunctionPrototype(
+ masm(), Context::BOOLEAN_FUNCTION_INDEX, a0, &miss);
+ CheckPrototypes(
+ Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
+ a0, holder, a3, a1, t0, name, &miss);
+ break;
+ }
+ }
+ __ jmp(success);
+
+ // Handle call cache miss.
+ __ bind(&miss);
+
+ GenerateMissBranch();
+}
+
+
+void CallStubCompiler::CompileHandlerBackend(Handle<JSFunction> function) {
CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
? CALL_AS_FUNCTION
: CALL_AS_METHOD;
__ InvokeFunction(
function, arguments(), JUMP_FUNCTION, NullCallWrapper(), call_kind);
+}
- // Handle call cache miss.
- __ bind(&miss);
- GenerateMissBranch();
+Handle<Code> CallStubCompiler::CompileCallConstant(
+ Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<String> name,
+ CheckType check,
+ Handle<JSFunction> function) {
+ if (HasCustomCallGenerator(function)) {
+ Handle<Code> code = CompileCustomCall(object, holder,
+ Handle<JSGlobalPropertyCell>::null(),
+ function, name);
+ // A null handle means bail out to the regular compiler code below.
+ if (!code.is_null()) return code;
+ }
+
+ Label success;
+
+ CompileHandlerFrontend(object, holder, name, check, &success);
+ __ bind(&success);
+ CompileHandlerBackend(function);
// Return the generated code.
return GetCode(function);
@@ -2679,7 +2663,7 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
Handle<String> name,
Handle<JSObject> receiver,
Handle<JSObject> holder,
- Handle<AccessorInfo> callback) {
+ Handle<ExecutableAccessorInfo> callback) {
// ----------- S t a t e -------------
// -- a0 : value
// -- a1 : receiver
@@ -2879,80 +2863,43 @@ Handle<Code> StoreStubCompiler::CompileStoreGlobal(
}
-Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<String> name,
- Handle<JSObject> object,
- Handle<JSObject> last) {
- // ----------- S t a t e -------------
- // -- a0 : receiver
- // -- ra : return address
- // -----------------------------------
- Label miss;
-
- // Check that the receiver is not a smi.
- __ JumpIfSmi(a0, &miss);
-
- // Check the maps of the full prototype chain.
- CheckPrototypes(object, a0, last, a3, a1, t0, name, &miss);
+Handle<Code> LoadStubCompiler::CompileLoadNonexistent(
+ Handle<JSObject> object,
+ Handle<JSObject> last,
+ Handle<String> name,
+ Handle<GlobalObject> global) {
+ Label success;
- // If the last object in the prototype chain is a global object,
- // check that the global property cell is empty.
- if (last->IsGlobalObject()) {
- GenerateCheckPropertyCell(
- masm(), Handle<GlobalObject>::cast(last), name, a1, &miss);
- }
+ NonexistentHandlerFrontend(object, last, name, &success, global);
+ __ bind(&success);
// Return undefined if the maps of the full prototype chain are still the same.
__ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
__ Ret();
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::LOAD_IC);
-
// Return the generated code.
- return GetCode(Code::NONEXISTENT, factory()->empty_string());
+ return GetCode(Code::HANDLER_FRAGMENT, Code::NONEXISTENT, name);
}
-Handle<Code> LoadStubCompiler::CompileLoadField(Handle<JSObject> object,
- Handle<JSObject> holder,
- int index,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- a0 : receiver
- // -- a2 : name
- // -- ra : return address
- // -----------------------------------
- Label miss;
-
- __ mov(v0, a0);
+Register* LoadStubCompiler::registers() {
+ // receiver, name, scratch1, scratch2, scratch3, scratch4.
+ static Register registers[] = { a0, a2, a3, a1, t0, t1 };
+ return registers;
+}
- GenerateLoadField(object, holder, v0, a3, a1, t0, index, name, &miss);
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::LOAD_IC);
- // Return the generated code.
- return GetCode(Code::FIELD, name);
+Register* KeyedLoadStubCompiler::registers() {
+ // receiver, name, scratch1, scratch2, scratch3, scratch4.
+ static Register registers[] = { a1, a0, a2, a3, t0, t1 };
+ return registers;
}
-Handle<Code> LoadStubCompiler::CompileLoadCallback(
- Handle<String> name,
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<AccessorInfo> callback) {
- // ----------- S t a t e -------------
- // -- a0 : receiver
- // -- a2 : name
- // -- ra : return address
- // -----------------------------------
- Label miss;
- GenerateLoadCallback(object, holder, a0, a2, a3, a1, t0, t1, callback, name,
- &miss);
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::LOAD_IC);
-
- // Return the generated code.
- return GetCode(Code::CALLBACKS, name);
+void KeyedLoadStubCompiler::GenerateNameCheck(Handle<String> name,
+ Register name_reg,
+ Label* miss) {
+ __ Branch(miss, ne, name_reg, Operand(name));
}
@@ -2993,91 +2940,18 @@ void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
#define __ ACCESS_MASM(masm())
-Handle<Code> LoadStubCompiler::CompileLoadViaGetter(
- Handle<String> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- Handle<JSFunction> getter) {
- // ----------- S t a t e -------------
- // -- a0 : receiver
- // -- a2 : name
- // -- ra : return address
- // -----------------------------------
- Label miss;
-
- // Check that the maps haven't changed.
- __ JumpIfSmi(a0, &miss);
- CheckPrototypes(receiver, a0, holder, a3, t0, a1, name, &miss);
-
- GenerateLoadViaGetter(masm(), getter);
-
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::LOAD_IC);
-
- // Return the generated code.
- return GetCode(Code::CALLBACKS, name);
-}
-
-
-Handle<Code> LoadStubCompiler::CompileLoadConstant(Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<JSFunction> value,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- a0 : receiver
- // -- a2 : name
- // -- ra : return address
- // -----------------------------------
- Label miss;
-
- GenerateLoadConstant(object, holder, a0, a3, a1, t0, value, name, &miss);
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::LOAD_IC);
-
- // Return the generated code.
- return GetCode(Code::CONSTANT_FUNCTION, name);
-}
-
-
-Handle<Code> LoadStubCompiler::CompileLoadInterceptor(Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- a0 : receiver
- // -- a2 : name
- // -- ra : return address
- // -- [sp] : receiver
- // -----------------------------------
- Label miss;
-
- LookupResult lookup(isolate());
- LookupPostInterceptor(holder, name, &lookup);
- GenerateLoadInterceptor(object, holder, &lookup, a0, a2, a3, a1, t0, name,
- &miss);
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::LOAD_IC);
-
- // Return the generated code.
- return GetCode(Code::INTERCEPTOR, name);
-}
-
-
Handle<Code> LoadStubCompiler::CompileLoadGlobal(
Handle<JSObject> object,
- Handle<GlobalObject> holder,
+ Handle<GlobalObject> global,
Handle<JSGlobalPropertyCell> cell,
Handle<String> name,
bool is_dont_delete) {
- // ----------- S t a t e -------------
- // -- a0 : receiver
- // -- a2 : name
- // -- ra : return address
- // -----------------------------------
- Label miss;
+ Label success, miss;
- // Check that the map of the global has not changed.
- __ JumpIfSmi(a0, &miss);
- CheckPrototypes(object, a0, holder, a3, t0, a1, name, &miss);
+ __ CheckMap(
+ receiver(), scratch1(), Handle<Map>(object->map()), &miss, DO_SMI_CHECK);
+ HandlerFrontendHeader(
+ object, receiver(), Handle<JSObject>::cast(global), name, &miss);
// Get the value from the cell.
__ li(a3, Operand(cell));
@@ -3089,181 +2963,16 @@ Handle<Code> LoadStubCompiler::CompileLoadGlobal(
__ Branch(&miss, eq, t0, Operand(at));
}
- __ mov(v0, t0);
+ HandlerFrontendFooter(&success, &miss);
+ __ bind(&success);
+
Counters* counters = masm()->isolate()->counters();
__ IncrementCounter(counters->named_load_global_stub(), 1, a1, a3);
+ __ mov(v0, t0);
__ Ret();
- __ bind(&miss);
- __ IncrementCounter(counters->named_load_global_stub_miss(), 1, a1, a3);
- GenerateLoadMiss(masm(), Code::LOAD_IC);
-
- // Return the generated code.
- return GetCode(Code::NORMAL, name);
-}
-
-
-Handle<Code> KeyedLoadStubCompiler::CompileLoadField(Handle<String> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- int index) {
- // ----------- S t a t e -------------
- // -- ra : return address
- // -- a0 : key
- // -- a1 : receiver
- // -----------------------------------
- Label miss;
-
- // Check the key is the cached one.
- __ Branch(&miss, ne, a0, Operand(name));
-
- GenerateLoadField(receiver, holder, a1, a2, a3, t0, index, name, &miss);
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
-
- return GetCode(Code::FIELD, name);
-}
-
-
-Handle<Code> KeyedLoadStubCompiler::CompileLoadCallback(
- Handle<String> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- Handle<AccessorInfo> callback) {
- // ----------- S t a t e -------------
- // -- ra : return address
- // -- a0 : key
- // -- a1 : receiver
- // -----------------------------------
- Label miss;
-
- // Check the key is the cached one.
- __ Branch(&miss, ne, a0, Operand(name));
-
- GenerateLoadCallback(receiver, holder, a1, a0, a2, a3, t0, t1, callback,
- name, &miss);
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
-
- return GetCode(Code::CALLBACKS, name);
-}
-
-
-Handle<Code> KeyedLoadStubCompiler::CompileLoadConstant(
- Handle<String> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- Handle<JSFunction> value) {
- // ----------- S t a t e -------------
- // -- ra : return address
- // -- a0 : key
- // -- a1 : receiver
- // -----------------------------------
- Label miss;
-
- // Check the key is the cached one.
- __ Branch(&miss, ne, a0, Operand(name));
-
- GenerateLoadConstant(receiver, holder, a1, a2, a3, t0, value, name, &miss);
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
-
// Return the generated code.
- return GetCode(Code::CONSTANT_FUNCTION, name);
-}
-
-
-Handle<Code> KeyedLoadStubCompiler::CompileLoadInterceptor(
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- ra : return address
- // -- a0 : key
- // -- a1 : receiver
- // -----------------------------------
- Label miss;
-
- // Check the key is the cached one.
- __ Branch(&miss, ne, a0, Operand(name));
-
- LookupResult lookup(isolate());
- LookupPostInterceptor(holder, name, &lookup);
- GenerateLoadInterceptor(receiver, holder, &lookup, a1, a0, a2, a3, t0, name,
- &miss);
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
-
- return GetCode(Code::INTERCEPTOR, name);
-}
-
-
-Handle<Code> KeyedLoadStubCompiler::CompileLoadArrayLength(
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- ra : return address
- // -- a0 : key
- // -- a1 : receiver
- // -----------------------------------
- Label miss;
-
- // Check the key is the cached one.
- __ Branch(&miss, ne, a0, Operand(name));
-
- GenerateLoadArrayLength(masm(), a1, a2, &miss);
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
-
- return GetCode(Code::CALLBACKS, name);
-}
-
-
-Handle<Code> KeyedLoadStubCompiler::CompileLoadStringLength(
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- ra : return address
- // -- a0 : key
- // -- a1 : receiver
- // -----------------------------------
- Label miss;
-
- Counters* counters = masm()->isolate()->counters();
- __ IncrementCounter(counters->keyed_load_string_length(), 1, a2, a3);
-
- // Check the key is the cached one.
- __ Branch(&miss, ne, a0, Operand(name));
-
- GenerateLoadStringLength(masm(), a1, a2, a3, &miss, true);
- __ bind(&miss);
- __ DecrementCounter(counters->keyed_load_string_length(), 1, a2, a3);
-
- GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
-
- return GetCode(Code::CALLBACKS, name);
-}
-
-
-Handle<Code> KeyedLoadStubCompiler::CompileLoadFunctionPrototype(
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- ra : return address
- // -- a0 : key
- // -- a1 : receiver
- // -----------------------------------
- Label miss;
-
- Counters* counters = masm()->isolate()->counters();
- __ IncrementCounter(counters->keyed_load_function_prototype(), 1, a2, a3);
-
- // Check the name hasn't changed.
- __ Branch(&miss, ne, a0, Operand(name));
-
- GenerateLoadFunctionPrototype(masm(), a1, a2, a3, &miss);
- __ bind(&miss);
- __ DecrementCounter(counters->keyed_load_function_prototype(), 1, a2, a3);
- GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
-
- return GetCode(Code::CALLBACKS, name);
+ return GetCode(Code::IC_FRAGMENT, Code::NORMAL, name);
}
@@ -3275,42 +2984,55 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadElement(
// -- a1 : receiver
// -----------------------------------
ElementsKind elements_kind = receiver_map->elements_kind();
- Handle<Code> stub = KeyedLoadElementStub(elements_kind).GetCode();
-
- __ DispatchMap(a1, a2, receiver_map, stub, DO_SMI_CHECK);
+ if (receiver_map->has_fast_elements() ||
+ receiver_map->has_external_array_elements()) {
+ Handle<Code> stub = KeyedLoadFastElementStub(
+ receiver_map->instance_type() == JS_ARRAY_TYPE,
+ elements_kind).GetCode(isolate());
+ __ DispatchMap(a1, a2, receiver_map, stub, DO_SMI_CHECK);
+ } else {
+ Handle<Code> stub =
+ KeyedLoadDictionaryElementStub().GetCode(isolate());
+ __ DispatchMap(a1, a2, receiver_map, stub, DO_SMI_CHECK);
+ }
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Miss();
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(Code::NORMAL, factory()->empty_string());
+ return GetCode(Code::IC_FRAGMENT, Code::NORMAL, factory()->empty_string());
}
-Handle<Code> KeyedLoadStubCompiler::CompileLoadPolymorphic(
+Handle<Code> BaseLoadStubCompiler::CompilePolymorphicIC(
MapHandleList* receiver_maps,
- CodeHandleList* handler_ics) {
- // ----------- S t a t e -------------
- // -- ra : return address
- // -- a0 : key
- // -- a1 : receiver
- // -----------------------------------
+ CodeHandleList* handlers,
+ Handle<String> name,
+ Code::StubType type,
+ IcCheckType check) {
Label miss;
- __ JumpIfSmi(a1, &miss);
+
+ if (check == PROPERTY) {
+ GenerateNameCheck(name, this->name(), &miss);
+ }
+
+ __ JumpIfSmi(receiver(), &miss);
+ Register map_reg = scratch1();
int receiver_count = receiver_maps->length();
- __ lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset));
+ __ lw(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset));
for (int current = 0; current < receiver_count; ++current) {
- __ Jump(handler_ics->at(current), RelocInfo::CODE_TARGET,
- eq, a2, Operand(receiver_maps->at(current)));
+ __ Jump(handlers->at(current), RelocInfo::CODE_TARGET,
+ eq, map_reg, Operand(receiver_maps->at(current)));
}
__ bind(&miss);
- Handle<Code> miss_ic = isolate()->builtins()->KeyedLoadIC_Miss();
- __ Jump(miss_ic, RelocInfo::CODE_TARGET);
+ GenerateLoadMiss(masm(), kind());
// Return the generated code.
- return GetCode(Code::NORMAL, factory()->empty_string(), MEGAMORPHIC);
+ InlineCacheState state =
+ receiver_maps->length() > 1 ? POLYMORPHIC : MONOMORPHIC;
+ return GetCode(Code::IC_FRAGMENT, type, name, state);
}
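CompilePolymorphicIC emits one load of the receiver's map followed by a linear compare-and-jump per cached map, falling through to the miss handler. In C++ terms (hypothetical types):

#include <cstdio>

struct Map {};
typedef const char* (*Handler)();

static const char* MissHandler() { return "miss"; }
static const char* Handler0() { return "handler0"; }

// Linear polymorphic dispatch: one map load, then compare against each
// cached map and tail-call the matching handler; otherwise miss.
static const char* PolymorphicDispatch(const Map* receiver_map,
                                       const Map* const* maps,
                                       const Handler* handlers,
                                       int count) {
  for (int i = 0; i < count; ++i) {
    if (receiver_map == maps[i]) return handlers[i]();
  }
  return MissHandler();
}

int main() {
  Map m0;
  const Map* maps[] = { &m0 };
  const Handler handlers[] = { Handler0 };
  printf("%s\n", PolymorphicDispatch(&m0, maps, handlers, 1));  // handler0
  return 0;
}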
@@ -3367,7 +3089,9 @@ Handle<Code> KeyedStoreStubCompiler::CompileStoreElement(
ElementsKind elements_kind = receiver_map->elements_kind();
bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
Handle<Code> stub =
- KeyedStoreElementStub(is_js_array, elements_kind, grow_mode_).GetCode();
+ KeyedStoreElementStub(is_js_array,
+ elements_kind,
+ grow_mode_).GetCode(isolate());
__ DispatchMap(a2, a3, receiver_map, stub, DO_SMI_CHECK);
@@ -3413,7 +3137,7 @@ Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
__ Jump(miss_ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(Code::NORMAL, factory()->empty_string(), MEGAMORPHIC);
+ return GetCode(Code::NORMAL, factory()->empty_string(), POLYMORPHIC);
}
@@ -3522,7 +3246,8 @@ Handle<Code> ConstructStubCompiler::CompileConstructStub(
__ bind(&next);
} else {
// Set the property to the constant value.
- Handle<Object> constant(shared->GetThisPropertyAssignmentConstant(i));
+ Handle<Object> constant(shared->GetThisPropertyAssignmentConstant(i),
+ masm()->isolate());
__ li(a2, Operand(constant));
__ sw(a2, MemOperand(t5));
__ Addu(t5, t5, kPointerSize);
@@ -3654,6 +3379,7 @@ static void GenerateSmiKeyCheck(MacroAssembler* masm,
Register scratch0,
Register scratch1,
FPURegister double_scratch0,
+ FPURegister double_scratch1,
Label* fail) {
if (CpuFeatures::IsSupported(FPU)) {
CpuFeatures::Scope scope(FPU);
@@ -3669,15 +3395,15 @@ static void GenerateSmiKeyCheck(MacroAssembler* masm,
DONT_DO_SMI_CHECK);
__ ldc1(double_scratch0, FieldMemOperand(key, HeapNumber::kValueOffset));
__ EmitFPUTruncate(kRoundToZero,
- double_scratch0,
- double_scratch0,
scratch0,
+ double_scratch0,
+ at,
+ double_scratch1,
scratch1,
kCheckForInexactConversion);
__ Branch(fail, ne, scratch1, Operand(zero_reg));
- __ mfc1(scratch0, double_scratch0);
__ SmiTagCheckOverflow(key, scratch0, scratch1);
__ BranchOnOverflow(fail, scratch1);
__ bind(&key_ok);
@@ -3688,343 +3414,6 @@ static void GenerateSmiKeyCheck(MacroAssembler* masm,
}
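GenerateSmiKeyCheck, whose FPU path the hunk above reworks to route EmitFPUTruncate through an extra scratch double, accepts a key that is either a smi already or a heap number converting exactly to a smi-range int32. The predicate it implements is roughly (host-side model, illustrative tagging):

#include <cstdint>
#include <cmath>
#include <cstdio>

// A heap-number key is usable as an element index iff truncation toward
// zero is exact and the result fits in the 31-bit smi payload.
static bool KeyToSmi(double key, int32_t* smi_out) {
  const double truncated = std::trunc(key);
  if (truncated != key) return false;  // inexact conversion -> bail out
  if (truncated < -1073741824.0 || truncated > 1073741823.0) return false;
  *smi_out = static_cast<int32_t>(truncated) * 2;  // smi tag: low bit 0
  return true;
}

int main() {
  int32_t smi = 0;
  const bool ok_exact = KeyToSmi(7.0, &smi);    // true, smi == 14
  const bool ok_inexact = KeyToSmi(7.5, &smi);  // false, smi unchanged
  printf("%d %d %d\n", ok_exact, smi, ok_inexact);
  return 0;
}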
-void KeyedLoadStubCompiler::GenerateLoadExternalArray(
- MacroAssembler* masm,
- ElementsKind elements_kind) {
- // ---------- S t a t e --------------
- // -- ra : return address
- // -- a0 : key
- // -- a1 : receiver
- // -----------------------------------
- Label miss_force_generic, slow, failed_allocation;
-
- Register key = a0;
- Register receiver = a1;
-
- // This stub is meant to be tail-jumped to, the receiver must already
- // have been verified by the caller to not be a smi.
-
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, key, t0, t1, f2, &miss_force_generic);
-
- __ lw(a3, FieldMemOperand(receiver, JSObject::kElementsOffset));
- // a3: elements array
-
- // Check that the index is in range.
- __ lw(t1, FieldMemOperand(a3, ExternalArray::kLengthOffset));
- __ sra(t2, key, kSmiTagSize);
- // Unsigned comparison catches both negative and too-large values.
- __ Branch(&miss_force_generic, Ugreater_equal, key, Operand(t1));
-
- __ lw(a3, FieldMemOperand(a3, ExternalArray::kExternalPointerOffset));
- // a3: base pointer of external storage
-
- // We are not untagging smi key and instead work with it
- // as if it was premultiplied by 2.
- STATIC_ASSERT((kSmiTag == 0) && (kSmiTagSize == 1));
-
- Register value = a2;
- switch (elements_kind) {
- case EXTERNAL_BYTE_ELEMENTS:
- __ srl(t2, key, 1);
- __ addu(t3, a3, t2);
- __ lb(value, MemOperand(t3, 0));
- break;
- case EXTERNAL_PIXEL_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- __ srl(t2, key, 1);
- __ addu(t3, a3, t2);
- __ lbu(value, MemOperand(t3, 0));
- break;
- case EXTERNAL_SHORT_ELEMENTS:
- __ addu(t3, a3, key);
- __ lh(value, MemOperand(t3, 0));
- break;
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- __ addu(t3, a3, key);
- __ lhu(value, MemOperand(t3, 0));
- break;
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- __ sll(t2, key, 1);
- __ addu(t3, a3, t2);
- __ lw(value, MemOperand(t3, 0));
- break;
- case EXTERNAL_FLOAT_ELEMENTS:
- __ sll(t3, t2, 2);
- __ addu(t3, a3, t3);
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatures::Scope scope(FPU);
- __ lwc1(f0, MemOperand(t3, 0));
- } else {
- __ lw(value, MemOperand(t3, 0));
- }
- break;
- case EXTERNAL_DOUBLE_ELEMENTS:
- __ sll(t2, key, 2);
- __ addu(t3, a3, t2);
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatures::Scope scope(FPU);
- __ ldc1(f0, MemOperand(t3, 0));
- } else {
- // t3: pointer to the beginning of the double we want to load.
- __ lw(a2, MemOperand(t3, 0));
- __ lw(a3, MemOperand(t3, Register::kSizeInBytes));
- }
- break;
- case FAST_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNREACHABLE();
- break;
- }
-
- // For integer array types:
- // a2: value
- // For float array type:
- // f0: value (if FPU is supported)
- // a2: value (if FPU is not supported)
- // For double array type:
- // f0: value (if FPU is supported)
- // a2/a3: value (if FPU is not supported)
-
- if (elements_kind == EXTERNAL_INT_ELEMENTS) {
- // For the Int and UnsignedInt array types, we need to see whether
- // the value can be represented in a Smi. If not, we need to convert
- // it to a HeapNumber.
- Label box_int;
- __ Subu(t3, value, Operand(0xC0000000)); // Non-smi value gives neg result.
- __ Branch(&box_int, lt, t3, Operand(zero_reg));
- // Tag integer as smi and return it.
- __ sll(v0, value, kSmiTagSize);
- __ Ret();
-
- __ bind(&box_int);
- // Allocate a HeapNumber for the result and perform int-to-double
- // conversion.
- // The arm version uses a temporary here to save r0, but we don't need to
- // (a0 is not modified).
- __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(v0, a3, t0, t1, &slow);
-
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatures::Scope scope(FPU);
- __ mtc1(value, f0);
- __ cvt_d_w(f0, f0);
- __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
- __ Ret();
- } else {
- Register dst1 = t2;
- Register dst2 = t3;
- FloatingPointHelper::Destination dest =
- FloatingPointHelper::kCoreRegisters;
- FloatingPointHelper::ConvertIntToDouble(masm,
- value,
- dest,
- f0,
- dst1,
- dst2,
- t1,
- f2);
- __ sw(dst1, FieldMemOperand(v0, HeapNumber::kMantissaOffset));
- __ sw(dst2, FieldMemOperand(v0, HeapNumber::kExponentOffset));
- __ Ret();
- }
- } else if (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) {
- // The test is different for unsigned int values. Since we need
- // the value to be in the range of a positive smi, we can't
- // handle either of the top two bits being set in the value.
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatures::Scope scope(FPU);
- Label pl_box_int;
- __ And(t2, value, Operand(0xC0000000));
- __ Branch(&pl_box_int, ne, t2, Operand(zero_reg));
-
- // It can fit in an Smi.
- // Tag integer as smi and return it.
- __ sll(v0, value, kSmiTagSize);
- __ Ret();
-
- __ bind(&pl_box_int);
- // Allocate a HeapNumber for the result and perform int-to-double
- // conversion. Don't use a0 and a1 as AllocateHeapNumber clobbers all
- // registers - also when jumping due to exhausted young space.
- __ LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(v0, t2, t3, t6, &slow);
-
- // This is replaced by a macro:
- // __ mtc1(value, f0); // LS 32-bits.
- // __ mtc1(zero_reg, f1); // MS 32-bits are all zero.
- // __ cvt_d_l(f0, f0); // Use 64 bit conv to get correct unsigned 32-bit.
-
- __ Cvt_d_uw(f0, value, f22);
-
- __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
-
- __ Ret();
- } else {
- // Check whether unsigned integer fits into smi.
- Label box_int_0, box_int_1, done;
- __ And(t2, value, Operand(0x80000000));
- __ Branch(&box_int_0, ne, t2, Operand(zero_reg));
- __ And(t2, value, Operand(0x40000000));
- __ Branch(&box_int_1, ne, t2, Operand(zero_reg));
-
- // Tag integer as smi and return it.
- __ sll(v0, value, kSmiTagSize);
- __ Ret();
-
- Register hiword = value; // a2.
- Register loword = a3;
-
- __ bind(&box_int_0);
- // Integer does not have leading zeros.
- GenerateUInt2Double(masm, hiword, loword, t0, 0);
- __ Branch(&done);
-
- __ bind(&box_int_1);
- // Integer has one leading zero.
- GenerateUInt2Double(masm, hiword, loword, t0, 1);
-
-
- __ bind(&done);
- // Integer was converted to double in registers hiword:loword.
- // Wrap it into a HeapNumber. Don't use a0 and a1 as AllocateHeapNumber
- // clobbers all registers - also when jumping due to exhausted young
- // space.
- __ LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(t2, t3, t5, t6, &slow);
-
- __ sw(hiword, FieldMemOperand(t2, HeapNumber::kExponentOffset));
- __ sw(loword, FieldMemOperand(t2, HeapNumber::kMantissaOffset));
-
- __ mov(v0, t2);
- __ Ret();
- }
- } else if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
- // For the floating-point array type, we need to always allocate a
- // HeapNumber.
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatures::Scope scope(FPU);
- // Allocate a HeapNumber for the result. Don't use a0 and a1 as
- // AllocateHeapNumber clobbers all registers - also when jumping due to
- // exhausted young space.
- __ LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(v0, t3, t5, t6, &slow);
- // The float (single) value is already in fpu reg f0 (if we use float).
- __ cvt_d_s(f0, f0);
- __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
- __ Ret();
- } else {
- // Allocate a HeapNumber for the result. Don't use a0 and a1 as
- // AllocateHeapNumber clobbers all registers - also when jumping due to
- // exhausted young space.
- __ LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(v0, t3, t5, t6, &slow);
- // FPU is not available, do manual single to double conversion.
-
- // a2: floating point value (binary32).
- // v0: heap number for result
-
- // Extract mantissa to t4.
- __ And(t4, value, Operand(kBinary32MantissaMask));
-
- // Extract exponent to t5.
- __ srl(t5, value, kBinary32MantissaBits);
- __ And(t5, t5, Operand(kBinary32ExponentMask >> kBinary32MantissaBits));
-
- Label exponent_rebiased;
- __ Branch(&exponent_rebiased, eq, t5, Operand(zero_reg));
-
- __ li(t0, 0x7ff);
- __ Xor(t1, t5, Operand(0xFF));
- __ Movz(t5, t0, t1); // Set t5 to 0x7ff only if t5 is equal to 0xff.
- __ Branch(&exponent_rebiased, eq, t1, Operand(zero_reg));
-
- // Rebias exponent.
- __ Addu(t5,
- t5,
- Operand(-kBinary32ExponentBias + HeapNumber::kExponentBias));
-
- __ bind(&exponent_rebiased);
- __ And(a2, value, Operand(kBinary32SignMask));
- value = no_reg;
- __ sll(t0, t5, HeapNumber::kMantissaBitsInTopWord);
- __ or_(a2, a2, t0);
-
- // Shift mantissa.
- static const int kMantissaShiftForHiWord =
- kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;
-
- static const int kMantissaShiftForLoWord =
- kBitsPerInt - kMantissaShiftForHiWord;
-
- __ srl(t0, t4, kMantissaShiftForHiWord);
- __ or_(a2, a2, t0);
- __ sll(a0, t4, kMantissaShiftForLoWord);
-
- __ sw(a2, FieldMemOperand(v0, HeapNumber::kExponentOffset));
- __ sw(a0, FieldMemOperand(v0, HeapNumber::kMantissaOffset));
- __ Ret();
- }
-
- } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatures::Scope scope(FPU);
- // Allocate a HeapNumber for the result. Don't use a0 and a1 as
- // AllocateHeapNumber clobbers all registers - also when jumping due to
- // exhausted young space.
- __ LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(v0, t3, t5, t6, &slow);
- // The double value is already in f0
- __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
- __ Ret();
- } else {
- // Allocate a HeapNumber for the result. Don't use a0 and a1 as
- // AllocateHeapNumber clobbers all registers - also when jumping due to
- // exhausted young space.
- __ LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(v0, t3, t5, t6, &slow);
-
- __ sw(a2, FieldMemOperand(v0, HeapNumber::kMantissaOffset));
- __ sw(a3, FieldMemOperand(v0, HeapNumber::kExponentOffset));
- __ Ret();
- }
-
- } else {
- // Tag integer as smi and return it.
- __ sll(v0, value, kSmiTagSize);
- __ Ret();
- }
-
- // Slow case, key and receiver still in a0 and a1.
- __ bind(&slow);
- __ IncrementCounter(
- masm->isolate()->counters()->keyed_load_external_array_slow(),
- 1, a2, a3);
-
- // ---------- S t a t e --------------
- // -- ra : return address
- // -- a0 : key
- // -- a1 : receiver
- // -----------------------------------
-
- __ Push(a1, a0);
-
- __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
-
- __ bind(&miss_force_generic);
- Handle<Code> stub =
- masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
- __ Jump(stub, RelocInfo::CODE_TARGET);
-}
-
-
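
The removed load stub above leans on two bit tricks worth spelling out: the EXTERNAL_INT path checks the smi range with a single subtraction (value minus 0xC0000000 is negative exactly when the value needs a HeapNumber), and the FPU-less EXTERNAL_FLOAT path widens binary32 to binary64 by hand, rebiasing the exponent and shifting the mantissa up. A minimal standalone C++ sketch of both, with the denormal case elided:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Equivalent of: Subu(t3, value, 0xC0000000); branch if result < 0.
    // In 32-bit wraparound arithmetic, subtracting 0xC0000000 is adding
    // 0x40000000, which stays non-negative exactly for [-2^30, 2^30).
    static bool FitsInSmi(int32_t value) {
      return static_cast<int32_t>(static_cast<uint32_t>(value) + 0x40000000u) >= 0;
    }

    // Normal-number path of the manual binary32 -> binary64 widening: split
    // sign/exponent/mantissa, rebias the exponent (127 -> 1023, i.e. +896),
    // and shift the 23-bit mantissa up into the top of the 52-bit field.
    static uint64_t Binary32ToBinary64Bits(uint32_t f) {
      uint64_t sign = static_cast<uint64_t>(f & 0x80000000u) << 32;
      uint32_t exp = (f >> 23) & 0xFFu;
      uint64_t man = f & 0x7FFFFFu;
      if (exp == 0) return sign;  // zero; denormals elided in this sketch
      uint64_t exp64 = (exp == 0xFFu) ? 0x7FFu : exp + 896u;  // Inf/NaN keep max
      return sign | (exp64 << 52) | (man << 29);
    }

    int main() {
      printf("%d %d %d\n", FitsInSmi(0x3FFFFFFF), FitsInSmi(0x40000000),
             FitsInSmi(-0x40000000));  // 1 0 1
      float f = 3.5f;
      uint32_t fb;
      memcpy(&fb, &f, sizeof fb);
      double d = f;  // exact widening to compare against
      uint64_t db;
      memcpy(&db, &d, sizeof db);
      printf("%d\n", Binary32ToBinary64Bits(fb) == db);  // 1
      return 0;
    }
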
void KeyedStoreStubCompiler::GenerateStoreExternalArray(
MacroAssembler* masm,
ElementsKind elements_kind) {
@@ -4047,7 +3436,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
// have been verified by the caller to not be a smi.
// Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, key, t0, t1, f2, &miss_force_generic);
+ GenerateSmiKeyCheck(masm, key, t0, t1, f2, f4, &miss_force_generic);
__ lw(a3, FieldMemOperand(receiver, JSObject::kElementsOffset));
@@ -4126,7 +3515,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
}
FloatingPointHelper::ConvertIntToDouble(
masm, t1, destination,
- f0, t2, t3, // These are: double_dst, dst1, dst2.
+ f0, t2, t3, // These are: double_dst, dst_mantissa, dst_exponent.
t0, f2); // These are: scratch2, single_scratch.
if (destination == FloatingPointHelper::kFPURegisters) {
CpuFeatures::Scope scope(FPU);
@@ -4424,115 +3813,6 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
}
-void KeyedLoadStubCompiler::GenerateLoadFastElement(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- ra : return address
- // -- a0 : key
- // -- a1 : receiver
- // -----------------------------------
- Label miss_force_generic;
-
- // This stub is meant to be tail-jumped to, the receiver must already
- // have been verified by the caller to not be a smi.
-
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, a0, t0, t1, f2, &miss_force_generic);
-
- // Get the elements array.
- __ lw(a2, FieldMemOperand(a1, JSObject::kElementsOffset));
- __ AssertFastElements(a2);
-
- // Check that the key is within bounds.
- __ lw(a3, FieldMemOperand(a2, FixedArray::kLengthOffset));
- __ Branch(USE_DELAY_SLOT, &miss_force_generic, hs, a0, Operand(a3));
-
- // Load the result and make sure it's not the hole.
- __ Addu(a3, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
- __ sll(t0, a0, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(t0, t0, a3);
- __ lw(t0, MemOperand(t0));
- __ LoadRoot(t1, Heap::kTheHoleValueRootIndex);
- __ Branch(&miss_force_generic, eq, t0, Operand(t1));
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, t0);
-
- __ bind(&miss_force_generic);
- Handle<Code> stub =
- masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
- __ Jump(stub, RelocInfo::CODE_TARGET);
-}
-
-
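
The fast-element load removed above never untags the key: with kSmiTagSize == 1 and 32-bit pointers, the smi-tagged key (index << 1) becomes a byte offset (index << 2) with one extra shift. A small sketch of that addressing arithmetic, assuming those MIPS-era constants:

    #include <cstdint>
    #include <cstdio>

    int main() {
      const int kSmiTagSize = 1;       // low bit tags a smi
      const int kPointerSizeLog2 = 2;  // 32-bit pointers, as on this MIPS port
      int32_t index = 5;
      int32_t smi_key = index << kSmiTagSize;  // tagged key: 10
      // The stub scales the still-tagged key straight into a byte offset:
      int32_t offset = smi_key << (kPointerSizeLog2 - kSmiTagSize);
      printf("%d\n", offset == index * 4);  // 1: untagging and scaling fused
      return 0;
    }
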
-void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(
- MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- ra : return address
- // -- a0 : key
- // -- a1 : receiver
- // -----------------------------------
- Label miss_force_generic, slow_allocate_heapnumber;
-
- Register key_reg = a0;
- Register receiver_reg = a1;
- Register elements_reg = a2;
- Register heap_number_reg = a2;
- Register indexed_double_offset = a3;
- Register scratch = t0;
- Register scratch2 = t1;
- Register scratch3 = t2;
- Register heap_number_map = t3;
-
- // This stub is meant to be tail-jumped to, the receiver must already
- // have been verified by the caller to not be a smi.
-
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, key_reg, t0, t1, f2, &miss_force_generic);
-
- // Get the elements array.
- __ lw(elements_reg,
- FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
-
- // Check that the key is within bounds.
- __ lw(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
- __ Branch(&miss_force_generic, hs, key_reg, Operand(scratch));
-
- // Load the upper word of the double in the fixed array and test for NaN.
- __ sll(scratch2, key_reg, kDoubleSizeLog2 - kSmiTagSize);
- __ Addu(indexed_double_offset, elements_reg, Operand(scratch2));
- uint32_t upper_32_offset = FixedArray::kHeaderSize + sizeof(kHoleNanLower32);
- __ lw(scratch, FieldMemOperand(indexed_double_offset, upper_32_offset));
- __ Branch(&miss_force_generic, eq, scratch, Operand(kHoleNanUpper32));
-
- // Non-NaN. Allocate a new heap number and copy the double value into it.
- __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(heap_number_reg, scratch2, scratch3,
- heap_number_map, &slow_allocate_heapnumber);
-
- // Don't need to reload the upper 32 bits of the double, it's already in
- // scratch.
- __ sw(scratch, FieldMemOperand(heap_number_reg,
- HeapNumber::kExponentOffset));
- __ lw(scratch, FieldMemOperand(indexed_double_offset,
- FixedArray::kHeaderSize));
- __ sw(scratch, FieldMemOperand(heap_number_reg,
- HeapNumber::kMantissaOffset));
-
- __ mov(v0, heap_number_reg);
- __ Ret();
-
- __ bind(&slow_allocate_heapnumber);
- Handle<Code> slow_ic =
- masm->isolate()->builtins()->KeyedLoadIC_Slow();
- __ Jump(slow_ic, RelocInfo::CODE_TARGET);
-
- __ bind(&miss_force_generic);
- Handle<Code> miss_ic =
- masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
- __ Jump(miss_ic, RelocInfo::CODE_TARGET);
-}
-
-
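
The double-element paths here encode array holes as a NaN with a reserved payload: the removed loader compares only the upper machine word of the slot against kHoleNanUpper32, and the grow path further down prefills a fresh FixedDoubleArray with the same pattern. A standalone sketch of the mechanism, with illustrative bit patterns standing in for V8's exact kHoleNanUpper32/kHoleNanLower32 values:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Illustrative stand-ins for kHoleNanUpper32/kHoleNanLower32; V8's exact
    // reserved payload may differ, but the mechanism is the same: a NaN
    // payload that ordinary arithmetic never produces marks "no element".
    static const uint32_t kHoleUpper = 0x7FF7FFFFu;
    static const uint32_t kHoleLower = 0xFFFFFFFFu;

    static double MakeHole() {
      uint64_t bits = (static_cast<uint64_t>(kHoleUpper) << 32) | kHoleLower;
      double d;
      memcpy(&d, &bits, sizeof d);
      return d;
    }

    // Mirrors the removed loader: inspect only the upper 32 bits of the slot.
    static bool IsHole(double d) {
      uint64_t bits;
      memcpy(&bits, &d, sizeof bits);
      return static_cast<uint32_t>(bits >> 32) == kHoleUpper;
    }

    int main() {
      double elements[3] = {1.5, MakeHole(), 2.5};
      for (int i = 0; i < 3; i++) printf("%d", IsHole(elements[i]));
      printf("\n");  // prints: 010
      return 0;
    }
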
void KeyedStoreStubCompiler::GenerateStoreFastElement(
MacroAssembler* masm,
bool is_js_array,
@@ -4561,7 +3841,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
// have been verified by the caller to not be a smi.
// Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, key_reg, t0, t1, f2, &miss_force_generic);
+ GenerateSmiKeyCheck(masm, key_reg, t0, t1, f2, f4, &miss_force_generic);
if (IsFastSmiElementsKind(elements_kind)) {
__ JumpIfNotSmi(value_reg, &transition_elements_kind);
@@ -4705,11 +3985,12 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
// -- a1 : key
// -- a2 : receiver
// -- ra : return address
- // -- a3 : scratch
+ // -- a3 : scratch (elements backing store)
// -- t0 : scratch (elements_reg)
// -- t1 : scratch (mantissa_reg)
// -- t2 : scratch (exponent_reg)
// -- t3 : scratch4
+ // -- t4 : scratch
// -----------------------------------
Label miss_force_generic, transition_elements_kind, grow, slow;
Label finish_store, check_capacity;
@@ -4722,13 +4003,14 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
Register scratch2 = t1;
Register scratch3 = t2;
Register scratch4 = t3;
+ Register scratch5 = t4;
Register length_reg = t3;
// This stub is meant to be tail-jumped to, the receiver must already
// have been verified by the caller to not be a smi.
// Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, key_reg, t0, t1, f2, &miss_force_generic);
+ GenerateSmiKeyCheck(masm, key_reg, t0, t1, f2, f4, &miss_force_generic);
__ lw(elements_reg,
FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
@@ -4752,7 +4034,6 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
__ StoreNumberToDoubleElements(value_reg,
key_reg,
- receiver_reg,
// All registers after this are overwritten.
elements_reg,
scratch1,
@@ -4803,14 +4084,32 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
__ AllocateInNewSpace(size, elements_reg, scratch1, scratch2, &slow,
TAG_OBJECT);
- // Initialize the new FixedDoubleArray. Leave elements unitialized for
- // efficiency, they are guaranteed to be initialized before use.
+ // Initialize the new FixedDoubleArray.
__ LoadRoot(scratch1, Heap::kFixedDoubleArrayMapRootIndex);
__ sw(scratch1, FieldMemOperand(elements_reg, JSObject::kMapOffset));
__ li(scratch1, Operand(Smi::FromInt(JSArray::kPreallocatedArrayElements)));
__ sw(scratch1,
FieldMemOperand(elements_reg, FixedDoubleArray::kLengthOffset));
+ __ mov(scratch1, elements_reg);
+ __ StoreNumberToDoubleElements(value_reg,
+ key_reg,
+ // All registers after this are overwritten.
+ scratch1,
+ scratch2,
+ scratch3,
+ scratch4,
+ scratch5,
+ &transition_elements_kind);
+
+ __ li(scratch1, Operand(kHoleNanLower32));
+ __ li(scratch2, Operand(kHoleNanUpper32));
+ for (int i = 1; i < JSArray::kPreallocatedArrayElements; i++) {
+ int offset = FixedDoubleArray::OffsetOfElementAt(i);
+ __ sw(scratch1, FieldMemOperand(elements_reg, offset));
+ __ sw(scratch2, FieldMemOperand(elements_reg, offset + kPointerSize));
+ }
+
// Install the new backing store in the JSArray.
__ sw(elements_reg,
FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
@@ -4823,7 +4122,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
__ sw(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
__ lw(elements_reg,
FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
- __ jmp(&finish_store);
+ __ Ret();
__ bind(&check_capacity);
// Make sure that the backing store can hold additional elements.
diff --git a/src/3rdparty/v8/src/mirror-debugger.js b/src/3rdparty/v8/src/mirror-debugger.js
index a5331a0..7f1a05a 100644
--- a/src/3rdparty/v8/src/mirror-debugger.js
+++ b/src/3rdparty/v8/src/mirror-debugger.js
@@ -1844,10 +1844,14 @@ function ScopeDetails(frame, fun, index) {
frame.details_.frameId(),
frame.details_.inlinedFrameIndex(),
index);
+ this.frame_id_ = frame.details_.frameId();
+ this.inlined_frame_id_ = frame.details_.inlinedFrameIndex();
} else {
this.details_ = %GetFunctionScopeDetails(fun.value(), index);
+ this.fun_value_ = fun.value();
this.break_id_ = undefined;
}
+ this.index_ = index;
}
@@ -1867,6 +1871,22 @@ ScopeDetails.prototype.object = function() {
};
+ScopeDetails.prototype.setVariableValueImpl = function(name, new_value) {
+ var raw_res;
+ if (!IS_UNDEFINED(this.break_id_)) {
+ %CheckExecutionState(this.break_id_);
+ raw_res = %SetScopeVariableValue(this.break_id_, this.frame_id_,
+ this.inlined_frame_id_, this.index_, name, new_value);
+ } else {
+ raw_res = %SetScopeVariableValue(this.fun_value_, null, null, this.index_,
+ name, new_value);
+ }
+ if (!raw_res) {
+ throw new Error("Failed to set variable value");
+ }
+};
+
+
/**
* Mirror object for scope of frame or function. Either frame or function must
* be specified.
@@ -1914,6 +1934,11 @@ ScopeMirror.prototype.scopeObject = function() {
};
+ScopeMirror.prototype.setVariableValue = function(name, new_value) {
+ this.details_.setVariableValueImpl(name, new_value);
+};
+
+
/**
* Mirror object for script source.
* @param {Script} script The script object
diff --git a/src/3rdparty/v8/src/mksnapshot.cc b/src/3rdparty/v8/src/mksnapshot.cc
index d777551..a3665e9 100644
--- a/src/3rdparty/v8/src/mksnapshot.cc
+++ b/src/3rdparty/v8/src/mksnapshot.cc
@@ -384,7 +384,7 @@ int main(int argc, char** argv) {
// context even after we have disposed of the context.
HEAP->CollectAllGarbage(i::Heap::kNoGCFlags, "mksnapshot");
i::Object* raw_context = *(v8::Utils::OpenHandle(*context));
- context.Dispose();
+ context.Dispose(context->GetIsolate());
CppByteSink sink(argv[1]);
// This results in a somewhat smaller snapshot, probably because it gets rid
// of some things that are cached between garbage collections.
diff --git a/src/3rdparty/v8/src/object-observe.js b/src/3rdparty/v8/src/object-observe.js
index 28aa1f4..b35f547 100644
--- a/src/3rdparty/v8/src/object-observe.js
+++ b/src/3rdparty/v8/src/object-observe.js
@@ -27,38 +27,40 @@
"use strict";
-var InternalObjectIsFrozen = $Object.isFrozen;
-var InternalObjectFreeze = $Object.freeze;
-
var observationState = %GetObservationState();
if (IS_UNDEFINED(observationState.observerInfoMap)) {
- observationState.observerInfoMap = %CreateObjectHashTable();
- observationState.objectInfoMap = %CreateObjectHashTable();
- observationState.notifierTargetMap = %CreateObjectHashTable();
- observationState.activeObservers = new InternalArray;
+ observationState.observerInfoMap = %ObservationWeakMapCreate();
+ observationState.objectInfoMap = %ObservationWeakMapCreate();
+ observationState.notifierTargetMap = %ObservationWeakMapCreate();
+ observationState.pendingObservers = new InternalArray;
observationState.observerPriority = 0;
}
-function InternalObjectHashTable(tableName) {
- this.tableName = tableName;
+function ObservationWeakMap(map) {
+ this.map_ = map;
}
-InternalObjectHashTable.prototype = {
+ObservationWeakMap.prototype = {
get: function(key) {
- return %ObjectHashTableGet(observationState[this.tableName], key);
+ key = %UnwrapGlobalProxy(key);
+ if (!IS_SPEC_OBJECT(key)) return void 0;
+ return %WeakMapGet(this.map_, key);
},
set: function(key, value) {
- observationState[this.tableName] =
- %ObjectHashTableSet(observationState[this.tableName], key, value);
+ key = %UnwrapGlobalProxy(key);
+ if (!IS_SPEC_OBJECT(key)) return void 0;
+ %WeakMapSet(this.map_, key, value);
},
has: function(key) {
- return %ObjectHashTableHas(observationState[this.tableName], key);
+ return !IS_UNDEFINED(this.get(key));
}
};
-var observerInfoMap = new InternalObjectHashTable('observerInfoMap');
-var objectInfoMap = new InternalObjectHashTable('objectInfoMap');
-var notifierTargetMap = new InternalObjectHashTable('notifierTargetMap');
+var observerInfoMap =
+ new ObservationWeakMap(observationState.observerInfoMap);
+var objectInfoMap = new ObservationWeakMap(observationState.objectInfoMap);
+var notifierTargetMap =
+ new ObservationWeakMap(observationState.notifierTargetMap);
function CreateObjectInfo(object) {
var info = {
@@ -74,7 +76,7 @@ function ObjectObserve(object, callback) {
throw MakeTypeError("observe_non_object", ["observe"]);
if (!IS_SPEC_FUNCTION(callback))
throw MakeTypeError("observe_non_function", ["observe"]);
- if (InternalObjectIsFrozen(callback))
+ if (ObjectIsFrozen(callback))
throw MakeTypeError("observe_callback_frozen");
if (!observerInfoMap.has(callback)) {
@@ -85,39 +87,40 @@ function ObjectObserve(object, callback) {
}
var objectInfo = objectInfoMap.get(object);
- if (IS_UNDEFINED(objectInfo)) {
- objectInfo = CreateObjectInfo(object);
- %SetIsObserved(object, true);
- }
+ if (IS_UNDEFINED(objectInfo)) objectInfo = CreateObjectInfo(object);
+ %SetIsObserved(object, true);
var changeObservers = objectInfo.changeObservers;
- if (changeObservers.indexOf(callback) >= 0)
- return;
+ if (changeObservers.indexOf(callback) < 0) changeObservers.push(callback);
- changeObservers.push(callback);
+ return object;
}
function ObjectUnobserve(object, callback) {
if (!IS_SPEC_OBJECT(object))
throw MakeTypeError("observe_non_object", ["unobserve"]);
+ if (!IS_SPEC_FUNCTION(callback))
+ throw MakeTypeError("observe_non_function", ["unobserve"]);
var objectInfo = objectInfoMap.get(object);
if (IS_UNDEFINED(objectInfo))
- return;
+ return object;
var changeObservers = objectInfo.changeObservers;
var index = changeObservers.indexOf(callback);
- if (index < 0)
- return;
+ if (index >= 0) {
+ changeObservers.splice(index, 1);
+ if (changeObservers.length === 0) %SetIsObserved(object, false);
+ }
- changeObservers.splice(index, 1);
+ return object;
}
function EnqueueChangeRecord(changeRecord, observers) {
for (var i = 0; i < observers.length; i++) {
var observer = observers[i];
var observerInfo = observerInfoMap.get(observer);
- observationState.activeObservers[observerInfo.priority] = observer;
+ observationState.pendingObservers[observerInfo.priority] = observer;
%SetObserverDeliveryPending();
if (IS_NULL(observerInfo.pendingChangeRecords)) {
observerInfo.pendingChangeRecords = new InternalArray(changeRecord);
@@ -132,7 +135,7 @@ function NotifyChange(type, object, name, oldValue) {
var changeRecord = (arguments.length < 4) ?
{ type: type, object: object, name: name } :
{ type: type, object: object, name: name, oldValue: oldValue };
- InternalObjectFreeze(changeRecord);
+ ObjectFreeze(changeRecord);
EnqueueChangeRecord(changeRecord, objectInfo.changeObservers);
}
@@ -145,26 +148,20 @@ function ObjectNotifierNotify(changeRecord) {
var target = notifierTargetMap.get(this);
if (IS_UNDEFINED(target))
throw MakeTypeError("observe_notify_non_notifier");
-
if (!IS_STRING(changeRecord.type))
throw MakeTypeError("observe_type_non_string");
var objectInfo = objectInfoMap.get(target);
- if (IS_UNDEFINED(objectInfo))
- return;
-
- if (!objectInfo.changeObservers.length)
+ if (IS_UNDEFINED(objectInfo) || objectInfo.changeObservers.length === 0)
return;
- var newRecord = {
- object: target
- };
+ var newRecord = { object: target };
for (var prop in changeRecord) {
- if (prop === 'object')
- continue;
- newRecord[prop] = changeRecord[prop];
+ if (prop === 'object') continue;
+ %DefineOrRedefineDataProperty(newRecord, prop, changeRecord[prop],
+ READ_ONLY + DONT_DELETE);
}
- InternalObjectFreeze(newRecord);
+ ObjectFreeze(newRecord);
EnqueueChangeRecord(newRecord, objectInfo.changeObservers);
}
@@ -173,17 +170,13 @@ function ObjectGetNotifier(object) {
if (!IS_SPEC_OBJECT(object))
throw MakeTypeError("observe_non_object", ["getNotifier"]);
- if (InternalObjectIsFrozen(object))
- return null;
+ if (ObjectIsFrozen(object)) return null;
var objectInfo = objectInfoMap.get(object);
- if (IS_UNDEFINED(objectInfo))
- objectInfo = CreateObjectInfo(object);
+ if (IS_UNDEFINED(objectInfo)) objectInfo = CreateObjectInfo(object);
if (IS_NULL(objectInfo.notifier)) {
- objectInfo.notifier = {
- __proto__: notifierPrototype
- };
+ objectInfo.notifier = { __proto__: notifierPrototype };
notifierTargetMap.set(objectInfo.notifier, object);
}
@@ -193,33 +186,35 @@ function ObjectGetNotifier(object) {
function DeliverChangeRecordsForObserver(observer) {
var observerInfo = observerInfoMap.get(observer);
if (IS_UNDEFINED(observerInfo))
- return;
+ return false;
var pendingChangeRecords = observerInfo.pendingChangeRecords;
if (IS_NULL(pendingChangeRecords))
- return;
+ return false;
observerInfo.pendingChangeRecords = null;
+ delete observationState.pendingObservers[observerInfo.priority];
var delivered = [];
%MoveArrayContents(pendingChangeRecords, delivered);
try {
%Call(void 0, delivered, observer);
} catch (ex) {}
+ return true;
}
function ObjectDeliverChangeRecords(callback) {
if (!IS_SPEC_FUNCTION(callback))
throw MakeTypeError("observe_non_function", ["deliverChangeRecords"]);
- DeliverChangeRecordsForObserver(callback);
+ while (DeliverChangeRecordsForObserver(callback)) {}
}
function DeliverChangeRecords() {
- while (observationState.activeObservers.length) {
- var activeObservers = observationState.activeObservers;
- observationState.activeObservers = new InternalArray;
- for (var i in activeObservers) {
- DeliverChangeRecordsForObserver(activeObservers[i]);
+ while (observationState.pendingObservers.length) {
+ var pendingObservers = observationState.pendingObservers;
+ observationState.pendingObservers = new InternalArray;
+ for (var i in pendingObservers) {
+ DeliverChangeRecordsForObserver(pendingObservers[i]);
}
}
}
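
Two delivery-loop details in the rewritten observation code are easy to miss: an observer's pending records are detached before its callback runs, and deliverChangeRecords now drains repeatedly because the callback can enqueue fresh records while it executes. A C++ analogue of that drain-to-exhaustion shape (hypothetical types, not the V8 API):

    #include <cstdio>
    #include <deque>
    #include <functional>

    static std::deque<int> pending;

    static bool DeliverOnce(const std::function<void(int)>& observer) {
      if (pending.empty()) return false;
      std::deque<int> batch;
      batch.swap(pending);  // detach first: re-entrant enqueues form a new batch
      for (int record : batch) observer(record);
      return true;
    }

    int main() {
      pending = {1, 2};
      auto observer = [](int record) {
        printf("record %d\n", record);
        if (record == 2) pending.push_back(3);  // delivery causes more changes
      };
      while (DeliverOnce(observer)) {}  // as in ObjectDeliverChangeRecords
      return 0;
    }
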
diff --git a/src/3rdparty/v8/src/objects-debug.cc b/src/3rdparty/v8/src/objects-debug.cc
index c2f64d4..e583016 100644
--- a/src/3rdparty/v8/src/objects-debug.cc
+++ b/src/3rdparty/v8/src/objects-debug.cc
@@ -30,6 +30,7 @@
#include "disassembler.h"
#include "disasm.h"
#include "jsregexp.h"
+#include "macro-assembler.h"
#include "objects-visiting.h"
namespace v8 {
@@ -79,6 +80,9 @@ void HeapObject::HeapObjectVerify() {
}
switch (instance_type) {
+ case SYMBOL_TYPE:
+ Symbol::cast(this)->SymbolVerify();
+ break;
case MAP_TYPE:
Map::cast(this)->MapVerify();
break;
@@ -212,6 +216,13 @@ void HeapObject::VerifyHeapPointer(Object* p) {
}
+void Symbol::SymbolVerify() {
+ CHECK(IsSymbol());
+ CHECK(HasHashCode());
+ CHECK_GT(Hash(), 0);
+}
+
+
void HeapNumber::HeapNumberVerify() {
CHECK(IsHeapNumber());
}
@@ -311,6 +322,9 @@ void Map::MapVerify() {
SLOW_ASSERT(transitions()->IsSortedNoDuplicates());
SLOW_ASSERT(transitions()->IsConsistentWithBackPointers(this));
}
+ ASSERT(!is_observed() || instance_type() < FIRST_JS_OBJECT_TYPE ||
+ instance_type() > LAST_JS_OBJECT_TYPE ||
+ has_slow_elements_kind() || has_external_array_elements());
}
@@ -325,6 +339,15 @@ void Map::SharedMapVerify() {
}
+void Map::VerifyOmittedPrototypeChecks() {
+ if (!FLAG_omit_prototype_checks_for_leaf_maps) return;
+ if (HasTransitionArray() || is_dictionary_map()) {
+ CHECK_EQ(0, dependent_code()->number_of_entries(
+ DependentCode::kPrototypeCheckGroup));
+ }
+}
+
+
void CodeCache::CodeCacheVerify() {
VerifyHeapPointer(default_cache());
VerifyHeapPointer(normal_type_cache());
@@ -456,21 +479,23 @@ void JSMessageObject::JSMessageObjectVerify() {
void String::StringVerify() {
CHECK(IsString());
CHECK(length() >= 0 && length() <= Smi::kMaxValue);
- if (IsSymbol()) {
+ if (IsInternalizedString()) {
CHECK(!HEAP->InNewSpace(this));
}
if (IsConsString()) {
ConsString::cast(this)->ConsStringVerify();
} else if (IsSlicedString()) {
SlicedString::cast(this)->SlicedStringVerify();
- } else if (IsSeqAsciiString()) {
- SeqAsciiString::cast(this)->SeqAsciiStringVerify();
+ } else if (IsSeqOneByteString()) {
+ SeqOneByteString::cast(this)->SeqOneByteStringVerify();
}
}
-void SeqAsciiString::SeqAsciiStringVerify() {
+void SeqOneByteString::SeqOneByteStringVerify() {
+#ifndef ENABLE_LATIN_1
CHECK(String::IsAscii(GetChars(), length()));
+#endif
}
@@ -590,6 +615,22 @@ void Code::CodeVerify() {
}
+void Code::VerifyEmbeddedMapsDependency() {
+ int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
+ for (RelocIterator it(this, mode_mask); !it.done(); it.next()) {
+ RelocInfo::Mode mode = it.rinfo()->rmode();
+ if (mode == RelocInfo::EMBEDDED_OBJECT &&
+ it.rinfo()->target_object()->IsMap()) {
+ Map* map = Map::cast(it.rinfo()->target_object());
+ if (map->CanTransition()) {
+ CHECK(map->dependent_code()->Contains(
+ DependentCode::kWeaklyEmbeddedGroup, this));
+ }
+ }
+ }
+}
+
+
void JSArray::JSArrayVerify() {
JSObjectVerify();
CHECK(length()->IsNumber() || length()->IsUndefined());
@@ -686,16 +727,34 @@ void Foreign::ForeignVerify() {
void AccessorInfo::AccessorInfoVerify() {
- CHECK(IsAccessorInfo());
- VerifyPointer(getter());
- VerifyPointer(setter());
VerifyPointer(name());
- VerifyPointer(data());
VerifyPointer(flag());
VerifyPointer(expected_receiver_type());
}
+void ExecutableAccessorInfo::ExecutableAccessorInfoVerify() {
+ CHECK(IsExecutableAccessorInfo());
+ AccessorInfoVerify();
+ VerifyPointer(getter());
+ VerifyPointer(setter());
+ VerifyPointer(data());
+}
+
+
+void DeclaredAccessorDescriptor::DeclaredAccessorDescriptorVerify() {
+ CHECK(IsDeclaredAccessorDescriptor());
+ VerifySmiField(kInternalFieldOffset);
+}
+
+
+void DeclaredAccessorInfo::DeclaredAccessorInfoVerify() {
+ CHECK(IsDeclaredAccessorInfo());
+ AccessorInfoVerify();
+ VerifyPointer(descriptor());
+}
+
+
void AccessorPair::AccessorPairVerify() {
CHECK(IsAccessorPair());
VerifyPointer(getter());
@@ -771,6 +830,13 @@ void TypeSwitchInfo::TypeSwitchInfoVerify() {
}
+void AllocationSiteInfo::AllocationSiteInfoVerify() {
+ CHECK(IsAllocationSiteInfo());
+ VerifyHeapPointer(payload());
+ CHECK(payload()->IsObject());
+}
+
+
void Script::ScriptVerify() {
CHECK(IsScript());
VerifyPointer(source());
diff --git a/src/3rdparty/v8/src/objects-inl.h b/src/3rdparty/v8/src/objects-inl.h
index b45b4d0..8e7d4cd 100644
--- a/src/3rdparty/v8/src/objects-inl.h
+++ b/src/3rdparty/v8/src/objects-inl.h
@@ -134,6 +134,19 @@ bool Object::IsFixedArrayBase() {
}
+// External objects are not extensible, so the map check is enough.
+bool Object::IsExternal() {
+ return Object::IsHeapObject() &&
+ HeapObject::cast(this)->map() ==
+ HeapObject::cast(this)->GetHeap()->external_map();
+}
+
+
+bool Object::IsAccessorInfo() {
+ return IsExecutableAccessorInfo() || IsDeclaredAccessorInfo();
+}
+
+
bool Object::IsInstanceOf(FunctionTemplateInfo* expected) {
// There is a constraint on the object; check.
if (!this->IsJSObject()) return false;
@@ -170,6 +183,7 @@ bool Object::NonFailureIsHeapObject() {
TYPE_CHECKER(HeapNumber, HEAP_NUMBER_TYPE)
+TYPE_CHECKER(Symbol, SYMBOL_TYPE)
bool Object::IsString() {
@@ -178,6 +192,16 @@ bool Object::IsString() {
}
+bool Object::IsName() {
+ return IsString() || IsSymbol();
+}
+
+
+bool Object::IsUniqueName() {
+ return IsInternalizedString() || IsSymbol();
+}
+
+
bool Object::IsSpecObject() {
return Object::IsHeapObject()
&& HeapObject::cast(this)->map()->instance_type() >= FIRST_SPEC_OBJECT_TYPE;
@@ -191,15 +215,15 @@ bool Object::IsSpecFunction() {
}
-bool Object::IsSymbol() {
+bool Object::IsInternalizedString() {
if (!this->IsHeapObject()) return false;
uint32_t type = HeapObject::cast(this)->map()->instance_type();
- // Because the symbol tag is non-zero and no non-string types have the
- // symbol bit set we can test for symbols with a very simple test
- // operation.
- STATIC_ASSERT(kSymbolTag != 0);
- ASSERT(kNotStringTag + kIsSymbolMask > LAST_TYPE);
- return (type & kIsSymbolMask) != 0;
+ // Because the internalized tag is non-zero and no non-string types have the
+ // internalized bit set we can test for internalized strings with a very
+ // simple test operation.
+ STATIC_ASSERT(kInternalizedTag != 0);
+ ASSERT(kNotStringTag + kIsInternalizedMask > LAST_TYPE);
+ return (type & kIsInternalizedMask) != 0;
}
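
The internalized check keeps the old symbol-check property: the flag is folded into the instance type, so membership is a single mask test with no range comparison (the ASSERT guarantees no non-string type can carry the bit). A sketch with made-up tag constants, since V8's real bit layout may differ:

    #include <cstdint>
    #include <cstdio>

    // Illustrative instance-type encoding: string types occupy low codes,
    // and one dedicated bit flags "internalized".
    static const uint32_t kIsInternalizedMask = 0x40;
    static const uint32_t kInternalizedTag = 0x40;

    static bool IsInternalized(uint32_t instance_type) {
      // One AND plus a compare; only string types ever carry the bit.
      return (instance_type & kIsInternalizedMask) != 0;
    }

    int main() {
      uint32_t seq_string = 0x04;
      uint32_t internalized_seq_string = seq_string | kInternalizedTag;
      printf("%d %d\n", IsInternalized(seq_string),
             IsInternalized(internalized_seq_string));  // 0 1
      return 0;
    }
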
@@ -221,10 +245,10 @@ bool Object::IsSeqString() {
}
-bool Object::IsSeqAsciiString() {
+bool Object::IsSeqOneByteString() {
if (!IsString()) return false;
return StringShape(String::cast(this)).IsSequential() &&
- String::cast(this)->IsAsciiRepresentation();
+ String::cast(this)->IsOneByteRepresentation();
}
@@ -244,7 +268,7 @@ bool Object::IsExternalString() {
bool Object::IsExternalAsciiString() {
if (!IsString()) return false;
return StringShape(String::cast(this)).IsExternal() &&
- String::cast(this)->IsAsciiRepresentation();
+ String::cast(this)->IsOneByteRepresentation();
}
@@ -280,14 +304,14 @@ StringShape::StringShape(InstanceType t)
}
-bool StringShape::IsSymbol() {
+bool StringShape::IsInternalized() {
ASSERT(valid());
- STATIC_ASSERT(kSymbolTag != 0);
- return (type_ & kIsSymbolMask) != 0;
+ STATIC_ASSERT(kInternalizedTag != 0);
+ return (type_ & kIsInternalizedMask) != 0;
}
-bool String::IsAsciiRepresentation() {
+bool String::IsOneByteRepresentation() {
uint32_t type = map()->instance_type();
return (type & kStringEncodingMask) == kOneByteStringTag;
}
@@ -299,7 +323,7 @@ bool String::IsTwoByteRepresentation() {
}
-bool String::IsAsciiRepresentationUnderneath() {
+bool String::IsOneByteRepresentationUnderneath() {
uint32_t type = map()->instance_type();
STATIC_ASSERT(kIsIndirectStringTag != 0);
STATIC_ASSERT((kIsIndirectStringMask & kStringEncodingMask) == 0);
@@ -310,7 +334,7 @@ bool String::IsAsciiRepresentationUnderneath() {
case kTwoByteStringTag:
return false;
default: // Cons or sliced string. Need to go deeper.
- return GetUnderlying()->IsAsciiRepresentation();
+ return GetUnderlying()->IsOneByteRepresentation();
}
}
@@ -333,8 +357,17 @@ bool String::IsTwoByteRepresentationUnderneath() {
bool String::HasOnlyAsciiChars() {
uint32_t type = map()->instance_type();
+#ifndef ENABLE_LATIN_1
return (type & kStringEncodingMask) == kOneByteStringTag ||
(type & kAsciiDataHintMask) == kAsciiDataHintTag;
+#else
+ return (type & kAsciiDataHintMask) == kAsciiDataHintTag;
+#endif
+}
+
+
+bool String::IsOneByteConvertible() {
+ return HasOnlyAsciiChars() || IsOneByteRepresentation();
}
@@ -567,6 +600,14 @@ bool Object::IsDeoptimizationOutputData() {
}
+bool Object::IsDependentCode() {
+ if (!IsFixedArray()) return false;
+ // There's actually no way to see the difference between a fixed array and
+ // a dependent codes array.
+ return true;
+}
+
+
bool Object::IsTypeFeedbackCells() {
if (!IsFixedArray()) return false;
// There's actually no way to see the difference between a fixed array and
@@ -655,13 +696,13 @@ bool Object::IsHashTable() {
bool Object::IsDictionary() {
return IsHashTable() &&
- this != HeapObject::cast(this)->GetHeap()->symbol_table();
+ this != HeapObject::cast(this)->GetHeap()->string_table();
}
-bool Object::IsSymbolTable() {
+bool Object::IsStringTable() {
return IsHashTable() &&
- this == HeapObject::cast(this)->GetHeap()->raw_unchecked_symbol_table();
+ this == HeapObject::cast(this)->GetHeap()->raw_unchecked_string_table();
}
@@ -1031,8 +1072,8 @@ Failure* Failure::Exception() {
}
-Failure* Failure::OutOfMemoryException() {
- return Construct(OUT_OF_MEMORY_EXCEPTION);
+Failure* Failure::OutOfMemoryException(intptr_t value) {
+ return Construct(OUT_OF_MEMORY_EXCEPTION, value);
}
@@ -1057,7 +1098,11 @@ Failure* Failure::Construct(Type type, intptr_t value) {
uintptr_t info =
(static_cast<uintptr_t>(value) << kFailureTypeTagSize) | type;
ASSERT(((info << kFailureTagSize) >> kFailureTagSize) == info);
- return reinterpret_cast<Failure*>((info << kFailureTagSize) | kFailureTag);
+ // Fill the unused bits with a pattern that's easy to recognize in crash
+ // dumps.
+ static const int kFailureMagicPattern = 0x0BAD0000;
+ return reinterpret_cast<Failure*>(
+ (info << kFailureTagSize) | kFailureTag | kFailureMagicPattern);
}
@@ -1405,16 +1450,29 @@ void JSObject::initialize_elements() {
MaybeObject* JSObject::ResetElements() {
- Object* obj;
+ if (map()->is_observed()) {
+ // Maintain invariant that observed elements are always in dictionary mode.
+ SeededNumberDictionary* dictionary;
+ MaybeObject* maybe = SeededNumberDictionary::Allocate(0);
+ if (!maybe->To(&dictionary)) return maybe;
+ if (map() == GetHeap()->non_strict_arguments_elements_map()) {
+ FixedArray::cast(elements())->set(1, dictionary);
+ } else {
+ set_elements(dictionary);
+ }
+ return this;
+ }
+
ElementsKind elements_kind = GetInitialFastElementsKind();
if (!FLAG_smi_only_arrays) {
elements_kind = FastSmiToObjectElementsKind(elements_kind);
}
- MaybeObject* maybe_obj = GetElementsTransitionMap(GetIsolate(),
- elements_kind);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- set_map(Map::cast(obj));
+ MaybeObject* maybe = GetElementsTransitionMap(GetIsolate(), elements_kind);
+ Map* map;
+ if (!maybe->To(&map)) return maybe;
+ set_map(map);
initialize_elements();
+
return this;
}
@@ -1565,7 +1623,7 @@ void JSObject::SetInternalField(int index, Smi* value) {
}
-void JSObject::SetExternalResourceObject(Object *value) {
+void JSObject::SetExternalResourceObject(Object* value) {
ASSERT(map()->has_external_resource());
int offset = GetHeaderSize() + kPointerSize * GetInternalFieldCount();
WRITE_FIELD(this, offset, value);
@@ -1575,7 +1633,8 @@ void JSObject::SetExternalResourceObject(Object *value) {
Object *JSObject::GetExternalResourceObject() {
if (map()->has_external_resource()) {
- return READ_FIELD(this, GetHeaderSize() + kPointerSize * GetInternalFieldCount());
+ int offset = GetHeaderSize() + kPointerSize * GetInternalFieldCount();
+ return READ_FIELD(this, offset);
} else {
return GetHeap()->undefined_value();
}
@@ -1727,7 +1786,7 @@ bool Object::IsStringObjectWithCharacterAt(uint32_t index) {
if (!js_value->value()->IsString()) return false;
String* str = String::cast(js_value->value());
- if (index >= (uint32_t)str->length()) return false;
+ if (index >= static_cast<uint32_t>(str->length())) return false;
return true;
}
@@ -1953,6 +2012,11 @@ void FixedArray::set_null_unchecked(Heap* heap, int index) {
}
+double* FixedDoubleArray::data_start() {
+ return reinterpret_cast<double*>(FIELD_ADDR(this, kHeaderSize));
+}
+
+
Object** FixedArray::data_start() {
return HeapObject::RawField(this, kHeaderSize);
}
@@ -2112,6 +2176,16 @@ Object** DescriptorArray::GetKeySlot(int descriptor_number) {
}
+Object** DescriptorArray::GetDescriptorStartSlot(int descriptor_number) {
+ return GetKeySlot(descriptor_number);
+}
+
+
+Object** DescriptorArray::GetDescriptorEndSlot(int descriptor_number) {
+ return GetValueSlot(descriptor_number - 1) + 1;
+}
+
+
String* DescriptorArray::GetKey(int descriptor_number) {
ASSERT(descriptor_number < number_of_descriptors());
return String::cast(get(ToKeyIndex(descriptor_number)));
@@ -2313,7 +2387,7 @@ int HashTable<Shape, Key>::FindEntry(Isolate* isolate, Key key) {
while (true) {
Object* element = KeyAt(entry);
// Empty entry. Uses raw unchecked accessors because it is called by the
- // symbol table during bootstrapping.
+ // string table during bootstrapping.
if (element == isolate->heap()->raw_unchecked_undefined_value()) break;
if (element != isolate->heap()->raw_unchecked_the_hole_value() &&
Shape::IsMatch(key, element)) return entry;
@@ -2352,8 +2426,9 @@ CAST_ACCESSOR(FixedDoubleArray)
CAST_ACCESSOR(DescriptorArray)
CAST_ACCESSOR(DeoptimizationInputData)
CAST_ACCESSOR(DeoptimizationOutputData)
+CAST_ACCESSOR(DependentCode)
CAST_ACCESSOR(TypeFeedbackCells)
-CAST_ACCESSOR(SymbolTable)
+CAST_ACCESSOR(StringTable)
CAST_ACCESSOR(JSFunctionResultCache)
CAST_ACCESSOR(NormalizedMapCache)
CAST_ACCESSOR(ScopeInfo)
@@ -2363,18 +2438,20 @@ CAST_ACCESSOR(PolymorphicCodeCacheHashTable)
CAST_ACCESSOR(MapCache)
CAST_ACCESSOR(String)
CAST_ACCESSOR(SeqString)
-CAST_ACCESSOR(SeqAsciiString)
+CAST_ACCESSOR(SeqOneByteString)
CAST_ACCESSOR(SeqTwoByteString)
CAST_ACCESSOR(SlicedString)
CAST_ACCESSOR(ConsString)
CAST_ACCESSOR(ExternalString)
CAST_ACCESSOR(ExternalAsciiString)
CAST_ACCESSOR(ExternalTwoByteString)
+CAST_ACCESSOR(Symbol)
CAST_ACCESSOR(JSReceiver)
CAST_ACCESSOR(JSObject)
CAST_ACCESSOR(Smi)
CAST_ACCESSOR(HeapObject)
CAST_ACCESSOR(HeapNumber)
+CAST_ACCESSOR(Name)
CAST_ACCESSOR(Oddball)
CAST_ACCESSOR(JSGlobalPropertyCell)
CAST_ACCESSOR(SharedFunctionInfo)
@@ -2406,6 +2483,7 @@ CAST_ACCESSOR(ExternalFloatArray)
CAST_ACCESSOR(ExternalDoubleArray)
CAST_ACCESSOR(ExternalPixelArray)
CAST_ACCESSOR(Struct)
+CAST_ACCESSOR(AccessorInfo)
#define MAKE_STRUCT_CAST(NAME, Name, name) CAST_ACCESSOR(Name)
@@ -2427,12 +2505,12 @@ SMI_ACCESSORS(String, length, kLengthOffset)
SMI_ACCESSORS(SeqString, symbol_id, kSymbolIdOffset)
-uint32_t String::hash_field() {
+uint32_t Name::hash_field() {
return READ_UINT32_FIELD(this, kHashFieldOffset);
}
-void String::set_hash_field(uint32_t value) {
+void Name::set_hash_field(uint32_t value) {
WRITE_UINT32_FIELD(this, kHashFieldOffset, value);
#if V8_HOST_ARCH_64_BIT
WRITE_UINT32_FIELD(this, kHashFieldOffset + kIntSize, 0);
@@ -2442,7 +2520,8 @@ void String::set_hash_field(uint32_t value) {
bool String::Equals(String* other) {
if (other == this) return true;
- if (StringShape(this).IsSymbol() && StringShape(other).IsSymbol()) {
+ if (StringShape(this).IsInternalized() &&
+ StringShape(other).IsInternalized()) {
return false;
}
return SlowEquals(other);
@@ -2469,7 +2548,7 @@ uint16_t String::Get(int index) {
ASSERT(index >= 0 && index < length());
switch (StringShape(this).full_representation_tag()) {
case kSeqStringTag | kOneByteStringTag:
- return SeqAsciiString::cast(this)->SeqAsciiStringGet(index);
+ return SeqOneByteString::cast(this)->SeqOneByteStringGet(index);
case kSeqStringTag | kTwoByteStringTag:
return SeqTwoByteString::cast(this)->SeqTwoByteStringGet(index);
case kConsStringTag | kOneByteStringTag:
@@ -2495,8 +2574,8 @@ void String::Set(int index, uint16_t value) {
ASSERT(index >= 0 && index < length());
ASSERT(StringShape(this).IsSequential());
- return this->IsAsciiRepresentation()
- ? SeqAsciiString::cast(this)->SeqAsciiStringSet(index, value)
+ return this->IsOneByteRepresentation()
+ ? SeqOneByteString::cast(this)->SeqOneByteStringSet(index, value)
: SeqTwoByteString::cast(this)->SeqTwoByteStringSet(index, value);
}
@@ -2518,26 +2597,90 @@ String* String::GetUnderlying() {
}
-uint16_t SeqAsciiString::SeqAsciiStringGet(int index) {
+template<class Visitor, class ConsOp>
+void String::Visit(
+ String* string,
+ unsigned offset,
+ Visitor& visitor,
+ ConsOp& cons_op,
+ int32_t type,
+ unsigned length) {
+ ASSERT(length == static_cast<unsigned>(string->length()));
+ ASSERT(offset <= length);
+ unsigned slice_offset = offset;
+ while (true) {
+ ASSERT(type == string->map()->instance_type());
+
+ switch (type & (kStringRepresentationMask | kStringEncodingMask)) {
+ case kSeqStringTag | kOneByteStringTag:
+ visitor.VisitOneByteString(
+ SeqOneByteString::cast(string)->GetChars() + slice_offset,
+ length - offset);
+ return;
+
+ case kSeqStringTag | kTwoByteStringTag:
+ visitor.VisitTwoByteString(
+ SeqTwoByteString::cast(string)->GetChars() + slice_offset,
+ length - offset);
+ return;
+
+ case kExternalStringTag | kOneByteStringTag:
+ visitor.VisitOneByteString(
+ ExternalAsciiString::cast(string)->GetChars() + slice_offset,
+ length - offset);
+ return;
+
+ case kExternalStringTag | kTwoByteStringTag:
+ visitor.VisitTwoByteString(
+ ExternalTwoByteString::cast(string)->GetChars() + slice_offset,
+ length - offset);
+ return;
+
+ case kSlicedStringTag | kOneByteStringTag:
+ case kSlicedStringTag | kTwoByteStringTag: {
+ SlicedString* slicedString = SlicedString::cast(string);
+ slice_offset += slicedString->offset();
+ string = slicedString->parent();
+ type = string->map()->instance_type();
+ continue;
+ }
+
+ case kConsStringTag | kOneByteStringTag:
+ case kConsStringTag | kTwoByteStringTag:
+ string = cons_op.Operate(string, &offset, &type, &length);
+ if (string == NULL) return;
+ slice_offset = offset;
+ ASSERT(length == static_cast<unsigned>(string->length()));
+ continue;
+
+ default:
+ UNREACHABLE();
+ return;
+ }
+ }
+}
+
+
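
String::Visit drives a caller-supplied visitor with one callback per leaf encoding, after unwrapping sliced strings itself and delegating cons strings to the cons_op. A hypothetical visitor showing the shape the template expects:

    #include <cstdint>
    #include <cstdio>

    // Hypothetical visitor matching what String::Visit dispatches to: one
    // callback per leaf encoding. This one just totals the characters seen.
    struct CountingVisitor {
      unsigned total = 0;
      void VisitOneByteString(const uint8_t* chars, unsigned length) {
        (void)chars;
        total += length;
      }
      void VisitTwoByteString(const uint16_t* chars, unsigned length) {
        (void)chars;
        total += length;
      }
    };

    int main() {
      CountingVisitor v;
      const uint8_t one_byte[] = {'a', 'b', 'c'};
      const uint16_t two_byte[] = {0x3B1, 0x3B2};  // two-byte code units
      // String::Visit would issue these calls itself after unwrapping any
      // sliced/cons wrappers around the leaves:
      v.VisitOneByteString(one_byte, 3);
      v.VisitTwoByteString(two_byte, 2);
      printf("%u\n", v.total);  // 5
      return 0;
    }
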
+uint16_t SeqOneByteString::SeqOneByteStringGet(int index) {
ASSERT(index >= 0 && index < length());
return READ_BYTE_FIELD(this, kHeaderSize + index * kCharSize);
}
-void SeqAsciiString::SeqAsciiStringSet(int index, uint16_t value) {
- ASSERT(index >= 0 && index < length() && value <= kMaxAsciiCharCode);
+void SeqOneByteString::SeqOneByteStringSet(int index, uint16_t value) {
+ ASSERT(index >= 0 && index < length() && value <= kMaxOneByteCharCode);
WRITE_BYTE_FIELD(this, kHeaderSize + index * kCharSize,
static_cast<byte>(value));
}
-Address SeqAsciiString::GetCharsAddress() {
+Address SeqOneByteString::GetCharsAddress() {
return FIELD_ADDR(this, kHeaderSize);
}
-char* SeqAsciiString::GetChars() {
- return reinterpret_cast<char*>(GetCharsAddress());
+uint8_t* SeqOneByteString::GetChars() {
+ return reinterpret_cast<uint8_t*>(GetCharsAddress());
}
@@ -2568,7 +2711,7 @@ int SeqTwoByteString::SeqTwoByteStringSize(InstanceType instance_type) {
}
-int SeqAsciiString::SeqAsciiStringSize(InstanceType instance_type) {
+int SeqOneByteString::SeqOneByteStringSize(InstanceType instance_type) {
return SizeFor(length());
}
@@ -2647,8 +2790,8 @@ void ExternalAsciiString::set_resource(
}
-const char* ExternalAsciiString::GetChars() {
- return resource()->data();
+const uint8_t* ExternalAsciiString::GetChars() {
+ return reinterpret_cast<const uint8_t*>(resource()->data());
}
@@ -2696,6 +2839,135 @@ const uint16_t* ExternalTwoByteString::ExternalTwoByteStringGetData(
}
+String* ConsStringNullOp::Operate(String*, unsigned*, int32_t*, unsigned*) {
+ return NULL;
+}
+
+
+unsigned ConsStringIteratorOp::OffsetForDepth(unsigned depth) {
+ return depth & kDepthMask;
+}
+
+
+void ConsStringIteratorOp::PushLeft(ConsString* string) {
+ frames_[depth_++ & kDepthMask] = string;
+}
+
+
+void ConsStringIteratorOp::PushRight(ConsString* string) {
+ // In-place update.
+ frames_[(depth_-1) & kDepthMask] = string;
+}
+
+
+void ConsStringIteratorOp::AdjustMaximumDepth() {
+ if (depth_ > maximum_depth_) maximum_depth_ = depth_;
+}
+
+
+void ConsStringIteratorOp::Pop() {
+ ASSERT(depth_ > 0);
+ ASSERT(depth_ <= maximum_depth_);
+ depth_--;
+}
+
+
+bool ConsStringIteratorOp::HasMore() {
+ return depth_ != 0;
+}
+
+
+void ConsStringIteratorOp::Reset() {
+ depth_ = 0;
+}
+
+
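
The iterator's frame stack is a fixed power-of-two array indexed through kDepthMask, so over-deep pushes wrap around and clobber the oldest frames; maximum_depth_ records whether that happened so ContinueOperation (below) can restart from the root. A standalone sketch with a small illustrative depth:

    #include <cstdio>

    static const unsigned kDepthBits = 3;  // illustrative; V8's stack is larger
    static const unsigned kDepthMask = (1u << kDepthBits) - 1;

    struct MaskedStack {
      int frames[1u << kDepthBits];
      unsigned depth = 0;
      unsigned maximum_depth = 0;
      void PushLeft(int v) { frames[depth++ & kDepthMask] = v; }
      void PushRight(int v) { frames[(depth - 1) & kDepthMask] = v; }  // in place
      void AdjustMaximumDepth() {
        if (depth > maximum_depth) maximum_depth = depth;
      }
      bool BlewStack() const { return maximum_depth > (1u << kDepthBits); }
    };

    int main() {
      MaskedStack s;
      for (int i = 0; i < 10; i++) {
        s.PushLeft(i);
        s.AdjustMaximumDepth();
      }
      // Ten pushes into eight slots: the two oldest frames were overwritten,
      // which BlewStack() detects so traversal can restart from the root.
      printf("%d\n", s.BlewStack());  // 1
      return 0;
    }
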
+String* ConsStringIteratorOp::ContinueOperation(int32_t* type_out,
+ unsigned* length_out) {
+ bool blew_stack = false;
+ String* string = NextLeaf(&blew_stack, type_out, length_out);
+ // String found.
+ if (string != NULL) {
+ // Verify output.
+ ASSERT(*length_out == static_cast<unsigned>(string->length()));
+ ASSERT(*type_out == string->map()->instance_type());
+ return string;
+ }
+ // Traversal complete.
+ if (!blew_stack) return NULL;
+ // Restart search from root.
+ unsigned offset_out;
+ string = Search(&offset_out, type_out, length_out);
+ // Verify output.
+ ASSERT(string == NULL || offset_out == 0);
+ ASSERT(string == NULL ||
+ *length_out == static_cast<unsigned>(string->length()));
+ ASSERT(string == NULL || *type_out == string->map()->instance_type());
+ return string;
+}
+
+
+uint16_t StringCharacterStream::GetNext() {
+ ASSERT(buffer8_ != NULL && end_ != NULL);
+ // Advance cursor if needed.
+ // TODO(dcarney): Ensure uses of the api call HasMore first and avoid this.
+ if (buffer8_ == end_) HasMore();
+ ASSERT(buffer8_ < end_);
+ return is_one_byte_ ? *buffer8_++ : *buffer16_++;
+}
+
+
+StringCharacterStream::StringCharacterStream(String* string,
+ ConsStringIteratorOp* op,
+ unsigned offset)
+ : is_one_byte_(false),
+ op_(op) {
+ Reset(string, offset);
+}
+
+
+void StringCharacterStream::Reset(String* string, unsigned offset) {
+ op_->Reset();
+ buffer8_ = NULL;
+ end_ = NULL;
+ int32_t type = string->map()->instance_type();
+ unsigned length = string->length();
+ String::Visit(string, offset, *this, *op_, type, length);
+}
+
+
+bool StringCharacterStream::HasMore() {
+ if (buffer8_ != end_) return true;
+ if (!op_->HasMore()) return false;
+ unsigned length;
+ int32_t type;
+ String* string = op_->ContinueOperation(&type, &length);
+ if (string == NULL) return false;
+ ASSERT(!string->IsConsString());
+ ASSERT(string->length() != 0);
+ ConsStringNullOp null_op;
+ String::Visit(string, 0, *this, null_op, type, length);
+ ASSERT(buffer8_ != end_);
+ return true;
+}
+
+
+void StringCharacterStream::VisitOneByteString(
+ const uint8_t* chars, unsigned length) {
+ is_one_byte_ = true;
+ buffer8_ = chars;
+ end_ = chars + length;
+}
+
+
+void StringCharacterStream::VisitTwoByteString(
+ const uint16_t* chars, unsigned length) {
+ is_one_byte_ = false;
+ buffer16_ = chars;
+ end_ = reinterpret_cast<const uint8_t*>(chars + length);
+}
+
+
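
StringCharacterStream keeps one cursor for both widths: buffer8_ and buffer16_ alias the same storage, and end_ is always a byte address, so HasMore is a single pointer compare either way. A sketch relying on the same pointer aliasing:

    #include <cstdint>
    #include <cstdio>

    struct Stream {
      union {
        const uint8_t* buffer8;    // aliased views of one cursor, as in V8
        const uint16_t* buffer16;
      };
      const uint8_t* end;
      bool is_one_byte;

      void SetOneByte(const uint8_t* chars, unsigned length) {
        is_one_byte = true;
        buffer8 = chars;
        end = chars + length;
      }
      void SetTwoByte(const uint16_t* chars, unsigned length) {
        is_one_byte = false;
        buffer16 = chars;
        // Byte address one past the last code unit, so the compare below
        // works for either width.
        end = reinterpret_cast<const uint8_t*>(chars + length);
      }
      bool HasMore() const { return buffer8 != end; }
      uint16_t GetNext() { return is_one_byte ? *buffer8++ : *buffer16++; }
    };

    int main() {
      const uint16_t chars[] = {'h', 'i'};
      Stream s;
      s.SetTwoByte(chars, 2);
      while (s.HasMore()) printf("%c", static_cast<char>(s.GetNext()));
      printf("\n");  // prints: hi
      return 0;
    }
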
void JSFunctionResultCache::MakeZeroSize() {
set_finger_index(kEntriesIndex);
set_size(kEntriesIndex);
@@ -2978,16 +3250,17 @@ int Map::pre_allocated_property_fields() {
int HeapObject::SizeFromMap(Map* map) {
int instance_size = map->instance_size();
if (instance_size != kVariableSizeSentinel) return instance_size;
- // We can ignore the "symbol" bit becase it is only set for symbols
- // and implies a string type.
- int instance_type = static_cast<int>(map->instance_type()) & ~kIsSymbolMask;
+ // We can ignore the "internalized" bit because it is only set for strings
+ // and thus implies a string type.
+ int instance_type =
+ static_cast<int>(map->instance_type()) & ~kIsInternalizedMask;
// Only inline the most frequent cases.
if (instance_type == FIXED_ARRAY_TYPE) {
return FixedArray::BodyDescriptor::SizeOf(map, this);
}
if (instance_type == ASCII_STRING_TYPE) {
- return SeqAsciiString::SizeFor(
- reinterpret_cast<SeqAsciiString*>(this)->length());
+ return SeqOneByteString::SizeFor(
+ reinterpret_cast<SeqOneByteString*>(this)->length());
}
if (instance_type == BYTE_ARRAY_TYPE) {
return reinterpret_cast<ByteArray*>(this)->ByteArraySize();
@@ -3147,8 +3420,7 @@ void Map::set_has_external_resource(bool value) {
}
}
-bool Map::has_external_resource()
-{
+bool Map::has_external_resource() {
return ((1 << kHasExternalResource) & bit_field()) != 0;
}
@@ -3196,6 +3468,11 @@ Code::Flags Code::flags() {
}
+inline bool Map::CanTrackAllocationSite() {
+ return instance_type() == JS_ARRAY_TYPE;
+}
+
+
void Map::set_owns_descriptors(bool is_shared) {
set_bit_field3(OwnsDescriptors::update(bit_field3(), is_shared));
}
@@ -3207,6 +3484,9 @@ bool Map::owns_descriptors() {
void Map::set_is_observed(bool is_observed) {
+ ASSERT(instance_type() < FIRST_JS_OBJECT_TYPE ||
+ instance_type() > LAST_JS_OBJECT_TYPE ||
+ has_slow_elements_kind() || has_external_array_elements());
set_bit_field3(IsObserved::update(bit_field3(), is_observed));
}
@@ -3216,6 +3496,71 @@ bool Map::is_observed() {
}
+void Map::NotifyLeafMapLayoutChange() {
+ dependent_code()->DeoptimizeDependentCodeGroup(
+ DependentCode::kPrototypeCheckGroup);
+}
+
+
+bool Map::CanOmitPrototypeChecks() {
+ return !HasTransitionArray() && !is_dictionary_map() &&
+ FLAG_omit_prototype_checks_for_leaf_maps;
+}
+
+
+void Map::AddDependentCode(DependentCode::DependencyGroup group,
+ Handle<Code> code) {
+ Handle<DependentCode> codes =
+ DependentCode::Insert(Handle<DependentCode>(dependent_code()),
+ group, code);
+ if (*codes != dependent_code()) {
+ set_dependent_code(*codes);
+ }
+}
+
+
+int DependentCode::number_of_entries(DependencyGroup group) {
+ if (length() == 0) return 0;
+ return Smi::cast(get(group))->value();
+}
+
+
+void DependentCode::set_number_of_entries(DependencyGroup group, int value) {
+ set(group, Smi::FromInt(value));
+}
+
+
+Code* DependentCode::code_at(int i) {
+ return Code::cast(get(kCodesStartIndex + i));
+}
+
+
+void DependentCode::set_code_at(int i, Code* value) {
+ set(kCodesStartIndex + i, value);
+}
+
+
+Object** DependentCode::code_slot_at(int i) {
+ return HeapObject::RawField(
+ this, FixedArray::OffsetOfElementAt(kCodesStartIndex + i));
+}
+
+
+void DependentCode::clear_code_at(int i) {
+ set_undefined(kCodesStartIndex + i);
+}
+
+
+void DependentCode::ExtendGroup(DependencyGroup group) {
+ GroupStartIndexes starts(this);
+ for (int g = kGroupCount - 1; g > group; g--) {
+ if (starts.at(g) < starts.at(g + 1)) {
+ set_code_at(starts.at(g + 1), code_at(starts.at(g)));
+ }
+ }
+}
+
+
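
DependentCode stores per-group entry counts up front and all code entries back to back in group order; ExtendGroup frees a slot at the end of one group by bumping the first entry of each later group to that group's end. A sketch of the layout and shuffle with illustrative sizes:

    #include <cstdio>

    static const int kGroupCount = 3;

    struct DependentCodeSketch {
      int counts[kGroupCount] = {0, 0, 0};  // per-group entry counts up front
      int codes[8] = {0};                   // entries, contiguous in group order

      int StartOf(int g) const {
        int s = 0;
        for (int i = 0; i < g; i++) s += counts[i];
        return s;
      }

      // Move the first entry of each later group to that group's end, leaving
      // a free slot at the end of `group` (mirrors DependentCode::ExtendGroup).
      void ExtendGroup(int group) {
        for (int g = kGroupCount - 1; g > group; g--) {
          if (counts[g] > 0) codes[StartOf(g) + counts[g]] = codes[StartOf(g)];
        }
      }

      void Insert(int group, int code) {
        ExtendGroup(group);
        codes[StartOf(group) + counts[group]] = code;
        counts[group]++;
      }
    };

    int main() {
      DependentCodeSketch d;
      d.Insert(1, 11);
      d.Insert(2, 22);
      d.Insert(0, 5);  // forces groups 1 and 2 to shift right by one slot
      printf("%d %d %d\n", d.codes[0], d.codes[1], d.codes[2]);  // 5 11 22
      return 0;
    }
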
void Code::set_flags(Code::Flags flags) {
STATIC_ASSERT(Code::NUMBER_OF_KINDS <= KindField::kMax + 1);
// Make sure that all call stubs have an arguments count.
@@ -3238,14 +3583,13 @@ InlineCacheState Code::ic_state() {
// a call to code object has been replaced with a debug break call.
ASSERT(is_inline_cache_stub() ||
result == UNINITIALIZED ||
- result == DEBUG_BREAK ||
- result == DEBUG_PREPARE_STEP_IN);
+ result == DEBUG_STUB);
return result;
}
Code::ExtraICState Code::extra_ic_state() {
- ASSERT(is_inline_cache_stub());
+ ASSERT(is_inline_cache_stub() || ic_state() == DEBUG_STUB);
return ExtractExtraICStateFromFlags(flags());
}
@@ -3263,9 +3607,12 @@ int Code::arguments_count() {
int Code::major_key() {
ASSERT(kind() == STUB ||
+ kind() == COMPILED_STUB ||
kind() == UNARY_OP_IC ||
kind() == BINARY_OP_IC ||
kind() == COMPARE_IC ||
+ kind() == LOAD_IC ||
+ kind() == KEYED_LOAD_IC ||
kind() == TO_BOOLEAN_IC);
return StubMajorKeyField::decode(
READ_UINT32_FIELD(this, kKindSpecificFlags2Offset));
@@ -3274,9 +3621,14 @@ int Code::major_key() {
void Code::set_major_key(int major) {
ASSERT(kind() == STUB ||
+ kind() == COMPILED_STUB ||
kind() == UNARY_OP_IC ||
kind() == BINARY_OP_IC ||
kind() == COMPARE_IC ||
+ kind() == LOAD_IC ||
+ kind() == KEYED_LOAD_IC ||
+ kind() == STORE_IC ||
+ kind() == KEYED_STORE_IC ||
kind() == TO_BOOLEAN_IC);
ASSERT(0 <= major && major < 256);
int previous = READ_UINT32_FIELD(this, kKindSpecificFlags2Offset);
@@ -3382,7 +3734,7 @@ void Code::set_profiler_ticks(int ticks) {
unsigned Code::stack_slots() {
- ASSERT(kind() == OPTIMIZED_FUNCTION);
+ ASSERT(kind() == OPTIMIZED_FUNCTION || kind() == COMPILED_STUB);
return StackSlotsField::decode(
READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
}
@@ -3390,7 +3742,7 @@ unsigned Code::stack_slots() {
void Code::set_stack_slots(unsigned slots) {
CHECK(slots <= (1 << kStackSlotsBitCount));
- ASSERT(kind() == OPTIMIZED_FUNCTION);
+ ASSERT(kind() == OPTIMIZED_FUNCTION || kind() == COMPILED_STUB);
int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
int updated = StackSlotsField::update(previous, slots);
WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
@@ -3398,7 +3750,7 @@ void Code::set_stack_slots(unsigned slots) {
unsigned Code::safepoint_table_offset() {
- ASSERT(kind() == OPTIMIZED_FUNCTION);
+ ASSERT(kind() == OPTIMIZED_FUNCTION || kind() == COMPILED_STUB);
return SafepointTableOffsetField::decode(
READ_UINT32_FIELD(this, kKindSpecificFlags2Offset));
}
@@ -3406,7 +3758,7 @@ unsigned Code::safepoint_table_offset() {
void Code::set_safepoint_table_offset(unsigned offset) {
CHECK(offset <= (1 << kSafepointTableOffsetBitCount));
- ASSERT(kind() == OPTIMIZED_FUNCTION);
+ ASSERT(kind() == OPTIMIZED_FUNCTION || kind() == COMPILED_STUB);
ASSERT(IsAligned(offset, static_cast<unsigned>(kIntSize)));
int previous = READ_UINT32_FIELD(this, kKindSpecificFlags2Offset);
int updated = SafepointTableOffsetField::update(previous, offset);
@@ -3458,66 +3810,6 @@ void Code::set_unary_op_type(byte value) {
}
-byte Code::binary_op_type() {
- ASSERT(is_binary_op_stub());
- return BinaryOpTypeField::decode(
- READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
-}
-
-
-void Code::set_binary_op_type(byte value) {
- ASSERT(is_binary_op_stub());
- int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
- int updated = BinaryOpTypeField::update(previous, value);
- WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
-}
-
-
-byte Code::binary_op_result_type() {
- ASSERT(is_binary_op_stub());
- return BinaryOpResultTypeField::decode(
- READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
-}
-
-
-void Code::set_binary_op_result_type(byte value) {
- ASSERT(is_binary_op_stub());
- int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
- int updated = BinaryOpResultTypeField::update(previous, value);
- WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
-}
-
-
-byte Code::compare_state() {
- ASSERT(is_compare_ic_stub());
- return CompareStateField::decode(
- READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
-}
-
-
-void Code::set_compare_state(byte value) {
- ASSERT(is_compare_ic_stub());
- int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
- int updated = CompareStateField::update(previous, value);
- WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
-}
-
-
-byte Code::compare_operation() {
- ASSERT(is_compare_ic_stub());
- return CompareOperationField::decode(
- READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
-}
-
-
-void Code::set_compare_operation(byte value) {
- ASSERT(is_compare_ic_stub());
- int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
- int updated = CompareOperationField::update(previous, value);
- WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
-}
-
-
byte Code::to_boolean_state() {
ASSERT(is_to_boolean_ic_stub());
return ToBooleanStateField::decode(
@@ -3548,24 +3840,38 @@ void Code::set_has_function_cache(bool flag) {
}
+bool Code::marked_for_deoptimization() {
+ ASSERT(kind() == OPTIMIZED_FUNCTION);
+ return MarkedForDeoptimizationField::decode(
+ READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
+}
+
+
+void Code::set_marked_for_deoptimization(bool flag) {
+ ASSERT(kind() == OPTIMIZED_FUNCTION);
+ int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
+ int updated = MarkedForDeoptimizationField::update(previous, flag);
+ WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
+}
+
+
bool Code::is_inline_cache_stub() {
Kind kind = this->kind();
return kind >= FIRST_IC_KIND && kind <= LAST_IC_KIND;
}
+bool Code::is_debug_break() {
+ return ic_state() == DEBUG_STUB && extra_ic_state() == DEBUG_BREAK;
+}
+
+
Code::Flags Code::ComputeFlags(Kind kind,
InlineCacheState ic_state,
ExtraICState extra_ic_state,
StubType type,
int argc,
InlineCacheHolderFlag holder) {
- // Extra IC state is only allowed for call IC stubs or for store IC
- // stubs.
- ASSERT(extra_ic_state == kNoExtraICState ||
- kind == CALL_IC ||
- kind == STORE_IC ||
- kind == KEYED_STORE_IC);
// Compute the bit mask.
int bits = KindField::encode(kind)
| ICStateField::encode(ic_state)
@@ -3578,10 +3884,10 @@ Code::Flags Code::ComputeFlags(Kind kind,
Code::Flags Code::ComputeMonomorphicFlags(Kind kind,
- StubType type,
ExtraICState extra_ic_state,
- InlineCacheHolderFlag holder,
- int argc) {
+ StubType type,
+ int argc,
+ InlineCacheHolderFlag holder) {
return ComputeFlags(kind, MONOMORPHIC, extra_ic_state, type, argc, holder);
}
@@ -3871,6 +4177,7 @@ HeapObject* Map::UncheckedPrototypeTransitions() {
ACCESSORS(Map, code_cache, Object, kCodeCacheOffset)
+ACCESSORS(Map, dependent_code, DependentCode, kDependentCodeOffset)
ACCESSORS(Map, constructor, Object, kConstructorOffset)
ACCESSORS(JSFunction, shared, SharedFunctionInfo, kSharedFunctionInfoOffset)
@@ -3884,14 +4191,20 @@ ACCESSORS(GlobalObject, global_receiver, JSObject, kGlobalReceiverOffset)
ACCESSORS(JSGlobalProxy, native_context, Object, kNativeContextOffset)
-ACCESSORS(AccessorInfo, getter, Object, kGetterOffset)
-ACCESSORS(AccessorInfo, setter, Object, kSetterOffset)
-ACCESSORS(AccessorInfo, data, Object, kDataOffset)
ACCESSORS(AccessorInfo, name, Object, kNameOffset)
ACCESSORS_TO_SMI(AccessorInfo, flag, kFlagOffset)
ACCESSORS(AccessorInfo, expected_receiver_type, Object,
kExpectedReceiverTypeOffset)
+ACCESSORS(DeclaredAccessorDescriptor, internal_field, Smi, kInternalFieldOffset)
+
+ACCESSORS(DeclaredAccessorInfo, descriptor, DeclaredAccessorDescriptor,
+ kDescriptorOffset)
+
+ACCESSORS(ExecutableAccessorInfo, getter, Object, kGetterOffset)
+ACCESSORS(ExecutableAccessorInfo, setter, Object, kSetterOffset)
+ACCESSORS(ExecutableAccessorInfo, data, Object, kDataOffset)
+
ACCESSORS(AccessorPair, getter, Object, kGetterOffset)
ACCESSORS(AccessorPair, setter, Object, kSetterOffset)
@@ -3947,6 +4260,8 @@ ACCESSORS(SignatureInfo, args, Object, kArgsOffset)
ACCESSORS(TypeSwitchInfo, types, Object, kTypesOffset)
+ACCESSORS(AllocationSiteInfo, payload, Object, kPayloadOffset)
+
ACCESSORS(Script, source, Object, kSourceOffset)
ACCESSORS(Script, name, Object, kNameOffset)
ACCESSORS(Script, id, Object, kIdOffset)
@@ -3991,6 +4306,7 @@ ACCESSORS(SharedFunctionInfo, this_property_assignments, Object,
SMI_ACCESSORS(SharedFunctionInfo, ast_node_count, kAstNodeCountOffset)
+SMI_ACCESSORS(FunctionTemplateInfo, length, kLengthOffset)
BOOL_ACCESSORS(FunctionTemplateInfo, flag, hidden_prototype,
kHiddenPrototypeBit)
BOOL_ACCESSORS(FunctionTemplateInfo, flag, undetectable, kUndetectableBit)
@@ -4198,11 +4514,10 @@ BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, dont_cache, kDontCache)
void SharedFunctionInfo::BeforeVisitingPointers() {
if (IsInobjectSlackTrackingInProgress()) DetachInitialMap();
+}
- // Flush optimized code map on major GC.
- // Note: we may experiment with rebuilding it or retaining entries
- // which should survive as we iterate through optimized functions
- // anyway.
+
+void SharedFunctionInfo::ClearOptimizedCodeMap() {
set_optimized_code_map(Smi::FromInt(0));
}
@@ -4217,7 +4532,7 @@ bool Script::HasValidSource() {
if (!src->IsString()) return true;
String* src_str = String::cast(src);
if (!StringShape(src_str).IsExternal()) return true;
- if (src_str->IsAsciiRepresentation()) {
+ if (src_str->IsOneByteRepresentation()) {
return ExternalAsciiString::cast(src)->resource() != NULL;
} else if (src_str->IsTwoByteRepresentation()) {
return ExternalTwoByteString::cast(src)->resource() != NULL;
@@ -4259,6 +4574,19 @@ void SharedFunctionInfo::set_code(Code* value, WriteBarrierMode mode) {
}
+void SharedFunctionInfo::ReplaceCode(Code* value) {
+ // If the GC metadata field is already in use, the function was enqueued
+ // as a code flushing candidate, and we evict it from the queue now.
+ if (code()->gc_metadata() != NULL) {
+ CodeFlusher* flusher = GetHeap()->mark_compact_collector()->code_flusher();
+ flusher->EvictCandidate(this);
+ }
+
+ ASSERT(code()->gc_metadata() == NULL && value->gc_metadata() == NULL);
+ set_code(value);
+}
+
+
ScopeInfo* SharedFunctionInfo::scope_info() {
return reinterpret_cast<ScopeInfo*>(READ_FIELD(this, kScopeInfoOffset));
}
@@ -4676,13 +5004,66 @@ JSMessageObject* JSMessageObject::cast(Object* obj) {
INT_ACCESSORS(Code, instruction_size, kInstructionSizeOffset)
+INT_ACCESSORS(Code, prologue_offset, kPrologueOffset)
ACCESSORS(Code, relocation_info, ByteArray, kRelocationInfoOffset)
ACCESSORS(Code, handler_table, FixedArray, kHandlerTableOffset)
ACCESSORS(Code, deoptimization_data, FixedArray, kDeoptimizationDataOffset)
-ACCESSORS(Code, type_feedback_info, Object, kTypeFeedbackInfoOffset)
+
+
+// Type feedback slot: holds type_feedback_info for FUNCTIONs, a Smi-encoded
+// stub_info for IC stubs, and the deoptimizing-functions list for
+// OPTIMIZED_FUNCTIONs (see the kind-checked accessors below).
+void Code::InitializeTypeFeedbackInfoNoWriteBarrier(Object* value) {
+ WRITE_FIELD(this, kTypeFeedbackInfoOffset, value);
+}
+
+
+Object* Code::type_feedback_info() {
+ ASSERT(kind() == FUNCTION);
+ return Object::cast(READ_FIELD(this, kTypeFeedbackInfoOffset));
+}
+
+
+void Code::set_type_feedback_info(Object* value, WriteBarrierMode mode) {
+ ASSERT(kind() == FUNCTION);
+ WRITE_FIELD(this, kTypeFeedbackInfoOffset, value);
+ CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kTypeFeedbackInfoOffset,
+ value, mode);
+}
+
+
+int Code::stub_info() {
+ ASSERT(kind() == COMPARE_IC || kind() == BINARY_OP_IC || kind() == LOAD_IC);
+ Object* value = READ_FIELD(this, kTypeFeedbackInfoOffset);
+ return Smi::cast(value)->value();
+}
+
+
+void Code::set_stub_info(int value) {
+ ASSERT(kind() == COMPARE_IC ||
+ kind() == BINARY_OP_IC ||
+ kind() == LOAD_IC ||
+ kind() == KEYED_LOAD_IC ||
+ kind() == STORE_IC ||
+ kind() == KEYED_STORE_IC);
+ WRITE_FIELD(this, kTypeFeedbackInfoOffset, Smi::FromInt(value));
+}
+
+
+void Code::set_deoptimizing_functions(Object* value) {
+ ASSERT(kind() == OPTIMIZED_FUNCTION);
+ WRITE_FIELD(this, kTypeFeedbackInfoOffset, value);
+}
+
+
+Object* Code::deoptimizing_functions() {
+ ASSERT(kind() == OPTIMIZED_FUNCTION);
+ return Object::cast(READ_FIELD(this, kTypeFeedbackInfoOffset));
+}
+
+
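
A hedged sketch of how the single word at kTypeFeedbackInfoOffset is
multiplexed by code kind; the helper below is illustrative and not part
of V8:

// One raw field, three interpretations, dispatched on Code::kind().
static Object* TypeFeedbackSlot(Code* code) {
  switch (code->kind()) {
    case Code::FUNCTION:
      return code->type_feedback_info();       // TypeFeedbackInfo object
    case Code::OPTIMIZED_FUNCTION:
      return code->deoptimizing_functions();   // list of deoptimizing closures
    default:
      return Smi::FromInt(code->stub_info());  // IC stubs: Smi-encoded info
  }
}
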
ACCESSORS(Code, gc_metadata, Object, kGCMetadataOffset)
INT_ACCESSORS(Code, ic_age, kICAgeOffset)
+
byte* Code::instruction_start() {
return FIELD_ADDR(this, kHeaderSize);
}
@@ -4863,6 +5244,11 @@ bool JSObject::HasFastHoleyElements() {
}
+bool JSObject::HasFastElements() {
+ return IsFastElementsKind(GetElementsKind());
+}
+
+
bool JSObject::HasDictionaryElements() {
return GetElementsKind() == DICTIONARY_ELEMENTS;
}
@@ -4945,22 +5331,22 @@ SeededNumberDictionary* JSObject::element_dictionary() {
}
-bool String::IsHashFieldComputed(uint32_t field) {
+bool Name::IsHashFieldComputed(uint32_t field) {
return (field & kHashNotComputedMask) == 0;
}
-bool String::HasHashCode() {
+bool Name::HasHashCode() {
return IsHashFieldComputed(hash_field());
}
-uint32_t String::Hash() {
+uint32_t Name::Hash() {
// Fast case: has hash code already been computed?
uint32_t field = hash_field();
if (IsHashFieldComputed(field)) return field >> kHashShift;
- // Slow case: compute hash code and set it.
- return ComputeAndSetHash();
+ // Slow case: compute hash code and set it. Has to be a string.
+ return String::cast(this)->ComputeAndSetHash();
}
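
The fast path above works because the hash field doubles as its own
"computed yet?" flag. A minimal sketch of that encoding (the flag
positions follow the masks used above; treat the exact layout as an
assumption):

// Low bit set => hash not yet computed; payload lives above the flag bits.
static const uint32_t kHashNotComputedMask = 1;
static const int kHashShift = 2;

static bool IsComputed(uint32_t field) {
  return (field & kHashNotComputedMask) == 0;
}
static uint32_t StoredHash(uint32_t field) {
  return field >> kHashShift;
}
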
@@ -4979,7 +5365,7 @@ bool StringHasher::has_trivial_hash() {
}
-uint32_t StringHasher::AddCharacterCore(uint32_t running_hash, uint32_t c) {
+uint32_t StringHasher::AddCharacterCore(uint32_t running_hash, uint16_t c) {
running_hash += c;
running_hash += (running_hash << 10);
running_hash ^= (running_hash >> 6);
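
For reference, a self-contained sketch of the classic Jenkins
one-at-a-time hash that AddCharacterCore computes incrementally (seeding
and the zero-hash guard applied by GetHashCore are omitted here):

#include <stdint.h>
#include <stddef.h>

uint32_t JenkinsOneAtATime(const uint8_t* key, size_t len) {
  uint32_t hash = 0;        // V8 seeds this with a per-heap random value.
  for (size_t i = 0; i < len; i++) {
    hash += key[i];         // These three steps match AddCharacterCore.
    hash += hash << 10;
    hash ^= hash >> 6;
  }
  hash += hash << 3;        // Standard finalization mixing.
  hash ^= hash >> 11;
  hash += hash << 15;
  return hash;
}
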
@@ -4998,66 +5384,62 @@ uint32_t StringHasher::GetHashCore(uint32_t running_hash) {
}
-void StringHasher::AddCharacter(uint32_t c) {
- if (c > unibrow::Utf16::kMaxNonSurrogateCharCode) {
- AddSurrogatePair(c); // Not inlined.
- return;
- }
+void StringHasher::AddCharacter(uint16_t c) {
// Use the Jenkins one-at-a-time hash function to update the hash
// for the given character.
raw_running_hash_ = AddCharacterCore(raw_running_hash_, c);
- // Incremental array index computation.
- if (is_array_index_) {
- if (c < '0' || c > '9') {
- is_array_index_ = false;
- } else {
- int d = c - '0';
- if (is_first_char_) {
- is_first_char_ = false;
- if (c == '0' && length_ > 1) {
- is_array_index_ = false;
- return;
- }
- }
- if (array_index_ > 429496729U - ((d + 2) >> 3)) {
- is_array_index_ = false;
- } else {
- array_index_ = array_index_ * 10 + d;
- }
- }
- }
}
-void StringHasher::AddCharacterNoIndex(uint32_t c) {
- ASSERT(!is_array_index());
- if (c > unibrow::Utf16::kMaxNonSurrogateCharCode) {
- AddSurrogatePairNoIndex(c); // Not inlined.
- return;
+bool StringHasher::UpdateIndex(uint16_t c) {
+ ASSERT(is_array_index_);
+ if (c < '0' || c > '9') {
+ is_array_index_ = false;
+ return false;
}
- raw_running_hash_ = AddCharacterCore(raw_running_hash_, c);
+ int d = c - '0';
+ if (is_first_char_) {
+ is_first_char_ = false;
+ if (c == '0' && length_ > 1) {
+ is_array_index_ = false;
+ return false;
+ }
+ }
+ if (array_index_ > 429496729U - ((d + 2) >> 3)) {
+ is_array_index_ = false;
+ return false;
+ }
+ array_index_ = array_index_ * 10 + d;
+ return true;
}
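
The magic constant above is floor((2^32 - 1) / 10), and the
(d + 2) >> 3 term lowers the bound by exactly one for the digits 6-9, so
the guard is equivalent to checking array_index_ * 10 + d against
2^32 - 1 without 64-bit arithmetic. A hedged sketch verifying that
equivalence around the boundary:

#include <stdint.h>
#include <assert.h>

static bool WouldOverflow32(uint32_t index, int d) {
  return index > 429496729U - ((d + 2) >> 3);      // the guard used above
}
static bool WouldOverflow64(uint32_t index, int d) {
  return (uint64_t)index * 10 + d > 0xFFFFFFFFull; // 64-bit reference check
}
int main(void) {
  for (uint32_t i = 429496720U; i <= 429496730U; i++)
    for (int d = 0; d <= 9; d++)
      assert(WouldOverflow32(i, d) == WouldOverflow64(i, d));
  return 0;
}
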
-uint32_t StringHasher::GetHash() {
- // Get the calculated raw hash value and do some more bit ops to distribute
- // the hash further. Ensure that we never return zero as the hash value.
- return GetHashCore(raw_running_hash_);
+template<typename Char>
+inline void StringHasher::AddCharacters(const Char* chars, int length) {
+ ASSERT(sizeof(Char) == 1 || sizeof(Char) == 2);
+ int i = 0;
+ if (is_array_index_) {
+ for (; i < length; i++) {
+ AddCharacter(chars[i]);
+ if (!UpdateIndex(chars[i])) {
+ i++;
+ break;
+ }
+ }
+ }
+ for (; i < length; i++) {
+ ASSERT(!is_array_index_);
+ AddCharacter(chars[i]);
+ }
}
template <typename schar>
-uint32_t HashSequentialString(const schar* chars, int length, uint32_t seed) {
+uint32_t StringHasher::HashSequentialString(const schar* chars,
+ int length,
+ uint32_t seed) {
StringHasher hasher(length, seed);
- if (!hasher.has_trivial_hash()) {
- int i;
- for (i = 0; hasher.is_array_index() && (i < length); i++) {
- hasher.AddCharacter(chars[i]);
- }
- for (; i < length; i++) {
- hasher.AddCharacterNoIndex(chars[i]);
- }
- }
+ if (!hasher.has_trivial_hash()) hasher.AddCharacters(chars, length);
return hasher.GetHashField();
}
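
A hedged usage sketch of the relocated entry point (the zero seed is
illustrative; V8 passes the heap's hash seed, and kHashShift refers to
the constant used in Name::Hash above):

const char* chars = "example";
uint32_t field = StringHasher::HashSequentialString(chars, 7, 0 /* seed */);
uint32_t hash = field >> kHashShift;   // strip the flag bits from the field

Folding the index tracking into AddCharacters also removes the duplicated
two-loop pattern each caller previously had to write.
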
@@ -5098,6 +5480,10 @@ bool JSReceiver::HasLocalProperty(String* name) {
PropertyAttributes JSReceiver::GetPropertyAttribute(String* key) {
+ uint32_t index;
+ if (IsJSObject() && key->AsArrayIndex(&index)) {
+ return GetElementAttribute(index);
+ }
return GetPropertyAttributeWithReceiver(this, key);
}
@@ -5412,6 +5798,12 @@ Handle<Object> TypeFeedbackCells::MegamorphicSentinel(Isolate* isolate) {
}
+Handle<Object> TypeFeedbackCells::MonomorphicArraySentinel(Isolate* isolate,
+ ElementsKind elements_kind) {
+ return Handle<Object>(Smi::FromInt(static_cast<int>(elements_kind)), isolate);
+}
+
+
Object* TypeFeedbackCells::RawUninitializedSentinel(Heap* heap) {
return heap->the_hole_value();
}
diff --git a/src/3rdparty/v8/src/objects-printer.cc b/src/3rdparty/v8/src/objects-printer.cc
index 6e87c7a..b4cf9a9 100644
--- a/src/3rdparty/v8/src/objects-printer.cc
+++ b/src/3rdparty/v8/src/objects-printer.cc
@@ -69,13 +69,16 @@ void HeapObject::PrintHeader(FILE* out, const char* id) {
void HeapObject::HeapObjectPrint(FILE* out) {
InstanceType instance_type = map()->instance_type();
- HandleScope scope;
+ HandleScope scope(GetIsolate());
if (instance_type < FIRST_NONSTRING_TYPE) {
String::cast(this)->StringPrint(out);
return;
}
switch (instance_type) {
+ case SYMBOL_TYPE:
+ Symbol::cast(this)->SymbolPrint(out);
+ break;
case MAP_TYPE:
Map::cast(this)->MapPrint(out);
break;
@@ -384,7 +387,7 @@ void JSObject::PrintElements(FILE* out) {
case EXTERNAL_DOUBLE_ELEMENTS: {
ExternalDoubleArray* p = ExternalDoubleArray::cast(elements());
for (int i = 0; i < p->length(); i++) {
- FPrintF(out, " %d: %f\n", i, p->get_scalar(i));
+ FPrintF(out, " %d: %f\n", i, p->get_scalar(i));
}
break;
}
@@ -393,11 +396,16 @@ void JSObject::PrintElements(FILE* out) {
break;
case NON_STRICT_ARGUMENTS_ELEMENTS: {
FixedArray* p = FixedArray::cast(elements());
+ FPrintF(out, " parameter map:");
for (int i = 2; i < p->length(); i++) {
- FPrintF(out, " %d: ", i);
+ FPrintF(out, " %d:", i - 2);
p->get(i)->ShortPrint(out);
- FPrintF(out, "\n");
}
+ FPrintF(out, "\n context: ");
+ p->get(0)->ShortPrint(out);
+ FPrintF(out, "\n arguments: ");
+ p->get(1)->ShortPrint(out);
+ FPrintF(out, "\n");
break;
}
}
@@ -473,25 +481,32 @@ static const char* TypeToString(InstanceType type) {
case MAP_TYPE: return "MAP";
case HEAP_NUMBER_TYPE: return "HEAP_NUMBER";
case SYMBOL_TYPE: return "SYMBOL";
- case ASCII_SYMBOL_TYPE: return "ASCII_SYMBOL";
- case CONS_SYMBOL_TYPE: return "CONS_SYMBOL";
- case CONS_ASCII_SYMBOL_TYPE: return "CONS_ASCII_SYMBOL";
- case EXTERNAL_ASCII_SYMBOL_TYPE:
- case EXTERNAL_SYMBOL_WITH_ASCII_DATA_TYPE:
- case EXTERNAL_SYMBOL_TYPE: return "EXTERNAL_SYMBOL";
- case SHORT_EXTERNAL_ASCII_SYMBOL_TYPE:
- case SHORT_EXTERNAL_SYMBOL_WITH_ASCII_DATA_TYPE:
- case SHORT_EXTERNAL_SYMBOL_TYPE: return "SHORT_EXTERNAL_SYMBOL";
- case ASCII_STRING_TYPE: return "ASCII_STRING";
case STRING_TYPE: return "TWO_BYTE_STRING";
+ case ASCII_STRING_TYPE: return "ASCII_STRING";
case CONS_STRING_TYPE:
- case CONS_ASCII_STRING_TYPE: return "CONS_STRING";
+ case CONS_ASCII_STRING_TYPE:
+ return "CONS_STRING";
+ case EXTERNAL_STRING_TYPE:
case EXTERNAL_ASCII_STRING_TYPE:
case EXTERNAL_STRING_WITH_ASCII_DATA_TYPE:
- case EXTERNAL_STRING_TYPE: return "EXTERNAL_STRING";
+ return "EXTERNAL_STRING";
+ case SHORT_EXTERNAL_STRING_TYPE:
case SHORT_EXTERNAL_ASCII_STRING_TYPE:
case SHORT_EXTERNAL_STRING_WITH_ASCII_DATA_TYPE:
- case SHORT_EXTERNAL_STRING_TYPE: return "SHORT_EXTERNAL_STRING";
+ return "SHORT_EXTERNAL_STRING";
+ case INTERNALIZED_STRING_TYPE: return "INTERNALIZED_STRING";
+ case ASCII_INTERNALIZED_STRING_TYPE: return "ASCII_INTERNALIZED_STRING";
+ case CONS_INTERNALIZED_STRING_TYPE: return "CONS_INTERNALIZED_STRING";
+ case CONS_ASCII_INTERNALIZED_STRING_TYPE:
+ return "CONS_ASCII_INTERNALIZED_STRING";
+ case EXTERNAL_INTERNALIZED_STRING_TYPE:
+ case EXTERNAL_ASCII_INTERNALIZED_STRING_TYPE:
+ case EXTERNAL_INTERNALIZED_STRING_WITH_ASCII_DATA_TYPE:
+ return "EXTERNAL_INTERNALIZED_STRING";
+ case SHORT_EXTERNAL_INTERNALIZED_STRING_TYPE:
+ case SHORT_EXTERNAL_ASCII_INTERNALIZED_STRING_TYPE:
+ case SHORT_EXTERNAL_INTERNALIZED_STRING_WITH_ASCII_DATA_TYPE:
+ return "SHORT_EXTERNAL_INTERNALIZED_STRING";
case FIXED_ARRAY_TYPE: return "FIXED_ARRAY";
case BYTE_ARRAY_TYPE: return "BYTE_ARRAY";
case FREE_SPACE_TYPE: return "FREE_SPACE";
@@ -534,6 +549,12 @@ static const char* TypeToString(InstanceType type) {
}
+void Symbol::SymbolPrint(FILE* out) {
+ HeapObject::PrintHeader(out, "Symbol");
+ FPrintF(out, " - hash: %d\n", Hash());
+}
+
+
void Map::MapPrint(FILE* out) {
HeapObject::PrintHeader(out, "Map");
FPrintF(out, " - type: %s\n", TypeToString(instance_type()));
@@ -663,7 +684,7 @@ void JSMessageObject::JSMessageObjectPrint(FILE* out) {
void String::StringPrint(FILE* out) {
- if (StringShape(this).IsSymbol()) {
+ if (StringShape(this).IsInternalized()) {
FPrintF(out, "#");
} else if (StringShape(this).IsCons()) {
FPrintF(out, "c\"");
@@ -685,7 +706,7 @@ void String::StringPrint(FILE* out) {
FPrintF(out, "%s", truncated_epilogue);
}
- if (!StringShape(this).IsSymbol()) FPrintF(out, "\"");
+ if (!StringShape(this).IsInternalized()) FPrintF(out, "\"");
}
@@ -698,7 +719,7 @@ char* String::ToAsciiArray() {
static char* buffer = NULL;
if (buffer != NULL) free(buffer);
buffer = new char[length()+1];
- WriteToFlat(this, buffer, 0, length());
+ WriteToFlat(this, reinterpret_cast<uint8_t*>(buffer), 0, length());
buffer[length()] = 0;
return buffer;
}
@@ -869,18 +890,36 @@ void Foreign::ForeignPrint(FILE* out) {
}
-void AccessorInfo::AccessorInfoPrint(FILE* out) {
- HeapObject::PrintHeader(out, "AccessorInfo");
+void ExecutableAccessorInfo::ExecutableAccessorInfoPrint(FILE* out) {
+ HeapObject::PrintHeader(out, "ExecutableAccessorInfo");
+ FPrintF(out, "\n - name: ");
+ name()->ShortPrint(out);
+ FPrintF(out, "\n - flag: ");
+ flag()->ShortPrint(out);
FPrintF(out, "\n - getter: ");
getter()->ShortPrint(out);
FPrintF(out, "\n - setter: ");
setter()->ShortPrint(out);
- FPrintF(out, "\n - name: ");
- name()->ShortPrint(out);
FPrintF(out, "\n - data: ");
data()->ShortPrint(out);
+}
+
+
+void DeclaredAccessorInfo::DeclaredAccessorInfoPrint(FILE* out) {
+ HeapObject::PrintHeader(out, "DeclaredAccessorInfo");
+ FPrintF(out, "\n - name: ");
+ name()->ShortPrint(out);
FPrintF(out, "\n - flag: ");
flag()->ShortPrint(out);
+ FPrintF(out, "\n - descriptor: ");
+ descriptor()->ShortPrint(out);
+}
+
+
+void DeclaredAccessorDescriptor::DeclaredAccessorDescriptorPrint(FILE* out) {
+ HeapObject::PrintHeader(out, "DeclaredAccessorDescriptor");
+ FPrintF(out, "\n - internal field: ");
+ internal_field()->ShortPrint(out);
}
@@ -997,6 +1036,33 @@ void TypeSwitchInfo::TypeSwitchInfoPrint(FILE* out) {
}
+void AllocationSiteInfo::AllocationSiteInfoPrint(FILE* out) {
+ HeapObject::PrintHeader(out, "AllocationSiteInfo");
+ FPrintF(out, " - payload: ");
+ if (payload()->IsJSGlobalPropertyCell()) {
+ JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(payload());
+ Object* cell_contents = cell->value();
+ if (cell_contents->IsSmi()) {
+ ElementsKind kind = static_cast<ElementsKind>(
+ Smi::cast(cell_contents)->value());
+ FPrintF(out, "Array allocation with ElementsKind ");
+ PrintElementsKind(out, kind);
+ FPrintF(out, "\n");
+ return;
+ }
+ } else if (payload()->IsJSArray()) {
+ FPrintF(out, "Array literal ");
+ payload()->ShortPrint(out);
+ FPrintF(out, "\n");
+ return;
+ }
+
+ FPrintF(out, "unknown payload ");
+ payload()->ShortPrint(out);
+ FPrintF(out, "\n");
+}
+
+
void Script::ScriptPrint(FILE* out) {
HeapObject::PrintHeader(out, "Script");
FPrintF(out, "\n - source: ");
diff --git a/src/3rdparty/v8/src/objects-visiting-inl.h b/src/3rdparty/v8/src/objects-visiting-inl.h
index 71635ca..beb07b5 100644
--- a/src/3rdparty/v8/src/objects-visiting-inl.h
+++ b/src/3rdparty/v8/src/objects-visiting-inl.h
@@ -68,7 +68,7 @@ void StaticNewSpaceVisitor<StaticVisitor>::Initialize() {
SharedFunctionInfo::BodyDescriptor,
int>::Visit);
- table_.Register(kVisitSeqAsciiString, &VisitSeqAsciiString);
+ table_.Register(kVisitSeqOneByteString, &VisitSeqOneByteString);
table_.Register(kVisitSeqTwoByteString, &VisitSeqTwoByteString);
@@ -110,10 +110,7 @@ void StaticMarkingVisitor<StaticVisitor>::Initialize() {
SlicedString::BodyDescriptor,
void>::Visit);
- table_.Register(kVisitFixedArray,
- &FlexibleBodyVisitor<StaticVisitor,
- FixedArray::BodyDescriptor,
- void>::Visit);
+ table_.Register(kVisitFixedArray, &FixedArrayVisitor::Visit);
table_.Register(kVisitFixedDoubleArray, &DataObjectVisitor::Visit);
@@ -123,7 +120,7 @@ void StaticMarkingVisitor<StaticVisitor>::Initialize() {
table_.Register(kVisitFreeSpace, &DataObjectVisitor::Visit);
- table_.Register(kVisitSeqAsciiString, &DataObjectVisitor::Visit);
+ table_.Register(kVisitSeqOneByteString, &DataObjectVisitor::Visit);
table_.Register(kVisitSeqTwoByteString, &DataObjectVisitor::Visit);
@@ -178,8 +175,12 @@ void StaticMarkingVisitor<StaticVisitor>::VisitEmbeddedPointer(
ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
ASSERT(!rinfo->target_object()->IsConsString());
HeapObject* object = HeapObject::cast(rinfo->target_object());
- heap->mark_compact_collector()->RecordRelocSlot(rinfo, object);
- StaticVisitor::MarkObject(heap, object);
+ if (!FLAG_weak_embedded_maps_in_optimized_code || !FLAG_collect_maps ||
+ rinfo->host()->kind() != Code::OPTIMIZED_FUNCTION ||
+ !object->IsMap() || !Map::cast(object)->CanTransition()) {
+ heap->mark_compact_collector()->RecordRelocSlot(rinfo, object);
+ StaticVisitor::MarkObject(heap, object);
+ }
}
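
Read positively, the new guard records and marks the embedded object
unless it can be treated weakly. A hedged restatement of the weakness
condition (illustrative helper, not a V8 API):

static bool EmbeddedMapIsWeak(Code* host, HeapObject* object) {
  return FLAG_weak_embedded_maps_in_optimized_code &&
         FLAG_collect_maps &&
         host->kind() == Code::OPTIMIZED_FUNCTION &&
         object->IsMap() &&
         Map::cast(object)->CanTransition();
}

Maps skipped here stay alive only through other strong references; the
dependent_code field added elsewhere in this patch is what allows code
that embedded such a map to be deoptimized once the map dies.
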
@@ -214,7 +215,8 @@ void StaticMarkingVisitor<StaticVisitor>::VisitCodeTarget(
// when they might be keeping a Context alive, or when the heap is about
// to be serialized.
if (FLAG_cleanup_code_caches_at_gc && target->is_inline_cache_stub()
- && (target->ic_state() == MEGAMORPHIC || heap->flush_monomorphic_ics() ||
+ && (target->ic_state() == MEGAMORPHIC || target->ic_state() == GENERIC ||
+ target->ic_state() == POLYMORPHIC || heap->flush_monomorphic_ics() ||
Serializer::enabled() || target->ic_age() != heap->global_ic_age())) {
IC::Clear(rinfo->pc());
target = Code::GetCodeFromTargetAddress(rinfo->target_address());
@@ -264,12 +266,9 @@ void StaticMarkingVisitor<StaticVisitor>::VisitMap(
map_object->ClearCodeCache(heap);
}
- // When map collection is enabled we have to mark through map's
- // transitions and back pointers in a special way to make these links
- // weak. Only maps for subclasses of JSReceiver can have transitions.
- STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
- if (FLAG_collect_maps &&
- map_object->instance_type() >= FIRST_JS_RECEIVER_TYPE) {
+ // When map collection is enabled we have to mark through map's transitions
+ // and back pointers in a special way to make these links weak.
+ if (FLAG_collect_maps && map_object->CanTransition()) {
MarkMapContents(heap, map_object);
} else {
StaticVisitor::VisitPointers(heap,
@@ -302,6 +301,13 @@ void StaticMarkingVisitor<StaticVisitor>::VisitSharedFunctionInfo(
if (shared->ic_age() != heap->global_ic_age()) {
shared->ResetForNewContext(heap->global_ic_age());
}
+ if (FLAG_cache_optimized_code) {
+ // Flush optimized code map on major GC.
+ // TODO(mstarzinger): We may experiment with rebuilding it or with
+ // retaining entries which should survive as we iterate through
+ // optimized functions anyway.
+ shared->ClearOptimizedCodeMap();
+ }
MarkCompactCollector* collector = heap->mark_compact_collector();
if (collector->is_code_flushing_enabled()) {
if (IsFlushable(heap, shared)) {
@@ -390,6 +396,41 @@ void StaticMarkingVisitor<StaticVisitor>::MarkMapContents(
ASSERT(transitions->IsMap() || transitions->IsUndefined());
}
+ // Since descriptor arrays are potentially shared, ensure that only the
+ // descriptors that appeared for this map are marked. The first time a
+ // non-empty descriptor array is marked, its header is also visited. The slot
+ // holding the descriptor array will be implicitly recorded when the pointer
+ // fields of this map are visited.
+ DescriptorArray* descriptors = map->instance_descriptors();
+ if (StaticVisitor::MarkObjectWithoutPush(heap, descriptors) &&
+ descriptors->length() > 0) {
+ StaticVisitor::VisitPointers(heap,
+ descriptors->GetFirstElementAddress(),
+ descriptors->GetDescriptorEndSlot(0));
+ }
+ int start = 0;
+ int end = map->NumberOfOwnDescriptors();
+ Object* back_pointer = map->GetBackPointer();
+ if (!back_pointer->IsUndefined()) {
+ Map* parent_map = Map::cast(back_pointer);
+ if (descriptors == parent_map->instance_descriptors()) {
+ start = parent_map->NumberOfOwnDescriptors();
+ }
+ }
+ if (start < end) {
+ StaticVisitor::VisitPointers(heap,
+ descriptors->GetDescriptorStartSlot(start),
+ descriptors->GetDescriptorEndSlot(end));
+ }
+
+ // Mark the prototype-dependent code array, but do not push it onto the
+ // marking stack; this keeps the references from it weak. Dead code is
+ // cleaned out when we iterate over maps in ClearNonLiveTransitions.
+ Object** slot = HeapObject::RawField(map, Map::kDependentCodeOffset);
+ HeapObject* obj = HeapObject::cast(*slot);
+ heap->mark_compact_collector()->RecordSlot(slot, slot, obj);
+ StaticVisitor::MarkObjectWithoutPush(heap, obj);
+
// Mark the pointer fields of the Map. Since the transitions array has
// been marked already, it is fine that one of these fields contains a
// pointer to it.
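
The start/end computation above encodes an ownership rule: when a
descriptor array is shared with the parent map, a child marks only the
descriptors it appended beyond the parent's NumberOfOwnDescriptors. A
hedged restatement (illustrative helper, not a V8 API):

static int OwnedDescriptorStart(Map* map, DescriptorArray* descriptors) {
  Object* back_pointer = map->GetBackPointer();
  if (back_pointer->IsUndefined()) return 0;     // a root map owns them all
  Map* parent = Map::cast(back_pointer);
  return descriptors == parent->instance_descriptors()
             ? parent->NumberOfOwnDescriptors()  // shared: skip parent prefix
             : 0;                                // private copy: own everything
}
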
@@ -634,7 +675,7 @@ void Code::CodeIterateBody(ObjectVisitor* v) {
RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
// There are two places where we iterate code bodies: here and the
- // templated CodeIterateBody (below). They should be kept in sync.
+ // templated CodeIterateBody (below). They should be kept in sync.
IteratePointer(v, kRelocationInfoOffset);
IteratePointer(v, kHandlerTableOffset);
IteratePointer(v, kDeoptimizationDataOffset);
@@ -657,8 +698,8 @@ void Code::CodeIterateBody(Heap* heap) {
RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT) |
RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
- // There are two places where we iterate code bodies: here and the
- // non-templated CodeIterateBody (above). They should be kept in sync.
+ // There are two places where we iterate code bodies: here and the non-
+ // templated CodeIterateBody (above). They should be kept in sync.
StaticVisitor::VisitPointer(
heap,
reinterpret_cast<Object**>(this->address() + kRelocationInfoOffset));
diff --git a/src/3rdparty/v8/src/objects-visiting.cc b/src/3rdparty/v8/src/objects-visiting.cc
index 6ae4d7c..088f5eb 100644
--- a/src/3rdparty/v8/src/objects-visiting.cc
+++ b/src/3rdparty/v8/src/objects-visiting.cc
@@ -46,7 +46,7 @@ StaticVisitorBase::VisitorId StaticVisitorBase::GetVisitorId(
switch (instance_type & kStringRepresentationMask) {
case kSeqStringTag:
if ((instance_type & kStringEncodingMask) == kOneByteStringTag) {
- return kVisitSeqAsciiString;
+ return kVisitSeqOneByteString;
} else {
return kVisitSeqTwoByteString;
}
@@ -128,6 +128,11 @@ StaticVisitorBase::VisitorId StaticVisitorBase::GetVisitorId(
kVisitDataObjectGeneric,
Foreign::kSize);
+ case SYMBOL_TYPE:
+ return GetVisitorIdForSize(kVisitDataObject,
+ kVisitDataObjectGeneric,
+ Symbol::kSize);
+
case FILLER_TYPE:
return kVisitDataObjectGeneric;
diff --git a/src/3rdparty/v8/src/objects-visiting.h b/src/3rdparty/v8/src/objects-visiting.h
index 3937e25..9b2422c 100644
--- a/src/3rdparty/v8/src/objects-visiting.h
+++ b/src/3rdparty/v8/src/objects-visiting.h
@@ -47,7 +47,7 @@ namespace internal {
class StaticVisitorBase : public AllStatic {
public:
#define VISITOR_ID_LIST(V) \
- V(SeqAsciiString) \
+ V(SeqOneByteString) \
V(SeqTwoByteString) \
V(ShortcutCandidate) \
V(ByteArray) \
@@ -221,7 +221,7 @@ class BodyVisitorBase : public AllStatic {
template<typename StaticVisitor, typename BodyDescriptor, typename ReturnType>
class FlexibleBodyVisitor : public BodyVisitorBase<StaticVisitor> {
public:
- static inline ReturnType Visit(Map* map, HeapObject* object) {
+ INLINE(static ReturnType Visit(Map* map, HeapObject* object)) {
int object_size = BodyDescriptor::SizeOf(map, object);
BodyVisitorBase<StaticVisitor>::IteratePointers(
map->GetHeap(),
@@ -247,7 +247,7 @@ class FlexibleBodyVisitor : public BodyVisitorBase<StaticVisitor> {
template<typename StaticVisitor, typename BodyDescriptor, typename ReturnType>
class FixedBodyVisitor : public BodyVisitorBase<StaticVisitor> {
public:
- static inline ReturnType Visit(Map* map, HeapObject* object) {
+ INLINE(static ReturnType Visit(Map* map, HeapObject* object)) {
BodyVisitorBase<StaticVisitor>::IteratePointers(
map->GetHeap(),
object,
@@ -279,16 +279,16 @@ class StaticNewSpaceVisitor : public StaticVisitorBase {
public:
static void Initialize();
- static inline int IterateBody(Map* map, HeapObject* obj) {
+ INLINE(static int IterateBody(Map* map, HeapObject* obj)) {
return table_.GetVisitor(map)(map, obj);
}
- static inline void VisitPointers(Heap* heap, Object** start, Object** end) {
+ INLINE(static void VisitPointers(Heap* heap, Object** start, Object** end)) {
for (Object** p = start; p < end; p++) StaticVisitor::VisitPointer(heap, p);
}
private:
- static inline int VisitJSFunction(Map* map, HeapObject* object) {
+ INLINE(static int VisitJSFunction(Map* map, HeapObject* object)) {
Heap* heap = map->GetHeap();
VisitPointers(heap,
HeapObject::RawField(object, JSFunction::kPropertiesOffset),
@@ -305,30 +305,30 @@ class StaticNewSpaceVisitor : public StaticVisitorBase {
return JSFunction::kSize;
}
- static inline int VisitByteArray(Map* map, HeapObject* object) {
+ INLINE(static int VisitByteArray(Map* map, HeapObject* object)) {
return reinterpret_cast<ByteArray*>(object)->ByteArraySize();
}
- static inline int VisitFixedDoubleArray(Map* map, HeapObject* object) {
+ INLINE(static int VisitFixedDoubleArray(Map* map, HeapObject* object)) {
int length = reinterpret_cast<FixedDoubleArray*>(object)->length();
return FixedDoubleArray::SizeFor(length);
}
- static inline int VisitJSObject(Map* map, HeapObject* object) {
+ INLINE(static int VisitJSObject(Map* map, HeapObject* object)) {
return JSObjectVisitor::Visit(map, object);
}
- static inline int VisitSeqAsciiString(Map* map, HeapObject* object) {
- return SeqAsciiString::cast(object)->
- SeqAsciiStringSize(map->instance_type());
+ INLINE(static int VisitSeqOneByteString(Map* map, HeapObject* object)) {
+ return SeqOneByteString::cast(object)->
+ SeqOneByteStringSize(map->instance_type());
}
- static inline int VisitSeqTwoByteString(Map* map, HeapObject* object) {
+ INLINE(static int VisitSeqTwoByteString(Map* map, HeapObject* object)) {
return SeqTwoByteString::cast(object)->
SeqTwoByteStringSize(map->instance_type());
}
- static inline int VisitFreeSpace(Map* map, HeapObject* object) {
+ INLINE(static int VisitFreeSpace(Map* map, HeapObject* object)) {
return FreeSpace::cast(object)->Size();
}
@@ -339,7 +339,7 @@ class StaticNewSpaceVisitor : public StaticVisitorBase {
return object_size;
}
- static inline int Visit(Map* map, HeapObject* object) {
+ INLINE(static int Visit(Map* map, HeapObject* object)) {
return map->instance_size();
}
};
@@ -382,18 +382,18 @@ class StaticMarkingVisitor : public StaticVisitorBase {
public:
static void Initialize();
- static inline void IterateBody(Map* map, HeapObject* obj) {
+ INLINE(static void IterateBody(Map* map, HeapObject* obj)) {
table_.GetVisitor(map)(map, obj);
}
- static inline void VisitCodeEntry(Heap* heap, Address entry_address);
- static inline void VisitEmbeddedPointer(Heap* heap, RelocInfo* rinfo);
- static inline void VisitGlobalPropertyCell(Heap* heap, RelocInfo* rinfo);
- static inline void VisitDebugTarget(Heap* heap, RelocInfo* rinfo);
- static inline void VisitCodeTarget(Heap* heap, RelocInfo* rinfo);
- static inline void VisitCodeAgeSequence(Heap* heap, RelocInfo* rinfo);
- static inline void VisitExternalReference(RelocInfo* rinfo) { }
- static inline void VisitRuntimeEntry(RelocInfo* rinfo) { }
+ INLINE(static void VisitCodeEntry(Heap* heap, Address entry_address));
+ INLINE(static void VisitEmbeddedPointer(Heap* heap, RelocInfo* rinfo));
+ INLINE(static void VisitGlobalPropertyCell(Heap* heap, RelocInfo* rinfo));
+ INLINE(static void VisitDebugTarget(Heap* heap, RelocInfo* rinfo));
+ INLINE(static void VisitCodeTarget(Heap* heap, RelocInfo* rinfo));
+ INLINE(static void VisitCodeAgeSequence(Heap* heap, RelocInfo* rinfo));
+ INLINE(static void VisitExternalReference(RelocInfo* rinfo)) { }
+ INLINE(static void VisitRuntimeEntry(RelocInfo* rinfo)) { }
// TODO(mstarzinger): This should be made protected once refactoring is done.
// Mark non-optimize code for functions inlined into the given optimized
@@ -401,12 +401,12 @@ class StaticMarkingVisitor : public StaticVisitorBase {
static void MarkInlinedFunctionsCode(Heap* heap, Code* code);
protected:
- static inline void VisitMap(Map* map, HeapObject* object);
- static inline void VisitCode(Map* map, HeapObject* object);
- static inline void VisitSharedFunctionInfo(Map* map, HeapObject* object);
- static inline void VisitJSFunction(Map* map, HeapObject* object);
- static inline void VisitJSRegExp(Map* map, HeapObject* object);
- static inline void VisitNativeContext(Map* map, HeapObject* object);
+ INLINE(static void VisitMap(Map* map, HeapObject* object));
+ INLINE(static void VisitCode(Map* map, HeapObject* object));
+ INLINE(static void VisitSharedFunctionInfo(Map* map, HeapObject* object));
+ INLINE(static void VisitJSFunction(Map* map, HeapObject* object));
+ INLINE(static void VisitJSRegExp(Map* map, HeapObject* object));
+ INLINE(static void VisitNativeContext(Map* map, HeapObject* object));
// Mark pointers in a Map and its TransitionArray together, possibly
// treating transitions or back pointers weak.
@@ -414,8 +414,8 @@ class StaticMarkingVisitor : public StaticVisitorBase {
static void MarkTransitionArray(Heap* heap, TransitionArray* transitions);
// Code flushing support.
- static inline bool IsFlushable(Heap* heap, JSFunction* function);
- static inline bool IsFlushable(Heap* heap, SharedFunctionInfo* shared_info);
+ INLINE(static bool IsFlushable(Heap* heap, JSFunction* function));
+ INLINE(static bool IsFlushable(Heap* heap, SharedFunctionInfo* shared_info));
// Helpers used by code flushing support that visit pointer fields and treat
// references to code objects either strongly or weakly.
@@ -430,11 +430,15 @@ class StaticMarkingVisitor : public StaticVisitorBase {
static inline void VisitSpecialized(Map* map, HeapObject* object) {
}
- static inline void Visit(Map* map, HeapObject* object) {
+ INLINE(static void Visit(Map* map, HeapObject* object)) {
}
};
typedef FlexibleBodyVisitor<StaticVisitor,
+ FixedArray::BodyDescriptor,
+ void> FixedArrayVisitor;
+
+ typedef FlexibleBodyVisitor<StaticVisitor,
JSObject::BodyDescriptor,
void> JSObjectVisitor;
diff --git a/src/3rdparty/v8/src/objects.cc b/src/3rdparty/v8/src/objects.cc
index 81cd105..bb185a5 100644
--- a/src/3rdparty/v8/src/objects.cc
+++ b/src/3rdparty/v8/src/objects.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -27,6 +27,7 @@
#include "v8.h"
+#include "accessors.h"
#include "api.h"
#include "arguments.h"
#include "bootstrapper.h"
@@ -134,13 +135,15 @@ void Object::Lookup(String* name, LookupResult* result) {
if (IsJSReceiver()) {
holder = this;
} else {
- Context* native_context = Isolate::Current()->context()->native_context();
+ Context* native_context = result->isolate()->context()->native_context();
if (IsNumber()) {
holder = native_context->number_function()->instance_prototype();
} else if (IsString()) {
holder = native_context->string_function()->instance_prototype();
} else if (IsBoolean()) {
holder = native_context->boolean_function()->instance_prototype();
+ } else if (IsSymbol()) {
+ holder = native_context->symbol_delegate();
} else {
Isolate::Current()->PushStackTraceAndDie(
0xDEAD0000, this, JSReceiver::cast(this)->map(), 0xDEAD0001);
@@ -179,11 +182,11 @@ MaybeObject* JSObject::GetPropertyWithCallback(Object* receiver,
}
// api style callbacks.
- if (structure->IsAccessorInfo()) {
- AccessorInfo* data = AccessorInfo::cast(structure);
+ if (structure->IsExecutableAccessorInfo()) {
+ ExecutableAccessorInfo* data = ExecutableAccessorInfo::cast(structure);
if (!data->IsCompatibleReceiver(receiver)) {
- Handle<Object> name_handle(name);
- Handle<Object> receiver_handle(receiver);
+ Handle<Object> name_handle(name, isolate);
+ Handle<Object> receiver_handle(receiver, isolate);
Handle<Object> args[2] = { name_handle, receiver_handle };
Handle<Object> error =
isolate->factory()->NewTypeError("incompatible_method_receiver",
@@ -226,6 +229,11 @@ MaybeObject* JSObject::GetPropertyWithCallback(Object* receiver,
return isolate->heap()->undefined_value();
}
+ // TODO(dcarney): Handle correctly.
+ if (structure->IsDeclaredAccessorInfo()) {
+ return isolate->heap()->undefined_value();
+ }
+
UNREACHABLE();
return NULL;
}
@@ -235,8 +243,8 @@ MaybeObject* JSProxy::GetPropertyWithHandler(Object* receiver_raw,
String* name_raw) {
Isolate* isolate = GetIsolate();
HandleScope scope(isolate);
- Handle<Object> receiver(receiver_raw);
- Handle<Object> name(name_raw);
+ Handle<Object> receiver(receiver_raw, isolate);
+ Handle<Object> name(name_raw, isolate);
Handle<Object> args[] = { receiver, name };
Handle<Object> result = CallTrap(
@@ -247,6 +255,18 @@ MaybeObject* JSProxy::GetPropertyWithHandler(Object* receiver_raw,
}
+Handle<Object> Object::GetProperty(Handle<Object> object, Handle<String> name) {
+ // TODO(rossberg): The index test should not be here but in the GetProperty
+ // method (or somewhere else entirely). Needs more global clean-up.
+ uint32_t index;
+ if (name->AsArrayIndex(&index)) return GetElement(object, index);
+ Isolate* isolate = object->IsHeapObject()
+ ? Handle<HeapObject>::cast(object)->GetIsolate()
+ : Isolate::Current();
+ CALL_HEAP_FUNCTION(isolate, object->GetProperty(*name), Object);
+}
+
+
Handle<Object> Object::GetElement(Handle<Object> object, uint32_t index) {
Isolate* isolate = object->IsHeapObject()
? Handle<HeapObject>::cast(object)->GetIsolate()
@@ -285,11 +305,12 @@ bool JSProxy::HasElementWithHandler(uint32_t index) {
MaybeObject* Object::GetPropertyWithDefinedGetter(Object* receiver,
JSReceiver* getter) {
- HandleScope scope;
+ Isolate* isolate = getter->GetIsolate();
+ HandleScope scope(isolate);
Handle<JSReceiver> fun(getter);
- Handle<Object> self(receiver);
+ Handle<Object> self(receiver, isolate);
#ifdef ENABLE_DEBUGGER_SUPPORT
- Debug* debug = fun->GetHeap()->isolate()->debug();
+ Debug* debug = isolate->debug();
// Handle stepping into a getter if step into is active.
// TODO(rossberg): should this apply to getters that are function proxies?
if (debug->StepInActive() && fun->IsJSFunction()) {
@@ -594,7 +615,8 @@ MaybeObject* Object::GetProperty(Object* receiver,
// Make sure that the top context does not change when doing
// callbacks or interceptor calls.
AssertNoContextChange ncc;
- Heap* heap = name->GetHeap();
+ Isolate* isolate = name->GetIsolate();
+ Heap* heap = isolate->heap();
// Traverse the prototype chain from the current object (this) to
// the holder and check for access rights. This avoids traversing the
@@ -604,16 +626,20 @@ MaybeObject* Object::GetProperty(Object* receiver,
// holder in the prototype chain.
// Proxy handlers do not use the proxy's prototype, so we can skip this.
if (!result->IsHandler()) {
- Object* last = result->IsProperty()
+ Object* last = result->IsProperty() && !receiver->IsSymbol()
? result->holder()
: Object::cast(heap->null_value());
- ASSERT(this != this->GetPrototype());
- for (Object* current = this; true; current = current->GetPrototype()) {
+ ASSERT(this != this->GetPrototype(isolate));
+ for (Object* current = this;
+ true;
+ current = current->GetPrototype(isolate)) {
if (current->IsAccessCheckNeeded()) {
// Check if we're allowed to read from the current object. Note
// that even though we may not actually end up loading the named
// property from the current object, we still check that we have
// access to it.
+ // TODO(dcarney): revert.
+ CHECK(current->IsJSObject());
JSObject* checked = JSObject::cast(current);
if (!heap->isolate()->MayNamedAccess(checked, name, v8::ACCESS_GET)) {
return checked->GetPropertyWithFailedAccessCheck(receiver,
@@ -641,7 +667,8 @@ MaybeObject* Object::GetProperty(Object* receiver,
ASSERT(!value->IsTheHole() || result->IsReadOnly());
return value->IsTheHole() ? heap->undefined_value() : value;
case FIELD:
- value = result->holder()->FastPropertyAt(result->GetFieldIndex());
+ value = result->holder()->FastPropertyAt(
+ result->GetFieldIndex().field_index());
ASSERT(!value->IsTheHole() || result->IsReadOnly());
return value->IsTheHole() ? heap->undefined_value() : value;
case CONSTANT_FUNCTION:
@@ -665,18 +692,18 @@ MaybeObject* Object::GetProperty(Object* receiver,
MaybeObject* Object::GetElementWithReceiver(Object* receiver, uint32_t index) {
- Heap* heap = IsSmi()
- ? Isolate::Current()->heap()
- : HeapObject::cast(this)->GetHeap();
+ Isolate* isolate = IsSmi()
+ ? Isolate::Current()
+ : HeapObject::cast(this)->GetIsolate();
+ Heap* heap = isolate->heap();
Object* holder = this;
// Iterate up the prototype chain until an element is found or the null
// prototype is encountered.
for (holder = this;
holder != heap->null_value();
- holder = holder->GetPrototype()) {
+ holder = holder->GetPrototype(isolate)) {
if (!holder->IsJSObject()) {
- Isolate* isolate = heap->isolate();
Context* native_context = isolate->context()->native_context();
if (holder->IsNumber()) {
holder = native_context->number_function()->instance_prototype();
@@ -684,6 +711,8 @@ MaybeObject* Object::GetElementWithReceiver(Object* receiver, uint32_t index) {
holder = native_context->string_function()->instance_prototype();
} else if (holder->IsBoolean()) {
holder = native_context->boolean_function()->instance_prototype();
+ } else if (holder->IsSymbol()) {
+ holder = native_context->symbol_delegate();
} else if (holder->IsJSProxy()) {
return JSProxy::cast(holder)->GetElementWithHandler(receiver, index);
} else {
@@ -722,10 +751,9 @@ MaybeObject* Object::GetElementWithReceiver(Object* receiver, uint32_t index) {
}
-Object* Object::GetPrototype() {
+Object* Object::GetPrototype(Isolate* isolate) {
if (IsSmi()) {
- Heap* heap = Isolate::Current()->heap();
- Context* context = heap->isolate()->context()->native_context();
+ Context* context = isolate->context()->native_context();
return context->number_function()->instance_prototype();
}
@@ -736,8 +764,7 @@ Object* Object::GetPrototype() {
if (heap_object->IsJSReceiver()) {
return heap_object->map()->prototype();
}
- Heap* heap = heap_object->GetHeap();
- Context* context = heap->isolate()->context()->native_context();
+ Context* context = isolate->context()->native_context();
if (heap_object->IsHeapNumber()) {
return context->number_function()->instance_prototype();
@@ -748,11 +775,21 @@ Object* Object::GetPrototype() {
if (heap_object->IsBoolean()) {
return context->boolean_function()->instance_prototype();
} else {
- return heap->null_value();
+ return isolate->heap()->null_value();
}
}
+Object* Object::GetDelegate(Isolate* isolate) {
+ if (IsSymbol()) {
+ Heap* heap = Symbol::cast(this)->GetHeap();
+ Context* context = heap->isolate()->context()->native_context();
+ return context->symbol_delegate();
+ }
+ return GetPrototype(isolate);
+}
+
+
MaybeObject* Object::GetHash(CreationFlag flag) {
// The object is either a number, a string, an odd-ball,
// a real JS object, or a Harmony proxy.
@@ -760,8 +797,8 @@ MaybeObject* Object::GetHash(CreationFlag flag) {
uint32_t hash = ComputeLongHash(double_to_uint64(Number()));
return Smi::FromInt(hash & Smi::kMaxValue);
}
- if (IsString()) {
- uint32_t hash = String::cast(this)->Hash();
+ if (IsName()) {
+ uint32_t hash = Name::cast(this)->Hash();
return Smi::FromInt(hash);
}
if (IsOddball()) {
@@ -881,14 +918,15 @@ MaybeObject* String::SlowTryFlatten(PretenureFlag pretenure) {
int len = length();
Object* object;
String* result;
- if (IsAsciiRepresentation()) {
- { MaybeObject* maybe_object = heap->AllocateRawAsciiString(len, tenure);
+ if (IsOneByteRepresentation()) {
+ { MaybeObject* maybe_object =
+ heap->AllocateRawOneByteString(len, tenure);
if (!maybe_object->ToObject(&object)) return maybe_object;
}
result = String::cast(object);
String* first = cs->first();
int first_length = first->length();
- char* dest = SeqAsciiString::cast(result)->GetChars();
+ uint8_t* dest = SeqOneByteString::cast(result)->GetChars();
WriteToFlat(first, dest, 0, first_length);
String* second = cs->second();
WriteToFlat(second,
@@ -941,29 +979,34 @@ bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
if (size < ExternalString::kShortSize) {
return false;
}
- bool is_ascii = this->IsAsciiRepresentation();
- bool is_symbol = this->IsSymbol();
+ bool is_ascii = this->IsOneByteRepresentation();
+ bool is_internalized = this->IsInternalizedString();
// Morph the object to an external string by adjusting the map and
// reinitializing the fields.
if (size >= ExternalString::kSize) {
this->set_map_no_write_barrier(
- is_symbol
- ? (is_ascii ? heap->external_symbol_with_ascii_data_map()
- : heap->external_symbol_map())
- : (is_ascii ? heap->external_string_with_ascii_data_map()
- : heap->external_string_map()));
+ is_internalized
+ ? (is_ascii
+ ? heap->external_internalized_string_with_ascii_data_map()
+ : heap->external_internalized_string_map())
+ : (is_ascii
+ ? heap->external_string_with_ascii_data_map()
+ : heap->external_string_map()));
} else {
this->set_map_no_write_barrier(
- is_symbol
- ? (is_ascii ? heap->short_external_symbol_with_ascii_data_map()
- : heap->short_external_symbol_map())
- : (is_ascii ? heap->short_external_string_with_ascii_data_map()
- : heap->short_external_string_map()));
+ is_internalized
+ ? (is_ascii
+ ? heap->
+ short_external_internalized_string_with_ascii_data_map()
+ : heap->short_external_internalized_string_map())
+ : (is_ascii
+ ? heap->short_external_string_with_ascii_data_map()
+ : heap->short_external_string_map()));
}
ExternalTwoByteString* self = ExternalTwoByteString::cast(this);
self->set_resource(resource);
- if (is_symbol) self->Hash(); // Force regeneration of the hash value.
+ if (is_internalized) self->Hash(); // Force regeneration of the hash value.
// Fill the remainder of the string with dead wood.
int new_size = this->Size(); // Byte size of the external String object.
@@ -993,22 +1036,22 @@ bool String::MakeExternal(v8::String::ExternalAsciiStringResource* resource) {
if (size < ExternalString::kShortSize) {
return false;
}
- bool is_symbol = this->IsSymbol();
+ bool is_internalized = this->IsInternalizedString();
// Morph the object to an external string by adjusting the map and
// reinitializing the fields. Use short version if space is limited.
if (size >= ExternalString::kSize) {
this->set_map_no_write_barrier(
- is_symbol ? heap->external_ascii_symbol_map()
- : heap->external_ascii_string_map());
+ is_internalized ? heap->external_ascii_internalized_string_map()
+ : heap->external_ascii_string_map());
} else {
this->set_map_no_write_barrier(
- is_symbol ? heap->short_external_ascii_symbol_map()
- : heap->short_external_ascii_string_map());
+ is_internalized ? heap->short_external_ascii_internalized_string_map()
+ : heap->short_external_ascii_string_map());
}
ExternalAsciiString* self = ExternalAsciiString::cast(this);
self->set_resource(resource);
- if (is_symbol) self->Hash(); // Force regeneration of the hash value.
+ if (is_internalized) self->Hash(); // Force regeneration of the hash value.
// Fill the remainder of the string with dead wood.
int new_size = this->Size(); // Byte size of the external String object.
@@ -1033,7 +1076,8 @@ void String::StringShortPrint(StringStream* accumulator) {
return;
}
- StringInputBuffer buf(this);
+ ConsStringIteratorOp op;
+ StringCharacterStream stream(this, &op);
bool truncated = false;
if (len > kMaxShortPrintLength) {
@@ -1042,17 +1086,17 @@ void String::StringShortPrint(StringStream* accumulator) {
}
bool ascii = true;
for (int i = 0; i < len; i++) {
- int c = buf.GetNext();
+ uint16_t c = stream.GetNext();
if (c < 32 || c >= 127) {
ascii = false;
}
}
- buf.Reset(this);
+ stream.Reset(this);
if (ascii) {
accumulator->Add("<String[%u]: ", length());
for (int i = 0; i < len; i++) {
- accumulator->Put(buf.GetNext());
+ accumulator->Put(static_cast<char>(stream.GetNext()));
}
accumulator->Put('>');
} else {
@@ -1060,7 +1104,7 @@ void String::StringShortPrint(StringStream* accumulator) {
// characters and that backslashes are therefore escaped.
accumulator->Add("<String[%u]\\: ", length());
for (int i = 0; i < len; i++) {
- int c = buf.GetNext();
+ uint16_t c = stream.GetNext();
if (c == '\n') {
accumulator->Add("\\n");
} else if (c == '\r') {
@@ -1070,7 +1114,7 @@ void String::StringShortPrint(StringStream* accumulator) {
} else if (c < 32 || c > 126) {
accumulator->Add("\\x%02x", c);
} else {
- accumulator->Put(c);
+ accumulator->Put(static_cast<char>(c));
}
}
if (truncated) {
@@ -1118,6 +1162,10 @@ void JSObject::JSObjectShortPrint(StringStream* accumulator) {
}
break;
}
+ case JS_MODULE_TYPE: {
+ accumulator->Add("<JS Module>");
+ break;
+ }
// All other JSObjects are rather similar to each other (JSObject,
// JSGlobalProxy, JSGlobalObject, JSUndetectableObject, JSValue).
default: {
@@ -1173,7 +1221,7 @@ void JSObject::PrintElementsTransition(
FPrintF(file, " -> ");
PrintElementsKind(file, to_kind);
FPrintF(file, "] in ");
- JavaScriptFrame::PrintTop(file, false, true);
+ JavaScriptFrame::PrintTop(GetIsolate(), file, false, true);
FPrintF(file, " for ");
ShortPrint(file);
FPrintF(file, " from ");
@@ -1291,6 +1339,9 @@ void HeapObject::HeapObjectShortPrint(StringStream* accumulator) {
accumulator->Add("<Odd Oddball>");
break;
}
+ case SYMBOL_TYPE:
+ accumulator->Add("<Symbol: %d>", Symbol::cast(this)->Hash());
+ break;
case HEAP_NUMBER_TYPE:
accumulator->Add("<Number: ");
HeapNumber::cast(this)->HeapNumberPrint(accumulator);
@@ -1399,6 +1450,7 @@ void HeapObject::IterateBody(InstanceType type, int object_size,
case JS_GLOBAL_PROPERTY_CELL_TYPE:
JSGlobalPropertyCell::BodyDescriptor::IterateBody(this, v);
break;
+ case SYMBOL_TYPE:
case HEAP_NUMBER_TYPE:
case FILLER_TYPE:
case BYTE_ARRAY_TYPE:
@@ -1473,14 +1525,14 @@ void HeapNumber::HeapNumberPrint(StringStream* accumulator) {
String* JSReceiver::class_name() {
if (IsJSFunction() && IsJSFunctionProxy()) {
- return GetHeap()->function_class_symbol();
+ return GetHeap()->function_class_string();
}
if (map()->constructor()->IsJSFunction()) {
JSFunction* constructor = JSFunction::cast(map()->constructor());
return String::cast(constructor->shared()->instance_class_name());
}
// If the constructor is not present, return "Object".
- return GetHeap()->Object_symbol();
+ return GetHeap()->Object_string();
}
@@ -1496,7 +1548,7 @@ String* JSReceiver::constructor_name() {
}
// TODO(rossberg): what about proxies?
// If the constructor is not present, return "Object".
- return GetHeap()->Object_symbol();
+ return GetHeap()->Object_string();
}
@@ -1518,15 +1570,16 @@ MaybeObject* JSObject::AddFastPropertyUsingMap(Map* new_map,
}
-static bool IsIdentifier(UnicodeCache* cache,
- unibrow::CharacterStream* buffer) {
+static bool IsIdentifier(UnicodeCache* cache, String* string) {
// Checks whether the buffer contains an identifier (no escape).
- if (!buffer->has_more()) return false;
- if (!cache->IsIdentifierStart(buffer->GetNext())) {
+ if (string->length() == 0) return false;
+ ConsStringIteratorOp op;
+ StringCharacterStream stream(string, &op);
+ if (!cache->IsIdentifierStart(stream.GetNext())) {
return false;
}
- while (buffer->has_more()) {
- if (!cache->IsIdentifierPart(buffer->GetNext())) {
+ while (stream.HasMore()) {
+ if (!cache->IsIdentifierPart(stream.GetNext())) {
return false;
}
}
@@ -1544,12 +1597,11 @@ MaybeObject* JSObject::AddFastProperty(String* name,
name, map()->NumberOfOwnDescriptors()));
// Normalize the object if the name is an actual string (not the
- // hidden symbols) and is not a real identifier.
+ // hidden strings) and is not a real identifier.
// Normalize the object if it will have too many fast properties.
Isolate* isolate = GetHeap()->isolate();
- StringInputBuffer buffer(name);
- if ((!IsIdentifier(isolate->unicode_cache(), &buffer)
- && name != isolate->heap()->hidden_symbol()) ||
+ if ((!IsIdentifier(isolate->unicode_cache(), name)
+ && name != isolate->heap()->hidden_string()) ||
(map()->unused_property_fields() == 0 &&
TooManyFastProperties(properties()->length(), store_mode))) {
Object* obj;
@@ -1579,10 +1631,7 @@ MaybeObject* JSObject::AddFastProperty(String* name,
if (!maybe_values->To(&values)) return maybe_values;
}
- // Only allow map transition if the object isn't the global object.
- TransitionFlag flag = isolate->empty_object_map() != map()
- ? INSERT_TRANSITION
- : OMIT_TRANSITION;
+ TransitionFlag flag = INSERT_TRANSITION;
Map* new_map;
MaybeObject* maybe_new_map = map()->CopyAddDescriptor(&new_field, flag);
@@ -1608,15 +1657,11 @@ MaybeObject* JSObject::AddConstantFunctionProperty(
// Allocate new instance descriptors with (name, function) added
ConstantFunctionDescriptor d(name, function, attributes, 0);
- Heap* heap = GetHeap();
TransitionFlag flag =
- // Do not add transitions to the empty object map (map of "new Object()"),
- // nor to global objects.
- (map() == heap->isolate()->empty_object_map() || IsGlobalObject() ||
+ // Do not add transitions to global objects.
+ (IsGlobalObject() ||
// Don't add transitions to special properties with non-trivial
// attributes.
- // TODO(verwaest): Once we support attribute changes, these transitions
- // should be kept as well.
attributes != NONE)
? OMIT_TRANSITION
: INSERT_TRANSITION;
@@ -1677,6 +1722,7 @@ MaybeObject* JSObject::AddProperty(String* name,
ASSERT(!IsJSGlobalProxy());
Map* map_of_this = map();
Heap* heap = GetHeap();
+ Isolate* isolate = heap->isolate();
MaybeObject* result;
if (extensibility_check == PERFORM_EXTENSIBILITY_CHECK &&
!map_of_this->is_extensible()) {
@@ -1684,7 +1730,7 @@ MaybeObject* JSObject::AddProperty(String* name,
return value;
} else {
Handle<Object> args[1] = {Handle<String>(name)};
- return heap->isolate()->Throw(
+ return isolate->Throw(
*FACTORY->NewTypeError("object_not_extensible",
HandleVector(args, 1)));
}
@@ -1714,11 +1760,13 @@ MaybeObject* JSObject::AddProperty(String* name,
}
Handle<Object> hresult;
- if (!result->ToHandle(&hresult)) return result;
+ if (!result->ToHandle(&hresult, isolate)) return result;
if (FLAG_harmony_observation && map()->is_observed()) {
- EnqueueChangeRecord(handle(this), "new", handle(name),
- handle(heap->the_hole_value()));
+ EnqueueChangeRecord(handle(this, isolate),
+ "new",
+ handle(name, isolate),
+ handle(heap->the_hole_value(), isolate));
}
return *hresult;
@@ -1730,12 +1778,15 @@ void JSObject::EnqueueChangeRecord(Handle<JSObject> object,
Handle<String> name,
Handle<Object> old_value) {
Isolate* isolate = object->GetIsolate();
- HandleScope scope;
- Handle<String> type = isolate->factory()->LookupAsciiSymbol(type_str);
+ HandleScope scope(isolate);
+ Handle<String> type = isolate->factory()->InternalizeUtf8String(type_str);
+ if (object->IsJSGlobalObject()) {
+ object = handle(JSGlobalObject::cast(*object)->global_receiver(), isolate);
+ }
Handle<Object> args[] = { type, object, name, old_value };
bool threw;
Execution::Call(Handle<JSFunction>(isolate->observers_notify_change()),
- Handle<Object>(isolate->heap()->undefined_value()),
+ isolate->factory()->undefined_value(),
old_value->IsTheHole() ? 3 : 4, args,
&threw);
ASSERT(!threw);
@@ -1813,10 +1864,8 @@ MaybeObject* JSObject::ConvertTransitionToMapTransition(
if (!HasFastProperties()) return result;
- // This method should only be used to convert existing transitions. Objects
- // with the map of "new Object()" cannot have transitions in the first place.
+ // This method should only be used to convert existing transitions.
Map* new_map = map();
- ASSERT(new_map != GetIsolate()->empty_object_map());
// TODO(verwaest): From here on we lose existing map transitions, causing
// invalid back pointers. This will change once we can store multiple
@@ -1941,7 +1990,8 @@ Handle<Object> JSReceiver::SetProperty(Handle<JSReceiver> object,
bool skip_fallback_interceptor) {
CALL_HEAP_FUNCTION(object->GetIsolate(),
object->SetProperty(*key, *value, attributes, strict_mode,
- MAY_BE_STORE_FROM_KEYED, skip_fallback_interceptor),
+ MAY_BE_STORE_FROM_KEYED,
+ skip_fallback_interceptor),
Object);
}
@@ -1953,7 +2003,7 @@ MaybeObject* JSReceiver::SetProperty(String* name,
JSReceiver::StoreFromKeyed store_mode,
bool skip_fallback_interceptor) {
LookupResult result(GetIsolate());
- LocalLookup(name, &result, skip_fallback_interceptor);
+ LocalLookup(name, &result, true, skip_fallback_interceptor);
if (!result.IsFound()) {
map()->LookupTransition(JSObject::cast(this), name, &result);
}
@@ -1987,12 +2037,12 @@ MaybeObject* JSObject::SetPropertyWithCallback(Object* structure,
return *value_handle;
}
- if (structure->IsAccessorInfo()) {
+ if (structure->IsExecutableAccessorInfo()) {
// api style callbacks
- AccessorInfo* data = AccessorInfo::cast(structure);
+ ExecutableAccessorInfo* data = ExecutableAccessorInfo::cast(structure);
if (!data->IsCompatibleReceiver(this)) {
- Handle<Object> name_handle(name);
- Handle<Object> receiver_handle(this);
+ Handle<Object> name_handle(name, isolate);
+ Handle<Object> receiver_handle(this, isolate);
Handle<Object> args[2] = { name_handle, receiver_handle };
Handle<Object> error =
isolate->factory()->NewTypeError("incompatible_method_receiver",
@@ -2036,6 +2086,11 @@ MaybeObject* JSObject::SetPropertyWithCallback(Object* structure,
}
}
+ // TODO(dcarney): Handle correctly.
+ if (structure->IsDeclaredAccessorInfo()) {
+ return value;
+ }
+
UNREACHABLE();
return NULL;
}
@@ -2073,10 +2128,10 @@ MaybeObject* JSObject::SetElementWithCallbackSetterInPrototypes(
Heap* heap = GetHeap();
for (Object* pt = GetPrototype();
pt != heap->null_value();
- pt = pt->GetPrototype()) {
+ pt = pt->GetPrototype(GetIsolate())) {
if (pt->IsJSProxy()) {
String* name;
- MaybeObject* maybe = GetHeap()->Uint32ToString(index);
+ MaybeObject* maybe = heap->Uint32ToString(index);
if (!maybe->To<String>(&name)) {
*found = true; // Force abort
return maybe;
@@ -2156,7 +2211,8 @@ MaybeObject* JSObject::SetPropertyViaPrototypes(
if (!FLAG_es5_readonly) *done = false;
if (*done) {
if (strict_mode == kNonStrictMode) return value;
- Handle<Object> args[] = { Handle<Object>(name), Handle<Object>(this)};
+ Handle<Object> args[] = { Handle<Object>(name, isolate),
+ Handle<Object>(this, isolate)};
return isolate->Throw(*isolate->factory()->NewTypeError(
"strict_read_only_property", HandleVector(args, ARRAY_SIZE(args))));
}
@@ -2239,13 +2295,13 @@ void Map::AppendCallbackDescriptors(Handle<Map> map,
ASSERT(array->NumberOfSlackDescriptors() >= nof_callbacks);
- // Ensure the keys are symbols before writing them into the instance
- // descriptor. Since it may cause a GC, it has to be done before we
+ // Ensure the keys are internalized strings before writing them into the
+ // instance descriptor. Since it may cause a GC, it has to be done before we
// temporarily put the heap in an invalid state while appending descriptors.
for (int i = 0; i < nof_callbacks; ++i) {
Handle<AccessorInfo> entry(AccessorInfo::cast(callbacks.get(i)));
Handle<String> key =
- isolate->factory()->SymbolFromString(
+ isolate->factory()->InternalizedStringFromString(
Handle<String>(String::cast(entry->name())));
entry->set_name(*key);
}
@@ -2390,10 +2446,8 @@ MaybeObject* JSObject::GetElementsTransitionMapSlow(ElementsKind to_kind) {
}
bool allow_store_transition =
- // Only remember the map transition if the object's map is NOT equal to
- // the global object_function's map and there is not an already existing
+ // Only remember the map transition if there is not an already existing
// non-matching element transition.
- (GetIsolate()->empty_object_map() != map()) &&
!start_map->IsUndefined() && !start_map->is_shared() &&
IsFastElementsKind(from_kind);
@@ -2439,7 +2493,7 @@ void JSObject::LocalLookupRealNamedProperty(String* name,
// occur as fields.
if (result->IsField() &&
result->IsReadOnly() &&
- FastPropertyAt(result->GetFieldIndex())->IsTheHole()) {
+ FastPropertyAt(result->GetFieldIndex().field_index())->IsTheHole()) {
result->DisallowCaching();
}
return;
@@ -2477,10 +2531,11 @@ void JSObject::LookupRealNamedProperty(String* name, LookupResult* result) {
void JSObject::LookupRealNamedPropertyInPrototypes(String* name,
LookupResult* result) {
- Heap* heap = GetHeap();
+ Isolate* isolate = GetIsolate();
+ Heap* heap = isolate->heap();
for (Object* pt = GetPrototype();
pt != heap->null_value();
- pt = pt->GetPrototype()) {
+ pt = pt->GetPrototype(isolate)) {
if (pt->IsJSProxy()) {
return result->HandlerResult(JSProxy::cast(pt));
}
@@ -2543,7 +2598,7 @@ MaybeObject* JSObject::SetPropertyWithFailedAccessCheck(
Isolate* isolate = GetIsolate();
HandleScope scope(isolate);
- Handle<Object> value_handle(value);
+ Handle<Object> value_handle(value, isolate);
isolate->ReportFailedAccessCheck(this, v8::ACCESS_SET);
return *value_handle;
}
@@ -2568,8 +2623,8 @@ MaybeObject* JSReceiver::SetProperty(LookupResult* result,
bool JSProxy::HasPropertyWithHandler(String* name_raw) {
Isolate* isolate = GetIsolate();
HandleScope scope(isolate);
- Handle<Object> receiver(this);
- Handle<Object> name(name_raw);
+ Handle<Object> receiver(this, isolate);
+ Handle<Object> name(name_raw, isolate);
Handle<Object> args[] = { name };
Handle<Object> result = CallTrap(
@@ -2589,8 +2644,8 @@ MUST_USE_RESULT MaybeObject* JSProxy::SetPropertyWithHandler(
Isolate* isolate = GetIsolate();
HandleScope scope(isolate);
Handle<JSReceiver> receiver(receiver_raw);
- Handle<Object> name(name_raw);
- Handle<Object> value(value_raw);
+ Handle<Object> name(name_raw, isolate);
+ Handle<Object> value(value_raw, isolate);
Handle<Object> args[] = { receiver, name, value };
CallTrap("set", isolate->derived_set_trap(), ARRAY_SIZE(args), args);
@@ -2611,8 +2666,8 @@ MUST_USE_RESULT MaybeObject* JSProxy::SetPropertyViaPrototypesWithHandler(
Handle<JSProxy> proxy(this);
Handle<JSReceiver> receiver(receiver_raw);
Handle<String> name(name_raw);
- Handle<Object> value(value_raw);
- Handle<Object> handler(this->handler()); // Trap might morph proxy.
+ Handle<Object> value(value_raw, isolate);
+ Handle<Object> handler(this->handler(), isolate); // Trap might morph proxy.
*done = true; // except where redefined...
Handle<Object> args[] = { name };
@@ -2635,14 +2690,16 @@ MUST_USE_RESULT MaybeObject* JSProxy::SetPropertyViaPrototypesWithHandler(
// [[GetProperty]] requires to check that all properties are configurable.
Handle<String> configurable_name =
- isolate->factory()->LookupAsciiSymbol("configurable_");
+ isolate->factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("configurable_"));
Handle<Object> configurable(
- v8::internal::GetProperty(desc, configurable_name));
+ v8::internal::GetProperty(isolate, desc, configurable_name));
ASSERT(!isolate->has_pending_exception());
ASSERT(configurable->IsTrue() || configurable->IsFalse());
if (configurable->IsFalse()) {
Handle<String> trap =
- isolate->factory()->LookupAsciiSymbol("getPropertyDescriptor");
+ isolate->factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("getPropertyDescriptor"));
Handle<Object> args[] = { handler, trap, name };
Handle<Object> error = isolate->factory()->NewTypeError(
"proxy_prop_not_configurable", HandleVector(args, ARRAY_SIZE(args)));
@@ -2652,14 +2709,18 @@ MUST_USE_RESULT MaybeObject* JSProxy::SetPropertyViaPrototypesWithHandler(
// Check for DataDescriptor.
Handle<String> hasWritable_name =
- isolate->factory()->LookupAsciiSymbol("hasWritable_");
- Handle<Object> hasWritable(v8::internal::GetProperty(desc, hasWritable_name));
+ isolate->factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("hasWritable_"));
+ Handle<Object> hasWritable(
+ v8::internal::GetProperty(isolate, desc, hasWritable_name));
ASSERT(!isolate->has_pending_exception());
ASSERT(hasWritable->IsTrue() || hasWritable->IsFalse());
if (hasWritable->IsTrue()) {
Handle<String> writable_name =
- isolate->factory()->LookupAsciiSymbol("writable_");
- Handle<Object> writable(v8::internal::GetProperty(desc, writable_name));
+ isolate->factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("writable_"));
+ Handle<Object> writable(
+ v8::internal::GetProperty(isolate, desc, writable_name));
ASSERT(!isolate->has_pending_exception());
ASSERT(writable->IsTrue() || writable->IsFalse());
*done = writable->IsFalse();
@@ -2672,8 +2733,9 @@ MUST_USE_RESULT MaybeObject* JSProxy::SetPropertyViaPrototypesWithHandler(
}
// We have an AccessorDescriptor.
- Handle<String> set_name = isolate->factory()->LookupAsciiSymbol("set_");
- Handle<Object> setter(v8::internal::GetProperty(desc, set_name));
+ Handle<String> set_name = isolate->factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("set_"));
+ Handle<Object> setter(v8::internal::GetProperty(isolate, desc, set_name));
ASSERT(!isolate->has_pending_exception());
if (!setter->IsUndefined()) {
// TODO(rossberg): nicer would be to cast to some JSCallable here...
@@ -2694,7 +2756,7 @@ MUST_USE_RESULT MaybeObject* JSProxy::DeletePropertyWithHandler(
Isolate* isolate = GetIsolate();
HandleScope scope(isolate);
Handle<JSProxy> receiver(this);
- Handle<Object> name(name_raw);
+ Handle<Object> name(name_raw, isolate);
Handle<Object> args[] = { name };
Handle<Object> result = CallTrap(
@@ -2703,8 +2765,9 @@ MUST_USE_RESULT MaybeObject* JSProxy::DeletePropertyWithHandler(
Object* bool_result = result->ToBoolean();
if (mode == STRICT_DELETION && bool_result == GetHeap()->false_value()) {
- Handle<Object> handler(receiver->handler());
- Handle<String> trap_name = isolate->factory()->LookupAsciiSymbol("delete");
+ Handle<Object> handler(receiver->handler(), isolate);
+ Handle<String> trap_name = isolate->factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("delete"));
Handle<Object> args[] = { handler, trap_name };
Handle<Object> error = isolate->factory()->NewTypeError(
"handler_failed", HandleVector(args, ARRAY_SIZE(args)));
@@ -2731,9 +2794,9 @@ MUST_USE_RESULT PropertyAttributes JSProxy::GetPropertyAttributeWithHandler(
Isolate* isolate = GetIsolate();
HandleScope scope(isolate);
Handle<JSProxy> proxy(this);
- Handle<Object> handler(this->handler()); // Trap might morph proxy.
+ Handle<Object> handler(this->handler(), isolate); // Trap might morph proxy.
Handle<JSReceiver> receiver(receiver_raw);
- Handle<Object> name(name_raw);
+ Handle<Object> name(name_raw, isolate);
Handle<Object> args[] = { name };
Handle<Object> result = CallTrap(
@@ -2750,19 +2813,22 @@ MUST_USE_RESULT PropertyAttributes JSProxy::GetPropertyAttributeWithHandler(
if (has_pending_exception) return NONE;
// Convert result to PropertyAttributes.
- Handle<String> enum_n = isolate->factory()->LookupAsciiSymbol("enumerable");
- Handle<Object> enumerable(v8::internal::GetProperty(desc, enum_n));
+ Handle<String> enum_n = isolate->factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("enumerable"));
+ Handle<Object> enumerable(v8::internal::GetProperty(isolate, desc, enum_n));
if (isolate->has_pending_exception()) return NONE;
- Handle<String> conf_n = isolate->factory()->LookupAsciiSymbol("configurable");
- Handle<Object> configurable(v8::internal::GetProperty(desc, conf_n));
+ Handle<String> conf_n = isolate->factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("configurable"));
+ Handle<Object> configurable(v8::internal::GetProperty(isolate, desc, conf_n));
if (isolate->has_pending_exception()) return NONE;
- Handle<String> writ_n = isolate->factory()->LookupAsciiSymbol("writable");
- Handle<Object> writable(v8::internal::GetProperty(desc, writ_n));
+ Handle<String> writ_n = isolate->factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("writable"));
+ Handle<Object> writable(v8::internal::GetProperty(isolate, desc, writ_n));
if (isolate->has_pending_exception()) return NONE;
if (configurable->IsFalse()) {
- Handle<String> trap =
- isolate->factory()->LookupAsciiSymbol("getPropertyDescriptor");
+ Handle<String> trap = isolate->factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("getPropertyDescriptor"));
Handle<Object> args[] = { handler, trap, name };
Handle<Object> error = isolate->factory()->NewTypeError(
"proxy_prop_not_configurable", HandleVector(args, ARRAY_SIZE(args)));
@@ -2820,10 +2886,10 @@ MUST_USE_RESULT Handle<Object> JSProxy::CallTrap(const char* name,
int argc,
Handle<Object> argv[]) {
Isolate* isolate = GetIsolate();
- Handle<Object> handler(this->handler());
+ Handle<Object> handler(this->handler(), isolate);
- Handle<String> trap_name = isolate->factory()->LookupAsciiSymbol(name);
- Handle<Object> trap(v8::internal::GetProperty(handler, trap_name));
+ Handle<String> trap_name = isolate->factory()->InternalizeUtf8String(name);
+ Handle<Object> trap(v8::internal::GetProperty(isolate, handler, trap_name));
if (isolate->has_pending_exception()) return trap;
if (trap->IsUndefined()) {
@@ -2857,25 +2923,26 @@ MaybeObject* JSObject::SetPropertyForResult(LookupResult* lookup,
StrictModeFlag strict_mode,
StoreFromKeyed store_mode) {
Heap* heap = GetHeap();
+ Isolate* isolate = heap->isolate();
// Make sure that the top context does not change when doing callbacks or
// interceptor calls.
AssertNoContextChange ncc;
// Optimization for 2-byte strings often used as keys in a decompression
- // dictionary. We make these short keys into symbols to avoid constantly
+ // dictionary. We internalize these short keys to avoid constantly
// reallocating them.
- if (!name_raw->IsSymbol() && name_raw->length() <= 2) {
- Object* symbol_version;
- { MaybeObject* maybe_symbol_version = heap->LookupSymbol(name_raw);
- if (maybe_symbol_version->ToObject(&symbol_version)) {
- name_raw = String::cast(symbol_version);
+ if (!name_raw->IsInternalizedString() && name_raw->length() <= 2) {
+ Object* internalized_version;
+ { MaybeObject* maybe_string_version = heap->InternalizeString(name_raw);
+ if (maybe_string_version->ToObject(&internalized_version)) {
+ name_raw = String::cast(internalized_version);
}
}
}
// Check access rights if needed.
if (IsAccessCheckNeeded()) {
- if (!heap->isolate()->MayNamedAccess(this, name_raw, v8::ACCESS_SET)) {
+ if (!isolate->MayNamedAccess(this, name_raw, v8::ACCESS_SET)) {
return SetPropertyWithFailedAccessCheck(
lookup, name_raw, value_raw, true, strict_mode);
}
@@ -2889,12 +2956,15 @@ MaybeObject* JSObject::SetPropertyForResult(LookupResult* lookup,
lookup, name_raw, value_raw, attributes, strict_mode, store_mode);
}
+ ASSERT(!lookup->IsFound() || lookup->holder() == this ||
+ lookup->holder()->map()->is_hidden_prototype());
+
// From this point on everything needs to be handlified, because
// SetPropertyViaPrototypes might call back into JavaScript.
- HandleScope scope(GetIsolate());
+ HandleScope scope(isolate);
Handle<JSObject> self(this);
Handle<String> name(name_raw);
- Handle<Object> value(value_raw);
+ Handle<Object> value(value_raw, isolate);
if (!lookup->IsProperty() && !self->IsJSContextExtensionObject()) {
bool done = false;
@@ -2912,16 +2982,17 @@ MaybeObject* JSObject::SetPropertyForResult(LookupResult* lookup,
if (lookup->IsProperty() && lookup->IsReadOnly()) {
if (strict_mode == kStrictMode) {
Handle<Object> args[] = { name, self };
- return heap->isolate()->Throw(*heap->isolate()->factory()->NewTypeError(
+ return isolate->Throw(*isolate->factory()->NewTypeError(
"strict_read_only_property", HandleVector(args, ARRAY_SIZE(args))));
} else {
return *value;
}
}
- Handle<Object> old_value(heap->the_hole_value());
- if (FLAG_harmony_observation && map()->is_observed()) {
- old_value = handle(lookup->GetLazyValue());
+ Handle<Object> old_value(heap->the_hole_value(), isolate);
+ if (FLAG_harmony_observation &&
+ map()->is_observed() && lookup->IsDataProperty()) {
+ old_value = Object::GetProperty(self, name);
}
// This is a real property that is not read-only, or it is a
@@ -2929,31 +3000,28 @@ MaybeObject* JSObject::SetPropertyForResult(LookupResult* lookup,
MaybeObject* result = *value;
switch (lookup->type()) {
case NORMAL:
- result = self->SetNormalizedProperty(lookup, *value);
+ result = lookup->holder()->SetNormalizedProperty(lookup, *value);
break;
case FIELD:
- result = self->FastPropertyAtPut(lookup->GetFieldIndex(), *value);
+ result = lookup->holder()->FastPropertyAtPut(
+ lookup->GetFieldIndex().field_index(), *value);
break;
case CONSTANT_FUNCTION:
// Only replace the function if necessary.
if (*value == lookup->GetConstantFunction()) return *value;
// Preserve the attributes of this existing property.
attributes = lookup->GetAttributes();
- result = self->ConvertDescriptorToField(*name, *value, attributes);
+ result =
+ lookup->holder()->ConvertDescriptorToField(*name, *value, attributes);
break;
case CALLBACKS: {
Object* callback_object = lookup->GetCallbackObject();
- return self->SetPropertyWithCallback(callback_object,
- *name,
- *value,
- lookup->holder(),
- strict_mode);
+ return self->SetPropertyWithCallback(
+ callback_object, *name, *value, lookup->holder(), strict_mode);
}
case INTERCEPTOR:
- result = self->SetPropertyWithInterceptor(*name,
- *value,
- attributes,
- strict_mode);
+ result = lookup->holder()->SetPropertyWithInterceptor(
+ *name, *value, attributes, strict_mode);
break;
case TRANSITION: {
Map* transition_map = lookup->GetTransitionTarget();
@@ -2965,15 +3033,15 @@ MaybeObject* JSObject::SetPropertyForResult(LookupResult* lookup,
if (details.type() == FIELD) {
if (attributes == details.attributes()) {
int field_index = descriptors->GetFieldIndex(descriptor);
- result = self->AddFastPropertyUsingMap(transition_map,
- *name,
- *value,
- field_index);
+ result = lookup->holder()->AddFastPropertyUsingMap(
+ transition_map, *name, *value, field_index);
} else {
- result = self->ConvertDescriptorToField(*name, *value, attributes);
+ result = lookup->holder()->ConvertDescriptorToField(
+ *name, *value, attributes);
}
} else if (details.type() == CALLBACKS) {
- result = self->ConvertDescriptorToField(*name, *value, attributes);
+ result = lookup->holder()->ConvertDescriptorToField(
+ *name, *value, attributes);
} else {
ASSERT(details.type() == CONSTANT_FUNCTION);
@@ -2981,12 +3049,12 @@ MaybeObject* JSObject::SetPropertyForResult(LookupResult* lookup,
if (constant_function == *value) {
// If the same constant function is being added we can simply
// transition to the target map.
- self->set_map(transition_map);
+ lookup->holder()->set_map(transition_map);
result = constant_function;
} else {
// Otherwise, replace with a map transition to a new map with a FIELD,
// even if the value is a constant function.
- result = self->ConvertTransitionToMapTransition(
+ result = lookup->holder()->ConvertTransitionToMapTransition(
lookup->GetTransitionIndex(), *name, *value, attributes);
}
}
@@ -2998,16 +3066,16 @@ MaybeObject* JSObject::SetPropertyForResult(LookupResult* lookup,
}
Handle<Object> hresult;
- if (!result->ToHandle(&hresult)) return result;
+ if (!result->ToHandle(&hresult, isolate)) return result;
if (FLAG_harmony_observation && map()->is_observed()) {
if (lookup->IsTransition()) {
EnqueueChangeRecord(self, "new", name, old_value);
} else {
- LookupResult new_lookup(self->GetIsolate());
- self->LocalLookup(*name, &new_lookup);
- ASSERT(!new_lookup.GetLazyValue()->IsTheHole());
- if (!new_lookup.GetLazyValue()->SameValue(*old_value)) {
+ LookupResult new_lookup(isolate);
+ self->LocalLookup(*name, &new_lookup, true);
+ if (new_lookup.IsDataProperty() &&
+ !Object::GetProperty(self, name)->SameValue(*old_value)) {
EnqueueChangeRecord(self, "updated", name, old_value);
}
}
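
[Editor's note, not patch content] Restating the record-emission rule implemented in the hunk above as a sketch: a store that took a map transition is reported as "new"; any other store re-runs the lookup and queues "updated" only if a data property's value observably changed.

// Sketch, reusing the V8-internal calls from this diff.
if (lookup->IsTransition()) {
  EnqueueChangeRecord(self, "new", name, old_value);
} else {
  LookupResult new_lookup(isolate);
  self->LocalLookup(*name, &new_lookup, true);
  bool value_changed =
      new_lookup.IsDataProperty() &&
      !Object::GetProperty(self, name)->SameValue(*old_value);
  if (value_changed) EnqueueChangeRecord(self, "updated", name, old_value);
}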
@@ -3047,7 +3115,7 @@ MaybeObject* JSObject::SetLocalPropertyIgnoreAttributes(
AssertNoContextChange ncc;
Isolate* isolate = GetIsolate();
LookupResult lookup(isolate);
- LocalLookup(name_raw, &lookup);
+ LocalLookup(name_raw, &lookup, true);
if (!lookup.IsFound()) map()->LookupTransition(this, name_raw, &lookup);
// Check access rights if needed.
if (IsAccessCheckNeeded()) {
@@ -3077,15 +3145,16 @@ MaybeObject* JSObject::SetLocalPropertyIgnoreAttributes(
}
// From this point on everything needs to be handlified.
- HandleScope scope(GetIsolate());
+ HandleScope scope(isolate);
Handle<JSObject> self(this);
Handle<String> name(name_raw);
- Handle<Object> value(value_raw);
+ Handle<Object> value(value_raw, isolate);
- Handle<Object> old_value(isolate->heap()->the_hole_value());
+ Handle<Object> old_value(isolate->heap()->the_hole_value(), isolate);
PropertyAttributes old_attributes = ABSENT;
- if (FLAG_harmony_observation && map()->is_observed()) {
- old_value = handle(lookup.GetLazyValue());
+ bool is_observed = FLAG_harmony_observation && self->map()->is_observed();
+ if (is_observed) {
+ if (lookup.IsDataProperty()) old_value = Object::GetProperty(self, name);
old_attributes = lookup.GetAttributes();
}
@@ -3098,7 +3167,8 @@ MaybeObject* JSObject::SetLocalPropertyIgnoreAttributes(
break;
}
case FIELD:
- result = self->FastPropertyAtPut(lookup.GetFieldIndex(), *value);
+ result = self->FastPropertyAtPut(
+ lookup.GetFieldIndex().field_index(), *value);
break;
case CONSTANT_FUNCTION:
// Only replace the function if necessary.
@@ -3146,19 +3216,22 @@ MaybeObject* JSObject::SetLocalPropertyIgnoreAttributes(
}
Handle<Object> hresult;
- if (!result->ToHandle(&hresult)) return result;
+ if (!result->ToHandle(&hresult, isolate)) return result;
- if (FLAG_harmony_observation && map()->is_observed()) {
+ if (is_observed) {
if (lookup.IsTransition()) {
EnqueueChangeRecord(self, "new", name, old_value);
+ } else if (old_value->IsTheHole()) {
+ EnqueueChangeRecord(self, "reconfigured", name, old_value);
} else {
LookupResult new_lookup(isolate);
- self->LocalLookup(*name, &new_lookup);
- ASSERT(!new_lookup.GetLazyValue()->IsTheHole());
- if (old_value->IsTheHole() ||
- new_lookup.GetAttributes() != old_attributes) {
+ self->LocalLookup(*name, &new_lookup, true);
+ bool value_changed = new_lookup.IsDataProperty() &&
+ !old_value->SameValue(*Object::GetProperty(self, name));
+ if (new_lookup.GetAttributes() != old_attributes) {
+ if (!value_changed) old_value = isolate->factory()->the_hole_value();
EnqueueChangeRecord(self, "reconfigured", name, old_value);
- } else if (!new_lookup.GetLazyValue()->SameValue(*old_value)) {
+ } else if (value_changed) {
EnqueueChangeRecord(self, "updated", name, old_value);
}
}
@@ -3300,7 +3373,7 @@ PropertyAttributes JSReceiver::GetLocalPropertyAttribute(String* name) {
}
// Named property.
LookupResult lookup(GetIsolate());
- LocalLookup(name, &lookup);
+ LocalLookup(name, &lookup, true);
return GetPropertyAttributeForResult(this, &lookup, name, false);
}
@@ -3371,7 +3444,7 @@ PropertyAttributes JSObject::GetElementAttributeWithInterceptor(
VMState state(isolate, EXTERNAL);
result = getter(index, info);
}
- if (!result.IsEmpty()) return DONT_ENUM;
+ if (!result.IsEmpty()) return NONE;
}
return holder->GetElementAttributeWithoutInterceptor(
@@ -3381,28 +3454,25 @@ PropertyAttributes JSObject::GetElementAttributeWithInterceptor(
PropertyAttributes JSObject::GetElementAttributeWithoutInterceptor(
JSReceiver* receiver, uint32_t index, bool continue_search) {
- Isolate* isolate = GetIsolate();
- HandleScope scope(isolate);
- Handle<JSReceiver> hreceiver(receiver);
- Handle<JSObject> holder(this);
- PropertyAttributes attr = holder->GetElementsAccessor()->GetAttributes(
- *hreceiver, *holder, index);
+ PropertyAttributes attr = GetElementsAccessor()->GetAttributes(
+ receiver, this, index);
if (attr != ABSENT) return attr;
- if (holder->IsStringObjectWithCharacterAt(index)) {
+ // Handle [] on String objects.
+ if (IsStringObjectWithCharacterAt(index)) {
return static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE);
}
if (!continue_search) return ABSENT;
- Object* pt = holder->GetPrototype();
+ Object* pt = GetPrototype();
if (pt->IsJSProxy()) {
// We need to follow the spec and simulate a call to [[GetOwnProperty]].
- return JSProxy::cast(pt)->GetElementAttributeWithHandler(*hreceiver, index);
+ return JSProxy::cast(pt)->GetElementAttributeWithHandler(receiver, index);
}
if (pt->IsNull()) return ABSENT;
return JSObject::cast(pt)->GetElementAttributeWithReceiver(
- *hreceiver, index, true);
+ receiver, index, true);
}
@@ -3590,6 +3660,7 @@ MaybeObject* JSObject::NormalizeProperties(PropertyNormalizationMode mode,
}
set_map(new_map);
+ map_of_this->NotifyLeafMapLayoutChange();
set_properties(dictionary);
@@ -3737,7 +3808,7 @@ Smi* JSReceiver::GenerateIdentityHash() {
MaybeObject* JSObject::SetIdentityHash(Smi* hash, CreationFlag flag) {
- MaybeObject* maybe = SetHiddenProperty(GetHeap()->identity_hash_symbol(),
+ MaybeObject* maybe = SetHiddenProperty(GetHeap()->identity_hash_string(),
hash);
if (maybe->IsFailure()) return maybe;
return this;
@@ -3753,14 +3824,14 @@ int JSObject::GetIdentityHash(Handle<JSObject> obj) {
MaybeObject* JSObject::GetIdentityHash(CreationFlag flag) {
- Object* stored_value = GetHiddenProperty(GetHeap()->identity_hash_symbol());
+ Object* stored_value = GetHiddenProperty(GetHeap()->identity_hash_string());
if (stored_value->IsSmi()) return stored_value;
// Do not generate permanent identity hash code if not requested.
if (flag == OMIT_CREATION) return GetHeap()->undefined_value();
Smi* hash = GenerateIdentityHash();
- MaybeObject* result = SetHiddenProperty(GetHeap()->identity_hash_symbol(),
+ MaybeObject* result = SetHiddenProperty(GetHeap()->identity_hash_string(),
hash);
if (result->IsFailure()) return result;
if (result->ToObjectUnchecked()->IsUndefined()) {
@@ -3782,7 +3853,7 @@ MaybeObject* JSProxy::GetIdentityHash(CreationFlag flag) {
Object* JSObject::GetHiddenProperty(String* key) {
- ASSERT(key->IsSymbol());
+ ASSERT(key->IsInternalizedString());
if (IsJSGlobalProxy()) {
// For a proxy, use the prototype as target object.
Object* proxy_parent = GetPrototype();
@@ -3798,7 +3869,7 @@ Object* JSObject::GetHiddenProperty(String* key) {
if (inline_value->IsSmi()) {
// Handle inline-stored identity hash.
- if (key == GetHeap()->identity_hash_symbol()) {
+ if (key == GetHeap()->identity_hash_string()) {
return inline_value;
} else {
return GetHeap()->undefined_value();
@@ -3824,7 +3895,7 @@ Handle<Object> JSObject::SetHiddenProperty(Handle<JSObject> obj,
MaybeObject* JSObject::SetHiddenProperty(String* key, Object* value) {
- ASSERT(key->IsSymbol());
+ ASSERT(key->IsInternalizedString());
if (IsJSGlobalProxy()) {
// For a proxy, use the prototype as target object.
Object* proxy_parent = GetPrototype();
@@ -3840,7 +3911,7 @@ MaybeObject* JSObject::SetHiddenProperty(String* key, Object* value) {
// If there is no backing store yet, store the identity hash inline.
if (value->IsSmi() &&
- key == GetHeap()->identity_hash_symbol() &&
+ key == GetHeap()->identity_hash_string() &&
(inline_value->IsUndefined() || inline_value->IsSmi())) {
return SetHiddenPropertiesHashTable(value);
}
@@ -3865,7 +3936,7 @@ MaybeObject* JSObject::SetHiddenProperty(String* key, Object* value) {
void JSObject::DeleteHiddenProperty(String* key) {
- ASSERT(key->IsSymbol());
+ ASSERT(key->IsInternalizedString());
if (IsJSGlobalProxy()) {
// For a proxy, use the prototype as target object.
Object* proxy_parent = GetPrototype();
@@ -3881,7 +3952,7 @@ void JSObject::DeleteHiddenProperty(String* key) {
Object* inline_value = hidden_lookup->ToObjectUnchecked();
// We never delete (inline-stored) identity hashes.
- ASSERT(key != GetHeap()->identity_hash_symbol());
+ ASSERT(key != GetHeap()->identity_hash_string());
if (inline_value->IsUndefined() || inline_value->IsSmi()) return;
ObjectHashTable* hashtable = ObjectHashTable::cast(inline_value);
@@ -3893,7 +3964,7 @@ void JSObject::DeleteHiddenProperty(String* key) {
bool JSObject::HasHiddenProperties() {
return GetPropertyAttributePostInterceptor(this,
- GetHeap()->hidden_symbol(),
+ GetHeap()->hidden_string(),
false) != ABSENT;
}
@@ -3904,13 +3975,13 @@ MaybeObject* JSObject::GetHiddenPropertiesHashTable(
Object* inline_value;
if (HasFastProperties()) {
// If the object has fast properties, check whether the first slot
- // in the descriptor array matches the hidden symbol. Since the
- // hidden symbols hash code is zero (and no other string has hash
+ // in the descriptor array matches the hidden string. Since the
+ // hidden string's hash code is zero (and no other string has hash
// code zero) it will always occupy the first entry if present.
DescriptorArray* descriptors = this->map()->instance_descriptors();
if (descriptors->number_of_descriptors() > 0) {
int sorted_index = descriptors->GetSortedKeyIndex(0);
- if (descriptors->GetKey(sorted_index) == GetHeap()->hidden_symbol() &&
+ if (descriptors->GetKey(sorted_index) == GetHeap()->hidden_string() &&
sorted_index < map()->NumberOfOwnDescriptors()) {
ASSERT(descriptors->GetType(sorted_index) == FIELD);
inline_value =
@@ -3923,12 +3994,12 @@ MaybeObject* JSObject::GetHiddenPropertiesHashTable(
}
} else {
PropertyAttributes attributes;
- // You can't install a getter on a property indexed by the hidden symbol,
+ // You can't install a getter on a property indexed by the hidden string,
// so we can be sure that GetLocalPropertyPostInterceptor returns a real
// object.
inline_value =
GetLocalPropertyPostInterceptor(this,
- GetHeap()->hidden_symbol(),
+ GetHeap()->hidden_string(),
&attributes)->ToObjectUnchecked();
}
@@ -3948,7 +4019,7 @@ MaybeObject* JSObject::GetHiddenPropertiesHashTable(
// We were storing the identity hash inline and now allocated an actual
// dictionary. Put the identity hash into the new dictionary.
MaybeObject* insert_result =
- hashtable->Put(GetHeap()->identity_hash_symbol(), inline_value);
+ hashtable->Put(GetHeap()->identity_hash_string(), inline_value);
ObjectHashTable* new_table;
if (!insert_result->To(&new_table)) return insert_result;
// We expect no resizing for the first insert.
@@ -3956,7 +4027,7 @@ MaybeObject* JSObject::GetHiddenPropertiesHashTable(
}
MaybeObject* store_result =
- SetPropertyPostInterceptor(GetHeap()->hidden_symbol(),
+ SetPropertyPostInterceptor(GetHeap()->hidden_string(),
hashtable,
DONT_ENUM,
kNonStrictMode,
@@ -3973,13 +4044,13 @@ MaybeObject* JSObject::SetHiddenPropertiesHashTable(Object* value) {
ASSERT(HasHiddenProperties() != value->IsSmi());
if (HasFastProperties()) {
// If the object has fast properties, check whether the first slot
- // in the descriptor array matches the hidden symbol. Since the
- // hidden symbols hash code is zero (and no other string has hash
+ // in the descriptor array matches the hidden string. Since the
+ // hidden string's hash code is zero (and no other string has hash
// code zero) it will always occupy the first entry if present.
DescriptorArray* descriptors = this->map()->instance_descriptors();
if (descriptors->number_of_descriptors() > 0) {
int sorted_index = descriptors->GetSortedKeyIndex(0);
- if (descriptors->GetKey(sorted_index) == GetHeap()->hidden_symbol() &&
+ if (descriptors->GetKey(sorted_index) == GetHeap()->hidden_string() &&
sorted_index < map()->NumberOfOwnDescriptors()) {
ASSERT(descriptors->GetType(sorted_index) == FIELD);
this->FastPropertyAtPut(descriptors->GetFieldIndex(sorted_index),
@@ -3989,7 +4060,7 @@ MaybeObject* JSObject::SetHiddenPropertiesHashTable(Object* value) {
}
}
MaybeObject* store_result =
- SetPropertyPostInterceptor(GetHeap()->hidden_symbol(),
+ SetPropertyPostInterceptor(GetHeap()->hidden_string(),
value,
DONT_ENUM,
kNonStrictMode,
@@ -4109,7 +4180,7 @@ MaybeObject* JSObject::DeleteElement(uint32_t index, DeleteMode mode) {
if (mode == STRICT_DELETION) {
// Deleting a non-configurable property in strict mode.
HandleScope scope(isolate);
- Handle<Object> holder(this);
+ Handle<Object> holder(this, isolate);
Handle<Object> name = isolate->factory()->NewNumberFromUint(index);
Handle<Object> args[2] = { name, holder };
Handle<Object> error =
@@ -4131,15 +4202,14 @@ MaybeObject* JSObject::DeleteElement(uint32_t index, DeleteMode mode) {
HandleScope scope(isolate);
Handle<JSObject> self(this);
- Handle<String> name;
- Handle<Object> old_value(isolate->heap()->the_hole_value());
- bool preexists = false;
- if (FLAG_harmony_observation && map()->is_observed()) {
- name = isolate->factory()->Uint32ToString(index);
- preexists = self->HasLocalElement(index);
- if (preexists) {
- // TODO(observe): only read & set old_value if it's not an accessor
- old_value = Object::GetElement(self, index);
+ Handle<Object> old_value;
+ bool should_enqueue_change_record = false;
+ if (FLAG_harmony_observation && self->map()->is_observed()) {
+ should_enqueue_change_record = self->HasLocalElement(index);
+ if (should_enqueue_change_record) {
+ old_value = self->GetLocalElementAccessorPair(index) != NULL
+ ? Handle<Object>::cast(isolate->factory()->the_hole_value())
+ : Object::GetElement(self, index);
}
}
@@ -4152,11 +4222,11 @@ MaybeObject* JSObject::DeleteElement(uint32_t index, DeleteMode mode) {
}
Handle<Object> hresult;
- if (!result->ToHandle(&hresult)) return result;
+ if (!result->ToHandle(&hresult, isolate)) return result;
- if (FLAG_harmony_observation && map()->is_observed()) {
- if (preexists && !self->HasLocalElement(index))
- EnqueueChangeRecord(self, "deleted", name, old_value);
+ if (should_enqueue_change_record && !self->HasLocalElement(index)) {
+ Handle<String> name = isolate->factory()->Uint32ToString(index);
+ EnqueueChangeRecord(self, "deleted", name, old_value);
}
return *hresult;
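
[Editor's note, not patch content] The deletion path above now snapshots the old value only for data elements; accessor elements report the hole. A hedged restatement as a helper (the helper name is hypothetical, the calls come from this diff):

// Hypothetical helper restating the old-value rule for observed
// element deletion, as implemented in the hunk above.
static Handle<Object> OldValueForElementDelete(Handle<JSObject> self,
                                               uint32_t index,
                                               Isolate* isolate) {
  return self->GetLocalElementAccessorPair(index) != NULL
      ? Handle<Object>::cast(isolate->factory()->the_hole_value())
      : Object::GetElement(self, index);
}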
@@ -4196,14 +4266,15 @@ MaybeObject* JSObject::DeleteProperty(String* name, DeleteMode mode) {
}
LookupResult lookup(isolate);
- LocalLookup(name, &lookup);
+ LocalLookup(name, &lookup, true);
if (!lookup.IsFound()) return isolate->heap()->true_value();
// Ignore attributes if forcing a deletion.
if (lookup.IsDontDelete() && mode != FORCE_DELETION) {
if (mode == STRICT_DELETION) {
// Deleting a non-configurable property in strict mode.
HandleScope scope(isolate);
- Handle<Object> args[2] = { Handle<Object>(name), Handle<Object>(this) };
+ Handle<Object> args[2] = { Handle<Object>(name, isolate),
+ Handle<Object>(this, isolate) };
return isolate->Throw(*isolate->factory()->NewTypeError(
"strict_delete_property", HandleVector(args, 2)));
}
@@ -4215,9 +4286,10 @@ MaybeObject* JSObject::DeleteProperty(String* name, DeleteMode mode) {
Handle<JSObject> self(this);
Handle<String> hname(name);
- Handle<Object> old_value(isolate->heap()->the_hole_value());
- if (FLAG_harmony_observation && map()->is_observed()) {
- old_value = handle(lookup.GetLazyValue());
+ Handle<Object> old_value = isolate->factory()->the_hole_value();
+ bool is_observed = FLAG_harmony_observation && self->map()->is_observed();
+ if (is_observed && lookup.IsDataProperty()) {
+ old_value = Object::GetProperty(self, hname);
}
MaybeObject* result;
@@ -4239,11 +4311,10 @@ MaybeObject* JSObject::DeleteProperty(String* name, DeleteMode mode) {
}
Handle<Object> hresult;
- if (!result->ToHandle(&hresult)) return result;
+ if (!result->ToHandle(&hresult, isolate)) return result;
- if (FLAG_harmony_observation && map()->is_observed()) {
- if (!self->HasLocalProperty(*hname))
- EnqueueChangeRecord(self, "deleted", hname, old_value);
+ if (is_observed && !self->HasLocalProperty(*hname)) {
+ EnqueueChangeRecord(self, "deleted", hname, old_value);
}
return *hresult;
@@ -4422,7 +4493,7 @@ MaybeObject* JSObject::PreventExtensions() {
// It's not possible to seal objects with external array elements
if (HasExternalArrayElements()) {
HandleScope scope(isolate);
- Handle<Object> object(this);
+ Handle<Object> object(this, isolate);
Handle<Object> error =
isolate->factory()->NewTypeError(
"cant_prevent_ext_external_array_elements",
@@ -4527,6 +4598,7 @@ AccessorDescriptor* Map::FindAccessor(String* name) {
void JSReceiver::LocalLookup(String* name, LookupResult* result,
+ bool search_hidden_prototypes,
bool skip_fallback_interceptor) {
ASSERT(name->IsString());
@@ -4536,7 +4608,8 @@ void JSReceiver::LocalLookup(String* name, LookupResult* result,
Object* proto = GetPrototype();
if (proto->IsNull()) return result->NotFound();
ASSERT(proto->IsJSGlobalObject());
- return JSReceiver::cast(proto)->LocalLookup(name, result);
+ return JSReceiver::cast(proto)->LocalLookup(
+ name, result, search_hidden_prototypes);
}
if (IsJSProxy()) {
@@ -4552,12 +4625,6 @@ void JSReceiver::LocalLookup(String* name, LookupResult* result,
JSObject* js_object = JSObject::cast(this);
- // Check __proto__ before interceptor.
- if (name->Equals(heap->Proto_symbol()) && !IsJSContextExtensionObject()) {
- result->ConstantResult(js_object);
- return;
- }
-
// Check for lookup interceptor except when bootstrapping.
bool wouldIntercept = js_object->HasNamedInterceptor() &&
!heap->isolate()->bootstrapper()->IsActive();
@@ -4568,10 +4635,23 @@ void JSReceiver::LocalLookup(String* name, LookupResult* result,
js_object->LocalLookupRealNamedProperty(name, result);
+ if (result->IsFound()) return;
+
+ if (search_hidden_prototypes) {
+ Object* proto = js_object->GetPrototype();
+
+ if (proto->IsJSReceiver()) {
+ JSReceiver* receiver = JSReceiver::cast(proto);
+ if (receiver->map()->is_hidden_prototype()) {
+ receiver->LocalLookup(name, result, search_hidden_prototypes);
+ return;
+ }
+ }
+ }
+
if (wouldIntercept && !skip_fallback_interceptor && !result->IsProperty() &&
map()->named_interceptor_is_fallback()) {
result->InterceptorResult(js_object);
- return;
}
}
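
[Editor's note, not patch content] For orientation, the hidden-prototype walk added above in compressed form (a sketch reusing the names from this diff): after a failed local lookup, the search falls through to the prototype only when its map carries the hidden-prototype flag, which keeps API-created prototype chains invisible to user code.

// Sketch of the added hidden-prototype fall-through.
js_object->LocalLookupRealNamedProperty(name, result);
if (!result->IsFound() && search_hidden_prototypes) {
  Object* proto = js_object->GetPrototype();
  if (proto->IsJSReceiver() &&
      JSReceiver::cast(proto)->map()->is_hidden_prototype()) {
    // Recurse into the hidden prototype as if it were the object itself.
    JSReceiver::cast(proto)->LocalLookup(name, result,
                                         search_hidden_prototypes);
  }
}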
@@ -4584,8 +4664,7 @@ void JSReceiver::Lookup(String* name,
for (Object* current = this;
current != heap->null_value();
current = JSObject::cast(current)->GetPrototype()) {
- JSReceiver::cast(current)->LocalLookup(name,
- result,
+ JSReceiver::cast(current)->LocalLookup(name, result, false,
skip_fallback_interceptor);
if (result->IsFound()) return;
}
@@ -4889,29 +4968,31 @@ MaybeObject* JSObject::DefineAccessor(String* name_raw,
if (!CanSetCallback(name_raw)) return isolate->heap()->undefined_value();
// From this point on everything needs to be handlified.
- HandleScope scope(GetIsolate());
+ HandleScope scope(isolate);
Handle<JSObject> self(this);
Handle<String> name(name_raw);
- Handle<Object> getter(getter_raw);
- Handle<Object> setter(setter_raw);
+ Handle<Object> getter(getter_raw, isolate);
+ Handle<Object> setter(setter_raw, isolate);
uint32_t index = 0;
bool is_element = name->AsArrayIndex(&index);
- Handle<Object> old_value(isolate->heap()->the_hole_value());
+ Handle<Object> old_value = isolate->factory()->the_hole_value();
+ bool is_observed = FLAG_harmony_observation && self->map()->is_observed();
bool preexists = false;
- if (FLAG_harmony_observation && map()->is_observed()) {
+ if (is_observed) {
if (is_element) {
preexists = HasLocalElement(index);
- if (preexists) {
- // TODO(observe): distinguish the case where it's an accessor
+ if (preexists && self->GetLocalElementAccessorPair(index) == NULL) {
old_value = Object::GetElement(self, index);
}
} else {
LookupResult lookup(isolate);
- LocalLookup(*name, &lookup);
+ LocalLookup(*name, &lookup, true);
preexists = lookup.IsProperty();
- if (preexists) old_value = handle(lookup.GetLazyValue());
+ if (preexists && lookup.IsDataProperty()) {
+ old_value = Object::GetProperty(self, name);
+ }
}
}
@@ -4920,9 +5001,9 @@ MaybeObject* JSObject::DefineAccessor(String* name_raw,
self->DefinePropertyAccessor(*name, *getter, *setter, attributes);
Handle<Object> hresult;
- if (!result->ToHandle(&hresult)) return result;
+ if (!result->ToHandle(&hresult, isolate)) return result;
- if (FLAG_harmony_observation && map()->is_observed()) {
+ if (is_observed) {
const char* type = preexists ? "reconfigured" : "new";
EnqueueChangeRecord(self, type, name, old_value);
}
@@ -5107,7 +5188,7 @@ MaybeObject* JSObject::DefineAccessor(AccessorInfo* info) {
} else {
// Lookup the name.
LookupResult result(isolate);
- LocalLookup(name, &result);
+ LocalLookup(name, &result, true);
// ES5 forbids turning a property into an accessor if it's not
// configurable (that is IsDontDelete in ES3 and v8), see 8.6.1 (Table 5).
if (result.IsFound() && (result.IsReadOnly() || result.IsDontDelete())) {
@@ -5233,7 +5314,6 @@ MaybeObject* Map::CopyNormalized(PropertyNormalizationMode mode,
result->set_inobject_properties(inobject_properties());
}
- result->set_code_cache(code_cache());
result->set_is_shared(sharing == SHARED_NORMALIZED_MAP);
result->set_dictionary_map(true);
@@ -5259,6 +5339,7 @@ MaybeObject* Map::CopyDropDescriptors() {
result->set_pre_allocated_property_fields(pre_allocated_property_fields());
result->set_is_shared(false);
result->ClearCodeCache(GetHeap());
+ NotifyLeafMapLayoutChange();
return result;
}
@@ -5455,8 +5536,8 @@ MaybeObject* Map::CopyAddDescriptor(Descriptor* descriptor,
TransitionFlag flag) {
DescriptorArray* descriptors = instance_descriptors();
- // Ensure the key is a symbol.
- MaybeObject* maybe_failure = descriptor->KeyToSymbol();
+ // Ensure the key is an internalized string.
+ MaybeObject* maybe_failure = descriptor->KeyToInternalizedString();
if (maybe_failure->IsFailure()) return maybe_failure;
int old_size = NumberOfOwnDescriptors();
@@ -5499,8 +5580,8 @@ MaybeObject* Map::CopyInsertDescriptor(Descriptor* descriptor,
TransitionFlag flag) {
DescriptorArray* old_descriptors = instance_descriptors();
- // Ensure the key is a symbol.
- MaybeObject* maybe_result = descriptor->KeyToSymbol();
+ // Ensure the key is an internalized string.
+ MaybeObject* maybe_result = descriptor->KeyToInternalizedString();
if (maybe_result->IsFailure()) return maybe_result;
// We replace the key if it is already present.
@@ -5536,8 +5617,8 @@ MaybeObject* Map::CopyReplaceDescriptor(DescriptorArray* descriptors,
Descriptor* descriptor,
int insertion_index,
TransitionFlag flag) {
- // Ensure the key is a symbol.
- MaybeObject* maybe_failure = descriptor->KeyToSymbol();
+ // Ensure the key is an internalized string.
+ MaybeObject* maybe_failure = descriptor->KeyToInternalizedString();
if (maybe_failure->IsFailure()) return maybe_failure;
String* key = descriptor->GetKey();
@@ -6133,7 +6214,7 @@ Handle<Object> PolymorphicCodeCache::Lookup(MapHandleList* maps,
if (!cache()->IsUndefined()) {
PolymorphicCodeCacheHashTable* hash_table =
PolymorphicCodeCacheHashTable::cast(cache());
- return Handle<Object>(hash_table->Lookup(maps, flags));
+ return Handle<Object>(hash_table->Lookup(maps, flags), GetIsolate());
} else {
return GetIsolate()->factory()->undefined_value();
}
@@ -6522,13 +6603,13 @@ String::FlatContent String::GetFlatContent() {
shape.representation_tag() != kSlicedStringTag);
}
if (shape.encoding_tag() == kOneByteStringTag) {
- const char* start;
+ const uint8_t* start;
if (shape.representation_tag() == kSeqStringTag) {
- start = SeqAsciiString::cast(string)->GetChars();
+ start = SeqOneByteString::cast(string)->GetChars();
} else {
start = ExternalAsciiString::cast(string)->GetChars();
}
- return FlatContent(Vector<const char>(start + offset, length));
+ return FlatContent(Vector<const uint8_t>(start + offset, length));
} else {
ASSERT(shape.encoding_tag() == kTwoByteStringTag);
const uc16* start;
@@ -6556,14 +6637,14 @@ SmartArrayPointer<char> String::ToCString(AllowNullsFlag allow_nulls,
if (length < 0) length = kMaxInt - offset;
// Compute the size of the UTF-8 string. Start at the specified offset.
- Access<StringInputBuffer> buffer(
- heap->isolate()->objects_string_input_buffer());
- buffer->Reset(offset, this);
+ Access<ConsStringIteratorOp> op(
+ heap->isolate()->objects_string_iterator());
+ StringCharacterStream stream(this, op.value(), offset);
int character_position = offset;
int utf8_bytes = 0;
int last = unibrow::Utf16::kNoPreviousCharacter;
- while (buffer->has_more() && character_position++ < offset + length) {
- uint16_t character = buffer->GetNext();
+ while (stream.HasMore() && character_position++ < offset + length) {
+ uint16_t character = stream.GetNext();
utf8_bytes += unibrow::Utf8::Length(character, last);
last = character;
}
@@ -6575,13 +6656,12 @@ SmartArrayPointer<char> String::ToCString(AllowNullsFlag allow_nulls,
char* result = NewArray<char>(utf8_bytes + 1);
// Convert the UTF-16 string to a UTF-8 buffer. Start at the specified offset.
- buffer->Rewind();
- buffer->Seek(offset);
+ stream.Reset(this, offset);
character_position = offset;
int utf8_byte_position = 0;
last = unibrow::Utf16::kNoPreviousCharacter;
- while (buffer->has_more() && character_position++ < offset + length) {
- uint16_t character = buffer->GetNext();
+ while (stream.HasMore() && character_position++ < offset + length) {
+ uint16_t character = stream.GetNext();
if (allow_nulls == DISALLOW_NULLS && character == 0) {
character = ' ';
}
@@ -6607,7 +6687,7 @@ const uc16* String::GetTwoByteData() {
const uc16* String::GetTwoByteData(unsigned start) {
- ASSERT(!IsAsciiRepresentationUnderneath());
+ ASSERT(!IsOneByteRepresentationUnderneath());
switch (StringShape(this).representation_tag()) {
case kSeqStringTag:
return SeqTwoByteString::cast(this)->SeqTwoByteStringGetData(start);
@@ -6633,15 +6713,15 @@ SmartArrayPointer<uc16> String::ToWideCString(RobustnessFlag robust_flag) {
}
Heap* heap = GetHeap();
- Access<StringInputBuffer> buffer(
- heap->isolate()->objects_string_input_buffer());
- buffer->Reset(this);
+ Access<ConsStringIteratorOp> op(
+ heap->isolate()->objects_string_iterator());
+ StringCharacterStream stream(this, op.value());
uc16* result = NewArray<uc16>(length() + 1);
int i = 0;
- while (buffer->has_more()) {
- uint16_t character = buffer->GetNext();
+ while (stream.HasMore()) {
+ uint16_t character = stream.GetNext();
result[i++] = character;
}
result[i] = 0;
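
[Editor's note, not patch content] Both conversion routines above now share the same streaming idiom in place of StringInputBuffer. A usage sketch mirroring the hunks, with str standing in for any possibly-cons String*:

// Usage sketch of the new streaming idiom from the hunks above.
Access<ConsStringIteratorOp> op(isolate->objects_string_iterator());
StringCharacterStream stream(str, op.value(), 0 /* offset */);
while (stream.HasMore()) {
  uint16_t c = stream.GetNext();  // code unit; cons boundaries hidden
  // ... consume c ...
}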
@@ -6655,252 +6735,6 @@ const uc16* SeqTwoByteString::SeqTwoByteStringGetData(unsigned start) {
}
-void SeqTwoByteString::SeqTwoByteStringReadBlockIntoBuffer(ReadBlockBuffer* rbb,
- unsigned* offset_ptr,
- unsigned max_chars) {
- unsigned chars_read = 0;
- unsigned offset = *offset_ptr;
- while (chars_read < max_chars) {
- uint16_t c = *reinterpret_cast<uint16_t*>(
- reinterpret_cast<char*>(this) -
- kHeapObjectTag + kHeaderSize + offset * kShortSize);
- if (c <= kMaxAsciiCharCode) {
- // Fast case for ASCII characters. Cursor is an input output argument.
- if (!unibrow::CharacterStream::EncodeAsciiCharacter(c,
- rbb->util_buffer,
- rbb->capacity,
- rbb->cursor)) {
- break;
- }
- } else {
- if (!unibrow::CharacterStream::EncodeNonAsciiCharacter(c,
- rbb->util_buffer,
- rbb->capacity,
- rbb->cursor)) {
- break;
- }
- }
- offset++;
- chars_read++;
- }
- *offset_ptr = offset;
- rbb->remaining += chars_read;
-}
-
-
-const unibrow::byte* SeqAsciiString::SeqAsciiStringReadBlock(
- unsigned* remaining,
- unsigned* offset_ptr,
- unsigned max_chars) {
- const unibrow::byte* b = reinterpret_cast<unibrow::byte*>(this) -
- kHeapObjectTag + kHeaderSize + *offset_ptr * kCharSize;
- *remaining = max_chars;
- *offset_ptr += max_chars;
- return b;
-}
-
-
-// This will iterate unless the block of string data spans two 'halves' of
-// a ConsString, in which case it will recurse. Since the block of string
-// data to be read has a maximum size this limits the maximum recursion
-// depth to something sane. Since C++ does not have tail call recursion
-// elimination, the iteration must be explicit. Since this is not an
-// -IntoBuffer method it can delegate to one of the efficient
-// *AsciiStringReadBlock routines.
-const unibrow::byte* ConsString::ConsStringReadBlock(ReadBlockBuffer* rbb,
- unsigned* offset_ptr,
- unsigned max_chars) {
- ConsString* current = this;
- unsigned offset = *offset_ptr;
- int offset_correction = 0;
-
- while (true) {
- String* left = current->first();
- unsigned left_length = (unsigned)left->length();
- if (left_length > offset &&
- (max_chars <= left_length - offset ||
- (rbb->capacity <= left_length - offset &&
- (max_chars = left_length - offset, true)))) { // comma operator!
- // Left hand side only - iterate unless we have reached the bottom of
- // the cons tree. The assignment on the left of the comma operator is
- // in order to make use of the fact that the -IntoBuffer routines can
- // produce at most 'capacity' characters. This enables us to postpone
- // the point where we switch to the -IntoBuffer routines (below) in order
- // to maximize the chances of delegating a big chunk of work to the
- // efficient *AsciiStringReadBlock routines.
- if (StringShape(left).IsCons()) {
- current = ConsString::cast(left);
- continue;
- } else {
- const unibrow::byte* answer =
- String::ReadBlock(left, rbb, &offset, max_chars);
- *offset_ptr = offset + offset_correction;
- return answer;
- }
- } else if (left_length <= offset) {
- // Right hand side only - iterate unless we have reached the bottom of
- // the cons tree.
- String* right = current->second();
- offset -= left_length;
- offset_correction += left_length;
- if (StringShape(right).IsCons()) {
- current = ConsString::cast(right);
- continue;
- } else {
- const unibrow::byte* answer =
- String::ReadBlock(right, rbb, &offset, max_chars);
- *offset_ptr = offset + offset_correction;
- return answer;
- }
- } else {
- // The block to be read spans two sides of the ConsString, so we call the
- // -IntoBuffer version, which will recurse. The -IntoBuffer methods
- // are able to assemble data from several part strings because they use
- // the util_buffer to store their data and never return direct pointers
- // to their storage. We don't try to read more than the buffer capacity
- // here or we can get too much recursion.
- ASSERT(rbb->remaining == 0);
- ASSERT(rbb->cursor == 0);
- current->ConsStringReadBlockIntoBuffer(
- rbb,
- &offset,
- max_chars > rbb->capacity ? rbb->capacity : max_chars);
- *offset_ptr = offset + offset_correction;
- return rbb->util_buffer;
- }
- }
-}
-
-
-const unibrow::byte* ExternalAsciiString::ExternalAsciiStringReadBlock(
- unsigned* remaining,
- unsigned* offset_ptr,
- unsigned max_chars) {
- // Cast const char* to unibrow::byte* (signedness difference).
- const unibrow::byte* b =
- reinterpret_cast<const unibrow::byte*>(GetChars()) + *offset_ptr;
- *remaining = max_chars;
- *offset_ptr += max_chars;
- return b;
-}
-
-
-void ExternalTwoByteString::ExternalTwoByteStringReadBlockIntoBuffer(
- ReadBlockBuffer* rbb,
- unsigned* offset_ptr,
- unsigned max_chars) {
- unsigned chars_read = 0;
- unsigned offset = *offset_ptr;
- const uint16_t* data = GetChars();
- while (chars_read < max_chars) {
- uint16_t c = data[offset];
- if (c <= kMaxAsciiCharCode) {
- // Fast case for ASCII characters. Cursor is an input output argument.
- if (!unibrow::CharacterStream::EncodeAsciiCharacter(c,
- rbb->util_buffer,
- rbb->capacity,
- rbb->cursor))
- break;
- } else {
- if (!unibrow::CharacterStream::EncodeNonAsciiCharacter(c,
- rbb->util_buffer,
- rbb->capacity,
- rbb->cursor))
- break;
- }
- offset++;
- chars_read++;
- }
- *offset_ptr = offset;
- rbb->remaining += chars_read;
-}
-
-
-void SeqAsciiString::SeqAsciiStringReadBlockIntoBuffer(ReadBlockBuffer* rbb,
- unsigned* offset_ptr,
- unsigned max_chars) {
- unsigned capacity = rbb->capacity - rbb->cursor;
- if (max_chars > capacity) max_chars = capacity;
- memcpy(rbb->util_buffer + rbb->cursor,
- reinterpret_cast<char*>(this) - kHeapObjectTag + kHeaderSize +
- *offset_ptr * kCharSize,
- max_chars);
- rbb->remaining += max_chars;
- *offset_ptr += max_chars;
- rbb->cursor += max_chars;
-}
-
-
-void ExternalAsciiString::ExternalAsciiStringReadBlockIntoBuffer(
- ReadBlockBuffer* rbb,
- unsigned* offset_ptr,
- unsigned max_chars) {
- unsigned capacity = rbb->capacity - rbb->cursor;
- if (max_chars > capacity) max_chars = capacity;
- memcpy(rbb->util_buffer + rbb->cursor, GetChars() + *offset_ptr, max_chars);
- rbb->remaining += max_chars;
- *offset_ptr += max_chars;
- rbb->cursor += max_chars;
-}
-
-
-// This method determines the type of string involved and then copies
-// a whole chunk of characters into a buffer, or returns a pointer to a buffer
-// where they can be found. The pointer is not necessarily valid across a GC
-// (see AsciiStringReadBlock).
-const unibrow::byte* String::ReadBlock(String* input,
- ReadBlockBuffer* rbb,
- unsigned* offset_ptr,
- unsigned max_chars) {
- ASSERT(*offset_ptr <= static_cast<unsigned>(input->length()));
- if (max_chars == 0) {
- rbb->remaining = 0;
- return NULL;
- }
- switch (StringShape(input).representation_tag()) {
- case kSeqStringTag:
- if (input->IsAsciiRepresentation()) {
- SeqAsciiString* str = SeqAsciiString::cast(input);
- return str->SeqAsciiStringReadBlock(&rbb->remaining,
- offset_ptr,
- max_chars);
- } else {
- SeqTwoByteString* str = SeqTwoByteString::cast(input);
- str->SeqTwoByteStringReadBlockIntoBuffer(rbb,
- offset_ptr,
- max_chars);
- return rbb->util_buffer;
- }
- case kConsStringTag:
- return ConsString::cast(input)->ConsStringReadBlock(rbb,
- offset_ptr,
- max_chars);
- case kExternalStringTag:
- if (input->IsAsciiRepresentation()) {
- return ExternalAsciiString::cast(input)->ExternalAsciiStringReadBlock(
- &rbb->remaining,
- offset_ptr,
- max_chars);
- } else {
- ExternalTwoByteString::cast(input)->
- ExternalTwoByteStringReadBlockIntoBuffer(rbb,
- offset_ptr,
- max_chars);
- return rbb->util_buffer;
- }
- case kSlicedStringTag:
- return SlicedString::cast(input)->SlicedStringReadBlock(rbb,
- offset_ptr,
- max_chars);
- default:
- break;
- }
-
- UNREACHABLE();
- return 0;
-}
-
-
void Relocatable::PostGarbageCollectionProcessing() {
Isolate* isolate = Isolate::Current();
Relocatable* current = isolate->relocatable_top();
@@ -6978,168 +6812,145 @@ void FlatStringReader::PostGarbageCollection() {
ASSERT(content.IsFlat());
is_ascii_ = content.IsAscii();
if (is_ascii_) {
- start_ = content.ToAsciiVector().start();
+ start_ = content.ToOneByteVector().start();
} else {
start_ = content.ToUC16Vector().start();
}
}
-void StringInputBuffer::Seek(unsigned pos) {
- Reset(pos, input_);
+String* ConsStringIteratorOp::Operate(String* string,
+ unsigned* offset_out,
+ int32_t* type_out,
+ unsigned* length_out) {
+ ASSERT(string->IsConsString());
+ ConsString* cons_string = ConsString::cast(string);
+ // Set up search data.
+ root_ = cons_string;
+ consumed_ = *offset_out;
+ // Now search.
+ return Search(offset_out, type_out, length_out);
}
-void SafeStringInputBuffer::Seek(unsigned pos) {
- Reset(pos, input_);
-}
-
-
-// This method determines the type of string involved and then copies
-// a whole chunk of characters into a buffer. It can be used with strings
-// that have been glued together to form a ConsString and which must cooperate
-// to fill up a buffer.
-void String::ReadBlockIntoBuffer(String* input,
- ReadBlockBuffer* rbb,
- unsigned* offset_ptr,
- unsigned max_chars) {
- ASSERT(*offset_ptr <= (unsigned)input->length());
- if (max_chars == 0) return;
-
- switch (StringShape(input).representation_tag()) {
- case kSeqStringTag:
- if (input->IsAsciiRepresentation()) {
- SeqAsciiString::cast(input)->SeqAsciiStringReadBlockIntoBuffer(rbb,
- offset_ptr,
- max_chars);
- return;
- } else {
- SeqTwoByteString::cast(input)->SeqTwoByteStringReadBlockIntoBuffer(rbb,
- offset_ptr,
- max_chars);
- return;
+String* ConsStringIteratorOp::Search(unsigned* offset_out,
+ int32_t* type_out,
+ unsigned* length_out) {
+ ConsString* cons_string = root_;
+ // Reset the stack, pushing the root string.
+ depth_ = 1;
+ maximum_depth_ = 1;
+ frames_[0] = cons_string;
+ const unsigned consumed = consumed_;
+ unsigned offset = 0;
+ while (true) {
+ // Loop until we find the string that contains the target offset.
+ String* string = cons_string->first();
+ unsigned length = string->length();
+ int32_t type;
+ if (consumed < offset + length) {
+ // Target offset is in the left branch.
+ // Keep going if we're still in a ConsString.
+ type = string->map()->instance_type();
+ if ((type & kStringRepresentationMask) == kConsStringTag) {
+ cons_string = ConsString::cast(string);
+ PushLeft(cons_string);
+ continue;
}
- case kConsStringTag:
- ConsString::cast(input)->ConsStringReadBlockIntoBuffer(rbb,
- offset_ptr,
- max_chars);
- return;
- case kExternalStringTag:
- if (input->IsAsciiRepresentation()) {
- ExternalAsciiString::cast(input)->
- ExternalAsciiStringReadBlockIntoBuffer(rbb, offset_ptr, max_chars);
- } else {
- ExternalTwoByteString::cast(input)->
- ExternalTwoByteStringReadBlockIntoBuffer(rbb,
- offset_ptr,
- max_chars);
- }
- return;
- case kSlicedStringTag:
- SlicedString::cast(input)->SlicedStringReadBlockIntoBuffer(rbb,
- offset_ptr,
- max_chars);
- return;
- default:
- break;
+ // Tell the stack we're done descending.
+ AdjustMaximumDepth();
+ } else {
+ // Descend right.
+ // Update progress through the string.
+ offset += length;
+ // Keep going if we're still in a ConsString.
+ string = cons_string->second();
+ type = string->map()->instance_type();
+ if ((type & kStringRepresentationMask) == kConsStringTag) {
+ cons_string = ConsString::cast(string);
+ PushRight(cons_string);
+ // TODO(dcarney) Add back root optimization.
+ continue;
+ }
+ // Need this to be updated for the current string.
+ length = string->length();
+ // Account for the possibility of an empty right leaf.
+ // This happens only if we have asked for an offset outside the string.
+ if (length == 0) {
+ // Reset depth so future operations will return null immediately.
+ Reset();
+ return NULL;
+ }
+ // Tell the stack we're done descending.
+ AdjustMaximumDepth();
+ // Pop stack so next iteration is in correct place.
+ Pop();
+ }
+ ASSERT(length != 0);
+ // Adjust return values and exit.
+ consumed_ = offset + length;
+ *offset_out = consumed - offset;
+ *type_out = type;
+ *length_out = length;
+ return string;
}
-
UNREACHABLE();
- return;
+ return NULL;
}
-const unibrow::byte* String::ReadBlock(String* input,
- unibrow::byte* util_buffer,
- unsigned capacity,
- unsigned* remaining,
- unsigned* offset_ptr) {
- ASSERT(*offset_ptr <= (unsigned)input->length());
- unsigned chars = input->length() - *offset_ptr;
- ReadBlockBuffer rbb(util_buffer, 0, capacity, 0);
- const unibrow::byte* answer = ReadBlock(input, &rbb, offset_ptr, chars);
- ASSERT(rbb.remaining <= static_cast<unsigned>(input->length()));
- *remaining = rbb.remaining;
- return answer;
-}
-
-
-const unibrow::byte* String::ReadBlock(String** raw_input,
- unibrow::byte* util_buffer,
- unsigned capacity,
- unsigned* remaining,
- unsigned* offset_ptr) {
- Handle<String> input(raw_input);
- ASSERT(*offset_ptr <= (unsigned)input->length());
- unsigned chars = input->length() - *offset_ptr;
- if (chars > capacity) chars = capacity;
- ReadBlockBuffer rbb(util_buffer, 0, capacity, 0);
- ReadBlockIntoBuffer(*input, &rbb, offset_ptr, chars);
- ASSERT(rbb.remaining <= static_cast<unsigned>(input->length()));
- *remaining = rbb.remaining;
- return rbb.util_buffer;
-}
-
-
-// This will iterate unless the block of string data spans two 'halves' of
-// a ConsString, in which case it will recurse. Since the block of string
-// data to be read has a maximum size this limits the maximum recursion
-// depth to something sane. Since C++ does not have tail call recursion
-// elimination, the iteration must be explicit.
-void ConsString::ConsStringReadBlockIntoBuffer(ReadBlockBuffer* rbb,
- unsigned* offset_ptr,
- unsigned max_chars) {
- ConsString* current = this;
- unsigned offset = *offset_ptr;
- int offset_correction = 0;
-
+String* ConsStringIteratorOp::NextLeaf(bool* blew_stack,
+ int32_t* type_out,
+ unsigned* length_out) {
while (true) {
- String* left = current->first();
- unsigned left_length = (unsigned)left->length();
- if (left_length > offset &&
- max_chars <= left_length - offset) {
- // Left hand side only - iterate unless we have reached the bottom of
- // the cons tree.
- if (StringShape(left).IsCons()) {
- current = ConsString::cast(left);
- continue;
- } else {
- String::ReadBlockIntoBuffer(left, rbb, &offset, max_chars);
- *offset_ptr = offset + offset_correction;
- return;
- }
- } else if (left_length <= offset) {
- // Right hand side only - iterate unless we have reached the bottom of
- // the cons tree.
- offset -= left_length;
- offset_correction += left_length;
- String* right = current->second();
- if (StringShape(right).IsCons()) {
- current = ConsString::cast(right);
- continue;
- } else {
- String::ReadBlockIntoBuffer(right, rbb, &offset, max_chars);
- *offset_ptr = offset + offset_correction;
- return;
- }
- } else {
- // The block to be read spans two sides of the ConsString, so we recurse.
- // First recurse on the left.
- max_chars -= left_length - offset;
- String::ReadBlockIntoBuffer(left, rbb, &offset, left_length - offset);
- // We may have reached the max or there may not have been enough space
- // in the buffer for the characters in the left hand side.
- if (offset == left_length) {
- // Recurse on the right.
- String* right = String::cast(current->second());
- offset -= left_length;
- offset_correction += left_length;
- String::ReadBlockIntoBuffer(right, rbb, &offset, max_chars);
- }
- *offset_ptr = offset + offset_correction;
- return;
+ // Tree traversal complete.
+ if (depth_ == 0) {
+ *blew_stack = false;
+ return NULL;
+ }
+ // We've lost track of higher nodes.
+ if (maximum_depth_ - depth_ == kStackSize) {
+ *blew_stack = true;
+ return NULL;
+ }
+ // Go right.
+ ConsString* cons_string = frames_[OffsetForDepth(depth_ - 1)];
+ String* string = cons_string->second();
+ int32_t type = string->map()->instance_type();
+ if ((type & kStringRepresentationMask) != kConsStringTag) {
+ // Pop stack so next iteration is in correct place.
+ Pop();
+ unsigned length = static_cast<unsigned>(string->length());
+ // Could be a flattened ConsString.
+ if (length == 0) continue;
+ *length_out = length;
+ *type_out = type;
+ consumed_ += length;
+ return string;
+ }
+ cons_string = ConsString::cast(string);
+ // TODO(dcarney): Add back root optimization.
+ PushRight(cons_string);
+ // Need to traverse all the way left.
+ while (true) {
+ // Continue left.
+ string = cons_string->first();
+ type = string->map()->instance_type();
+ if ((type & kStringRepresentationMask) != kConsStringTag) {
+ AdjustMaximumDepth();
+ unsigned length = static_cast<unsigned>(string->length());
+ ASSERT(length != 0);
+ *length_out = length;
+ *type_out = type;
+ consumed_ += length;
+ return string;
+ }
+ cons_string = ConsString::cast(string);
+ PushLeft(cons_string);
}
}
+ UNREACHABLE();
+ return NULL;
}
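
The two routines above replace the old recursive ReadBlock machinery with an explicit-stack, left-to-right traversal of the cons-string tree: descend the left spine while pushing interior nodes, yield a leaf, then resume from the most recently pushed right child. A standalone sketch of that traversal shape (plain C++ with an invented Node type; this is not V8's API, and it omits the fixed-size frame buffer and blown-stack handling):

#include <cassert>
#include <stack>
#include <string>
#include <vector>

struct Node {
  std::string leaf;              // Non-empty only for leaf nodes.
  const Node* first = nullptr;   // Left child (interior nodes only).
  const Node* second = nullptr;  // Right child.
};

std::vector<std::string> Leaves(const Node* root) {
  std::vector<std::string> out;
  std::stack<const Node*> frames;  // Plays the role of frames_[] above.
  const Node* node = root;
  while (node != nullptr || !frames.empty()) {
    // Descend left, remembering interior nodes (PushLeft).
    while (node != nullptr && node->first != nullptr) {
      frames.push(node);
      node = node->first;
    }
    // Yield the leaf; an empty leaf is skipped, like the
    // flattened-ConsString case above.
    if (node != nullptr && !node->leaf.empty()) out.push_back(node->leaf);
    if (frames.empty()) break;
    // Go right from the deepest pending frame (NextLeaf's "Go right").
    node = frames.top()->second;
    frames.pop();
  }
  return out;
}

int main() {
  Node l1{"foo"}, l2{"bar"}, l3{"baz"};
  Node inner{"", &l2, &l3};
  Node root{"", &l1, &inner};
  std::vector<std::string> leaves = Leaves(&root);
  assert(leaves.size() == 3 && leaves[0] == "foo" && leaves[2] == "baz");
  return 0;
}
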
@@ -7179,26 +6990,6 @@ uint16_t SlicedString::SlicedStringGet(int index) {
}
-const unibrow::byte* SlicedString::SlicedStringReadBlock(
- ReadBlockBuffer* buffer, unsigned* offset_ptr, unsigned chars) {
- unsigned offset = this->offset();
- *offset_ptr += offset;
- const unibrow::byte* answer = String::ReadBlock(String::cast(parent()),
- buffer, offset_ptr, chars);
- *offset_ptr -= offset;
- return answer;
-}
-
-
-void SlicedString::SlicedStringReadBlockIntoBuffer(
- ReadBlockBuffer* buffer, unsigned* offset_ptr, unsigned chars) {
- unsigned offset = this->offset();
- *offset_ptr += offset;
- String::ReadBlockIntoBuffer(String::cast(parent()),
- buffer, offset_ptr, chars);
- *offset_ptr -= offset;
-}
-
template <typename sinkchar>
void String::WriteToFlat(String* src,
sinkchar* sink,
@@ -7226,7 +7017,7 @@ void String::WriteToFlat(String* src,
}
case kOneByteStringTag | kSeqStringTag: {
CopyChars(sink,
- SeqAsciiString::cast(source)->GetChars() + from,
+ SeqOneByteString::cast(source)->GetChars() + from,
to - from);
return;
}
@@ -7261,9 +7052,9 @@ void String::WriteToFlat(String* src,
// common case of sequential ascii right child.
if (to - boundary == 1) {
sink[boundary - from] = static_cast<sinkchar>(second->Get(0));
- } else if (second->IsSeqAsciiString()) {
+ } else if (second->IsSeqOneByteString()) {
CopyChars(sink + boundary - from,
- SeqAsciiString::cast(second)->GetChars(),
+ SeqOneByteString::cast(second)->GetChars(),
to - boundary);
} else {
WriteToFlat(second,
@@ -7289,46 +7080,28 @@ void String::WriteToFlat(String* src,
}
-template <typename IteratorA, typename IteratorB>
-static inline bool CompareStringContents(IteratorA* ia, IteratorB* ib) {
- // General slow case check. We know that the ia and ib iterators
- // have the same length.
- while (ia->has_more()) {
- uint32_t ca = ia->GetNext();
- uint32_t cb = ib->GetNext();
- ASSERT(ca <= unibrow::Utf16::kMaxNonSurrogateCharCode);
- ASSERT(cb <= unibrow::Utf16::kMaxNonSurrogateCharCode);
- if (ca != cb)
- return false;
- }
- return true;
-}
-
-
// Compares the contents of two strings by reading and comparing
// int-sized blocks of characters.
template <typename Char>
-static inline bool CompareRawStringContents(Vector<Char> a, Vector<Char> b) {
- int length = a.length();
- ASSERT_EQ(length, b.length());
- const Char* pa = a.start();
- const Char* pb = b.start();
+static inline bool CompareRawStringContents(const Char* const a,
+ const Char* const b,
+ int length) {
int i = 0;
#ifndef V8_HOST_CAN_READ_UNALIGNED
// If this architecture isn't comfortable reading unaligned ints
// then we have to check that the strings are aligned before
// comparing them blockwise.
const int kAlignmentMask = sizeof(uint32_t) - 1; // NOLINT
- uint32_t pa_addr = reinterpret_cast<uint32_t>(pa);
- uint32_t pb_addr = reinterpret_cast<uint32_t>(pb);
+ uint32_t pa_addr = reinterpret_cast<uint32_t>(a);
+ uint32_t pb_addr = reinterpret_cast<uint32_t>(b);
if (((pa_addr & kAlignmentMask) | (pb_addr & kAlignmentMask)) == 0) {
#endif
const int kStepSize = sizeof(int) / sizeof(Char); // NOLINT
int endpoint = length - kStepSize;
// Compare blocks until we reach near the end of the string.
for (; i <= endpoint; i += kStepSize) {
- uint32_t wa = *reinterpret_cast<const uint32_t*>(pa + i);
- uint32_t wb = *reinterpret_cast<const uint32_t*>(pb + i);
+ uint32_t wa = *reinterpret_cast<const uint32_t*>(a + i);
+ uint32_t wb = *reinterpret_cast<const uint32_t*>(b + i);
if (wa != wb) {
return false;
}
@@ -7346,25 +7119,145 @@ static inline bool CompareRawStringContents(Vector<Char> a, Vector<Char> b) {
}
-template <typename IteratorA>
-static inline bool CompareStringContentsPartial(Isolate* isolate,
- IteratorA* ia,
- String* b) {
- String::FlatContent content = b->GetFlatContent();
- if (content.IsFlat()) {
- if (content.IsAscii()) {
- VectorIterator<char> ib(content.ToAsciiVector());
- return CompareStringContents(ia, &ib);
- } else {
- VectorIterator<uc16> ib(content.ToUC16Vector());
- return CompareStringContents(ia, &ib);
+template<typename Chars1, typename Chars2>
+class RawStringComparator : public AllStatic {
+ public:
+ static inline bool compare(const Chars1* a, const Chars2* b, int len) {
+ ASSERT(sizeof(Chars1) != sizeof(Chars2));
+ for (int i = 0; i < len; i++) {
+ if (a[i] != b[i]) {
+ return false;
+ }
}
- } else {
- isolate->objects_string_compare_buffer_b()->Reset(0, b);
- return CompareStringContents(ia,
- isolate->objects_string_compare_buffer_b());
+ return true;
}
-}
+};
+
+
+template<>
+class RawStringComparator<uint16_t, uint16_t> {
+ public:
+ static inline bool compare(const uint16_t* a, const uint16_t* b, int len) {
+ return CompareRawStringContents(a, b, len);
+ }
+};
+
+
+template<>
+class RawStringComparator<uint8_t, uint8_t> {
+ public:
+ static inline bool compare(const uint8_t* a, const uint8_t* b, int len) {
+ return CompareRawStringContents(a, b, len);
+ }
+};
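
The dispatch above is done purely through template specialization: the primary template handles mixed character widths one element at a time, while the two full specializations for equal widths take the blockwise fast path. A minimal standalone sketch of the same pattern, with std::memcmp standing in for CompareRawStringContents:

#include <cassert>
#include <cstdint>
#include <cstring>

template <typename A, typename B>
struct RawComparator {
  static bool Compare(const A* a, const B* b, int len) {
    static_assert(sizeof(A) != sizeof(B), "equal widths take the fast path");
    for (int i = 0; i < len; i++) {
      if (a[i] != b[i]) return false;  // Widening compare, e.g. 8 vs 16 bit.
    }
    return true;
  }
};

template <typename T>
struct SameWidth {
  static bool Compare(const T* a, const T* b, int len) {
    return std::memcmp(a, b, len * sizeof(T)) == 0;  // Blockwise stand-in.
  }
};

template <> struct RawComparator<uint8_t, uint8_t> : SameWidth<uint8_t> {};
template <> struct RawComparator<uint16_t, uint16_t> : SameWidth<uint16_t> {};

int main() {
  const uint8_t one_byte[] = {'a', 'b', 'c'};
  const uint16_t two_byte[] = {'a', 'b', 'c'};
  assert((RawComparator<uint8_t, uint16_t>::Compare(one_byte, two_byte, 3)));
  assert((RawComparator<uint8_t, uint8_t>::Compare(one_byte, one_byte, 3)));
  return 0;
}
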
+
+
+class StringComparator {
+ class State {
+ public:
+ explicit inline State(ConsStringIteratorOp* op)
+ : op_(op), is_one_byte_(true), length_(0), buffer8_(NULL) {}
+
+ inline void Init(String* string, unsigned len) {
+ op_->Reset();
+ int32_t type = string->map()->instance_type();
+ String::Visit(string, 0, *this, *op_, type, len);
+ }
+
+ inline void VisitOneByteString(const uint8_t* chars, unsigned length) {
+ is_one_byte_ = true;
+ buffer8_ = chars;
+ length_ = length;
+ }
+
+ inline void VisitTwoByteString(const uint16_t* chars, unsigned length) {
+ is_one_byte_ = false;
+ buffer16_ = chars;
+ length_ = length;
+ }
+
+ void Advance(unsigned consumed) {
+ ASSERT(consumed <= length_);
+ // Still in buffer.
+ if (length_ != consumed) {
+ if (is_one_byte_) {
+ buffer8_ += consumed;
+ } else {
+ buffer16_ += consumed;
+ }
+ length_ -= consumed;
+ return;
+ }
+ // Advance state.
+ ASSERT(op_->HasMore());
+ int32_t type = 0;
+ unsigned length = 0;
+ String* next = op_->ContinueOperation(&type, &length);
+ ASSERT(next != NULL);
+ ConsStringNullOp null_op;
+ String::Visit(next, 0, *this, null_op, type, length);
+ }
+
+ ConsStringIteratorOp* const op_;
+ bool is_one_byte_;
+ unsigned length_;
+ union {
+ const uint8_t* buffer8_;
+ const uint16_t* buffer16_;
+ };
+ DISALLOW_IMPLICIT_CONSTRUCTORS(State);
+ };
+
+ public:
+ inline StringComparator(ConsStringIteratorOp* op_1,
+ ConsStringIteratorOp* op_2)
+ : state_1_(op_1),
+ state_2_(op_2) {
+ }
+
+ template<typename Chars1, typename Chars2>
+ static inline bool Equals(State* state_1, State* state_2, unsigned to_check) {
+ const Chars1* a = reinterpret_cast<const Chars1*>(state_1->buffer8_);
+ const Chars2* b = reinterpret_cast<const Chars2*>(state_2->buffer8_);
+ return RawStringComparator<Chars1, Chars2>::compare(a, b, to_check);
+ }
+
+ bool Equals(unsigned length, String* string_1, String* string_2) {
+ ASSERT(length != 0);
+ state_1_.Init(string_1, length);
+ state_2_.Init(string_2, length);
+ while (true) {
+ unsigned to_check = Min(state_1_.length_, state_2_.length_);
+ ASSERT(to_check > 0 && to_check <= length);
+ bool is_equal;
+ if (state_1_.is_one_byte_) {
+ if (state_2_.is_one_byte_) {
+ is_equal = Equals<uint8_t, uint8_t>(&state_1_, &state_2_, to_check);
+ } else {
+ is_equal = Equals<uint8_t, uint16_t>(&state_1_, &state_2_, to_check);
+ }
+ } else {
+ if (state_2_.is_one_byte_) {
+ is_equal = Equals<uint16_t, uint8_t>(&state_1_, &state_2_, to_check);
+ } else {
+ is_equal = Equals<uint16_t, uint16_t>(&state_1_, &state_2_, to_check);
+ }
+ }
+ // Looping done.
+ if (!is_equal) return false;
+ length -= to_check;
+ // Exit condition. Strings are equal.
+ if (length == 0) return true;
+ state_1_.Advance(to_check);
+ state_2_.Advance(to_check);
+ }
+ }
+
+ private:
+ State state_1_;
+ State state_2_;
+ DISALLOW_IMPLICIT_CONSTRUCTORS(StringComparator);
+};
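
StringComparator walks both strings in parallel: each round compares min(length_1, length_2) characters of the current flat segments, then State::Advance either moves a cursor within its segment or pulls the next leaf from the cons-string iterator. A simplified standalone sketch of that loop, using vectors of string chunks in place of flat string segments (invented types, not V8's):

#include <algorithm>
#include <cassert>
#include <string>
#include <vector>

struct ChunkCursor {
  const std::vector<std::string>* chunks;
  size_t chunk = 0, pos = 0;
  size_t Remaining() const { return (*chunks)[chunk].size() - pos; }
  const char* Data() const { return (*chunks)[chunk].data() + pos; }
  void Advance(size_t n) {  // Mirrors State::Advance above.
    pos += n;
    if (pos == (*chunks)[chunk].size() && chunk + 1 < chunks->size()) {
      chunk++;
      pos = 0;
    }
  }
};

bool ChunkedEquals(const std::vector<std::string>& a,
                   const std::vector<std::string>& b,
                   size_t total_length) {
  ChunkCursor ca{&a}, cb{&b};
  while (total_length > 0) {
    size_t to_check = std::min(ca.Remaining(), cb.Remaining());
    if (!std::equal(ca.Data(), ca.Data() + to_check, cb.Data())) return false;
    total_length -= to_check;
    if (total_length == 0) return true;  // Exit before advancing, as above.
    ca.Advance(to_check);
    cb.Advance(to_check);
  }
  return true;
}

int main() {
  // "hello world" split at different boundaries still compares equal.
  std::vector<std::string> a = {"hello ", "world"};
  std::vector<std::string> b = {"hel", "lo w", "orld"};
  assert(ChunkedEquals(a, b, 11));
  return 0;
}
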
bool String::SlowEquals(String* other) {
@@ -7400,63 +7293,24 @@ bool String::SlowEquals(String* other) {
String* lhs = this->TryFlattenGetString();
String* rhs = other->TryFlattenGetString();
+ // TODO(dcarney): Compare all types of flat strings with a Visitor.
if (StringShape(lhs).IsSequentialAscii() &&
StringShape(rhs).IsSequentialAscii()) {
- const char* str1 = SeqAsciiString::cast(lhs)->GetChars();
- const char* str2 = SeqAsciiString::cast(rhs)->GetChars();
- return CompareRawStringContents(Vector<const char>(str1, len),
- Vector<const char>(str2, len));
+ const uint8_t* str1 = SeqOneByteString::cast(lhs)->GetChars();
+ const uint8_t* str2 = SeqOneByteString::cast(rhs)->GetChars();
+ return CompareRawStringContents(str1, str2, len);
}
Isolate* isolate = GetIsolate();
- String::FlatContent lhs_content = lhs->GetFlatContent();
- String::FlatContent rhs_content = rhs->GetFlatContent();
- if (lhs_content.IsFlat()) {
- if (lhs_content.IsAscii()) {
- Vector<const char> vec1 = lhs_content.ToAsciiVector();
- if (rhs_content.IsFlat()) {
- if (rhs_content.IsAscii()) {
- Vector<const char> vec2 = rhs_content.ToAsciiVector();
- return CompareRawStringContents(vec1, vec2);
- } else {
- VectorIterator<char> buf1(vec1);
- VectorIterator<uc16> ib(rhs_content.ToUC16Vector());
- return CompareStringContents(&buf1, &ib);
- }
- } else {
- VectorIterator<char> buf1(vec1);
- isolate->objects_string_compare_buffer_b()->Reset(0, rhs);
- return CompareStringContents(&buf1,
- isolate->objects_string_compare_buffer_b());
- }
- } else {
- Vector<const uc16> vec1 = lhs_content.ToUC16Vector();
- if (rhs_content.IsFlat()) {
- if (rhs_content.IsAscii()) {
- VectorIterator<uc16> buf1(vec1);
- VectorIterator<char> ib(rhs_content.ToAsciiVector());
- return CompareStringContents(&buf1, &ib);
- } else {
- Vector<const uc16> vec2(rhs_content.ToUC16Vector());
- return CompareRawStringContents(vec1, vec2);
- }
- } else {
- VectorIterator<uc16> buf1(vec1);
- isolate->objects_string_compare_buffer_b()->Reset(0, rhs);
- return CompareStringContents(&buf1,
- isolate->objects_string_compare_buffer_b());
- }
- }
- } else {
- isolate->objects_string_compare_buffer_a()->Reset(0, lhs);
- return CompareStringContentsPartial(isolate,
- isolate->objects_string_compare_buffer_a(), rhs);
- }
+ StringComparator comparator(isolate->objects_string_compare_iterator_a(),
+ isolate->objects_string_compare_iterator_b());
+
+ return comparator.Equals(static_cast<unsigned>(len), lhs, rhs);
}
bool String::MarkAsUndetectable() {
- if (StringShape(this).IsSymbol()) return false;
+ if (StringShape(this).IsInternalized()) return false;
Map* map = this->map();
Heap* heap = GetHeap();
@@ -7472,15 +7326,21 @@ bool String::MarkAsUndetectable() {
}
-bool String::IsEqualTo(Vector<const char> str) {
- Isolate* isolate = GetIsolate();
+bool String::IsUtf8EqualTo(Vector<const char> str) {
int slen = length();
- Access<UnicodeCache::Utf8Decoder>
- decoder(isolate->unicode_cache()->utf8_decoder());
- decoder->Reset(str.start(), str.length());
+ // Can't check exact length equality, but we can check bounds.
+ int str_len = str.length();
+ if (str_len < slen ||
+ str_len > slen * static_cast<int>(unibrow::Utf8::kMaxEncodedSize)) {
+ return false;
+ }
int i;
- for (i = 0; i < slen && decoder->has_more(); i++) {
- uint32_t r = decoder->GetNext();
+ unsigned remaining_in_str = static_cast<unsigned>(str_len);
+ const uint8_t* utf8_data = reinterpret_cast<const uint8_t*>(str.start());
+ for (i = 0; i < slen && remaining_in_str > 0; i++) {
+ unsigned cursor = 0;
+ uint32_t r = unibrow::Utf8::ValueOf(utf8_data, remaining_in_str, &cursor);
+ ASSERT(cursor > 0 && cursor <= remaining_in_str);
if (r > unibrow::Utf16::kMaxNonSurrogateCharCode) {
if (i > slen - 1) return false;
if (Get(i++) != unibrow::Utf16::LeadSurrogate(r)) return false;
@@ -7488,17 +7348,19 @@ bool String::IsEqualTo(Vector<const char> str) {
} else {
if (Get(i) != r) return false;
}
+ utf8_data += cursor;
+ remaining_in_str -= cursor;
}
- return i == slen && !decoder->has_more();
+ return i == slen && remaining_in_str == 0;
}
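
The bounds check above relies on UTF-8/UTF-16 size relations: slen UTF-16 code units need at least slen and at most slen * kMaxEncodedSize UTF-8 bytes, so any byte length outside that range cannot match. Supplementary code points are then compared as their lead/trail surrogate pair. A small sketch of that split (constants follow the UTF-16 definition; the names mirror unibrow::Utf16, but this is not V8's implementation):

#include <cassert>
#include <cstdint>

const uint32_t kMaxNonSurrogateCharCode = 0xFFFF;

uint16_t LeadSurrogate(uint32_t c) {
  return 0xD800 + (((c - 0x10000) >> 10) & 0x3FF);
}
uint16_t TrailSurrogate(uint32_t c) {
  return 0xDC00 + ((c - 0x10000) & 0x3FF);
}

int main() {
  uint32_t c = 0x1F600;  // Decodes from 4 UTF-8 bytes, fills 2 UTF-16 units.
  assert(c > kMaxNonSurrogateCharCode);
  assert(LeadSurrogate(c) == 0xD83D);
  assert(TrailSurrogate(c) == 0xDE00);
  return 0;
}
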
-bool String::IsAsciiEqualTo(Vector<const char> str) {
+bool String::IsOneByteEqualTo(Vector<const uint8_t> str) {
int slen = length();
if (str.length() != slen) return false;
FlatContent content = GetFlatContent();
if (content.IsAscii()) {
- return CompareChars(content.ToAsciiVector().start(),
+ return CompareChars(content.ToOneByteVector().start(),
str.start(), slen) == 0;
}
for (int i = 0; i < slen; i++) {
@@ -7522,28 +7384,62 @@ bool String::IsTwoByteEqualTo(Vector<const uc16> str) {
}
+class IteratingStringHasher: public StringHasher {
+ public:
+ static inline uint32_t Hash(String* string, uint32_t seed) {
+ const unsigned len = static_cast<unsigned>(string->length());
+ IteratingStringHasher hasher(len, seed);
+ if (hasher.has_trivial_hash()) {
+ return hasher.GetHashField();
+ }
+ int32_t type = string->map()->instance_type();
+ ConsStringNullOp null_op;
+ String::Visit(string, 0, hasher, null_op, type, len);
+ // Flat strings terminate immediately.
+ if (hasher.consumed_ == len) {
+ ASSERT(!string->IsConsString());
+ return hasher.GetHashField();
+ }
+ ASSERT(string->IsConsString());
+ // This is a ConsString, iterate across it.
+ ConsStringIteratorOp op;
+ unsigned offset = 0;
+ unsigned leaf_length = len;
+ string = op.Operate(string, &offset, &type, &leaf_length);
+ while (true) {
+ ASSERT(hasher.consumed_ < len);
+ String::Visit(string, 0, hasher, null_op, type, leaf_length);
+ if (hasher.consumed_ == len) break;
+ string = op.ContinueOperation(&type, &leaf_length);
+ // This should be taken care of by the length check.
+ ASSERT(string != NULL);
+ }
+ return hasher.GetHashField();
+ }
+ inline void VisitOneByteString(const uint8_t* chars, unsigned length) {
+ AddCharacters(chars, static_cast<int>(length));
+ consumed_ += length;
+ }
+ inline void VisitTwoByteString(const uint16_t* chars, unsigned length) {
+ AddCharacters(chars, static_cast<int>(length));
+ consumed_ += length;
+ }
+
+ private:
+ inline IteratingStringHasher(int len, uint32_t seed)
+ : StringHasher(len, seed),
+ consumed_(0) {}
+ unsigned consumed_;
+ DISALLOW_COPY_AND_ASSIGN(IteratingStringHasher);
+};
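
IteratingStringHasher works because the running hash is updated one character at a time, so feeding the same characters in several flat chunks (one Visit call per segment) produces the same value as a single pass over the whole string. A standalone sketch using a Jenkins one-at-a-time-style core, which is the shape of StringHasher's mixing; treat the exact steps as illustrative:

#include <cassert>
#include <cstdint>
#include <string>

struct RunningHasher {
  uint32_t h = 0;
  void Add(const std::string& chunk) {  // VisitOneByteString analogue.
    for (unsigned char c : chunk) {
      h += c;
      h += h << 10;
      h ^= h >> 6;
    }
  }
  uint32_t Get() const {  // Finalization, GetHashCore analogue.
    uint32_t r = h;
    r += r << 3;
    r ^= r >> 11;
    r += r << 15;
    return r;
  }
};

int main() {
  RunningHasher whole, chunked;
  whole.Add("hello world");
  chunked.Add("hello ");  // First flat segment.
  chunked.Add("world");   // Continuation segment.
  assert(whole.Get() == chunked.Get());
  return 0;
}
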
+
+
uint32_t String::ComputeAndSetHash() {
// Should only be called if hash code has not yet been computed.
ASSERT(!HasHashCode());
- const int len = length();
-
- // Compute the hash code.
- uint32_t field = 0;
- if (StringShape(this).IsSequentialAscii()) {
- field = HashSequentialString(SeqAsciiString::cast(this)->GetChars(),
- len,
- GetHeap()->HashSeed());
- } else if (StringShape(this).IsSequentialTwoByte()) {
- field = HashSequentialString(SeqTwoByteString::cast(this)->GetChars(),
- len,
- GetHeap()->HashSeed());
- } else {
- StringInputBuffer buffer(this);
- field = ComputeHashField(&buffer, len, GetHeap()->HashSeed());
- }
-
// Store the hash code in the object.
+ uint32_t field = IteratingStringHasher::Hash(this, GetHeap()->HashSeed());
set_hash_field(field);
// Check the hash code is there.
@@ -7554,11 +7450,12 @@ uint32_t String::ComputeAndSetHash() {
}
-bool String::ComputeArrayIndex(unibrow::CharacterStream* buffer,
- uint32_t* index,
- int length) {
+bool String::ComputeArrayIndex(uint32_t* index) {
+ int length = this->length();
if (length == 0 || length > kMaxArrayIndexSize) return false;
- uc32 ch = buffer->GetNext();
+ ConsStringIteratorOp op;
+ StringCharacterStream stream(this, &op);
+ uint16_t ch = stream.GetNext();
// If the string begins with a '0' character, it must only consist
// of it to be a legal array index.
@@ -7571,8 +7468,8 @@ bool String::ComputeArrayIndex(unibrow::CharacterStream* buffer,
int d = ch - '0';
if (d < 0 || d > 9) return false;
uint32_t result = d;
- while (buffer->has_more()) {
- d = buffer->GetNext() - '0';
+ while (stream.HasMore()) {
+ d = stream.GetNext() - '0';
if (d < 0 || d > 9) return false;
// Check that the new result is below the 32 bit limit.
if (result > 429496729U - ((d > 5) ? 1 : 0)) return false;
@@ -7593,9 +7490,103 @@ bool String::SlowAsArrayIndex(uint32_t* index) {
*index = (kArrayIndexHashMask & field) >> kHashShift;
return true;
} else {
- StringInputBuffer buffer(this);
- return ComputeArrayIndex(&buffer, index, length());
+ return ComputeArrayIndex(index);
+ }
+}
+
+
+String* SeqString::Truncate(int new_length) {
+ Heap* heap = GetHeap();
+ if (new_length <= 0) return heap->empty_string();
+
+ int string_size, allocated_string_size;
+ int old_length = length();
+ if (old_length <= new_length) return this;
+
+ if (IsSeqOneByteString()) {
+ allocated_string_size = SeqOneByteString::SizeFor(old_length);
+ string_size = SeqOneByteString::SizeFor(new_length);
+ } else {
+ allocated_string_size = SeqTwoByteString::SizeFor(old_length);
+ string_size = SeqTwoByteString::SizeFor(new_length);
+ }
+
+ int delta = allocated_string_size - string_size;
+ set_length(new_length);
+
+ // String sizes are pointer size aligned, so that we can use filler objects
+ // that are a multiple of pointer size.
+ Address end_of_string = address() + string_size;
+ heap->CreateFillerObjectAt(end_of_string, delta);
+ if (Marking::IsBlack(Marking::MarkBitFrom(this))) {
+ MemoryChunk::IncrementLiveBytesFromMutator(address(), -delta);
+ }
+ return this;
+}
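
Truncate shrinks the string in place: both sizes are pointer-aligned, so the freed tail is a whole number of words that can be covered by a filler object. A worked sketch of the size arithmetic, under assumed layout constants (the header size here is hypothetical, not V8's real layout):

#include <cassert>

const int kPointerSize = 8;
const int kHeaderSize = 16;  // Hypothetical map + length fields.

// Pointer-size-aligned object size for a string of `length` chars.
int SizeFor(int length, int char_size) {
  int raw = kHeaderSize + length * char_size;
  return (raw + kPointerSize - 1) & ~(kPointerSize - 1);
}

int main() {
  int allocated = SizeFor(100, 1);    // One-byte string, 100 chars: 120.
  int truncated = SizeFor(40, 1);     // Truncated to 40 chars: 56.
  int delta = allocated - truncated;  // Bytes covered by the filler object.
  assert(delta == 64 && delta % kPointerSize == 0);
  return 0;
}
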
+
+
+AllocationSiteInfo* AllocationSiteInfo::FindForJSObject(JSObject* object) {
+ // Currently, AllocationSiteInfo objects are only allocated immediately
+ // after JSArrays in NewSpace, and detecting whether a JSArray has one
+ // involves carefully checking the object immediately after the JSArray
+ // (if there is one) to see if it's an AllocationSiteInfo.
+ if (FLAG_track_allocation_sites && object->GetHeap()->InNewSpace(object)) {
+ Address ptr_end = (reinterpret_cast<Address>(object) - kHeapObjectTag) +
+ object->Size();
+ if ((ptr_end + AllocationSiteInfo::kSize) <=
+ object->GetHeap()->NewSpaceTop()) {
+ // There is room in newspace for allocation info. Do we have some?
+ Map** possible_allocation_site_info_map =
+ reinterpret_cast<Map**>(ptr_end);
+ if (*possible_allocation_site_info_map ==
+ object->GetHeap()->allocation_site_info_map()) {
+ AllocationSiteInfo* info = AllocationSiteInfo::cast(
+ reinterpret_cast<Object*>(ptr_end + 1));
+ return info;
+ }
+ }
+ }
+ return NULL;
+}
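
FindForJSObject exploits bump allocation in new space: the object allocated immediately after a JSArray starts at the array's end address, so when that address is still below the allocation top, the first word there (the map) reveals whether an AllocationSiteInfo follows. A standalone sketch of the trailing-neighbor check with an invented bump allocator:

#include <cassert>
#include <cstdint>
#include <cstring>

const uint32_t kArrayMap = 1;
const uint32_t kSiteInfoMap = 2;

struct BumpHeap {
  uint8_t memory[256];
  uint8_t* top = memory;  // NewSpaceTop analogue.
  uint8_t* Allocate(uint32_t map, int size) {
    uint8_t* result = top;
    std::memcpy(result, &map, sizeof(map));  // First word identifies the type.
    top += size;
    return result;
  }
};

// True if the object allocated right behind `obj` is site info.
bool HasTrailingSiteInfo(BumpHeap* heap, uint8_t* obj, int size,
                         int info_size) {
  uint8_t* end = obj + size;
  if (end + info_size > heap->top) return false;  // Nothing allocated there.
  uint32_t map;
  std::memcpy(&map, end, sizeof(map));
  return map == kSiteInfoMap;
}

int main() {
  BumpHeap heap;
  uint8_t* array = heap.Allocate(kArrayMap, 32);
  assert(!HasTrailingSiteInfo(&heap, array, 32, 16));
  heap.Allocate(kSiteInfoMap, 16);  // Lands immediately after the array.
  assert(HasTrailingSiteInfo(&heap, array, 32, 16));
  return 0;
}
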
+
+
+bool AllocationSiteInfo::GetElementsKindPayload(ElementsKind* kind) {
+ ASSERT(kind != NULL);
+ if (payload()->IsJSGlobalPropertyCell()) {
+ JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(payload());
+ Object* cell_contents = cell->value();
+ if (cell_contents->IsSmi()) {
+ *kind = static_cast<ElementsKind>(
+ Smi::cast(cell_contents)->value());
+ return true;
+ }
}
+ return false;
+}
+
+
+// Heuristic: We only need to create allocation site info if the boilerplate
+// elements kind is the initial elements kind.
+AllocationSiteMode AllocationSiteInfo::GetMode(
+ ElementsKind boilerplate_elements_kind) {
+ if (FLAG_track_allocation_sites &&
+ IsFastSmiElementsKind(boilerplate_elements_kind)) {
+ return TRACK_ALLOCATION_SITE;
+ }
+
+ return DONT_TRACK_ALLOCATION_SITE;
+}
+
+
+AllocationSiteMode AllocationSiteInfo::GetMode(ElementsKind from,
+ ElementsKind to) {
+ if (FLAG_track_allocation_sites &&
+ IsFastSmiElementsKind(from) &&
+ (IsFastObjectElementsKind(to) || IsFastDoubleElementsKind(to))) {
+ return TRACK_ALLOCATION_SITE;
+ }
+
+ return DONT_TRACK_ALLOCATION_SITE;
}
@@ -7617,57 +7608,64 @@ uint32_t StringHasher::MakeArrayIndexHash(uint32_t value, int length) {
}
-void StringHasher::AddSurrogatePair(uc32 c) {
- uint16_t lead = unibrow::Utf16::LeadSurrogate(c);
- AddCharacter(lead);
- uint16_t trail = unibrow::Utf16::TrailSurrogate(c);
- AddCharacter(trail);
-}
-
-
-void StringHasher::AddSurrogatePairNoIndex(uc32 c) {
- uint16_t lead = unibrow::Utf16::LeadSurrogate(c);
- AddCharacterNoIndex(lead);
- uint16_t trail = unibrow::Utf16::TrailSurrogate(c);
- AddCharacterNoIndex(trail);
-}
-
-
uint32_t StringHasher::GetHashField() {
if (length_ <= String::kMaxHashCalcLength) {
- if (is_array_index()) {
- return MakeArrayIndexHash(array_index(), length_);
+ if (is_array_index_) {
+ return MakeArrayIndexHash(array_index_, length_);
}
- return (GetHash() << String::kHashShift) | String::kIsNotArrayIndexMask;
+ return (GetHashCore(raw_running_hash_) << String::kHashShift) |
+ String::kIsNotArrayIndexMask;
} else {
return (length_ << String::kHashShift) | String::kIsNotArrayIndexMask;
}
}
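
GetHashField encodes three cases into one field: short strings that parse as array indices store the index itself, other hash-calculable strings store the mixed hash with the not-an-index bit set, and over-long strings fall back to a trivial length-based hash. A sketch of that policy with illustrative field-layout constants (not V8's real shifts and masks):

#include <cassert>
#include <cstdint>

const int kHashShift = 2;
const uint32_t kIsNotArrayIndexMask = 1;
const int kMaxHashCalcLength = 16383;

uint32_t HashField(int length, bool is_array_index, uint32_t payload) {
  if (length <= kMaxHashCalcLength) {
    if (is_array_index) return payload << kHashShift;  // Index, tag bit clear.
    return (payload << kHashShift) | kIsNotArrayIndexMask;
  }
  // Very long strings: the length itself is the hash.
  return (static_cast<uint32_t>(length) << kHashShift) | kIsNotArrayIndexMask;
}

int main() {
  // "42" stores its numeric value; huge strings hash by length alone.
  assert(HashField(2, true, 42) >> kHashShift == 42);
  assert(HashField(1 << 20, false, 0) ==
         (((1u << 20) << kHashShift) | kIsNotArrayIndexMask));
  return 0;
}
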
-uint32_t String::ComputeHashField(unibrow::CharacterStream* buffer,
- int length,
- uint32_t seed) {
- StringHasher hasher(length, seed);
-
- // Very long strings have a trivial hash that doesn't inspect the
- // string contents.
- if (hasher.has_trivial_hash()) {
- return hasher.GetHashField();
- }
-
- // Do the iterative array index computation as long as there is a
- // chance this is an array index.
- while (buffer->has_more() && hasher.is_array_index()) {
- hasher.AddCharacter(buffer->GetNext());
- }
-
- // Process the remaining characters without updating the array
- // index.
- while (buffer->has_more()) {
- hasher.AddCharacterNoIndex(buffer->GetNext());
+uint32_t StringHasher::ComputeUtf8Hash(Vector<const char> chars,
+ uint32_t seed,
+ int* utf16_length_out) {
+ int vector_length = chars.length();
+ // Handle some edge cases.
+ if (vector_length <= 1) {
+ ASSERT(vector_length == 0 ||
+ static_cast<uint8_t>(chars.start()[0]) <=
+ unibrow::Utf8::kMaxOneByteChar);
+ *utf16_length_out = vector_length;
+ return HashSequentialString(chars.start(), vector_length, seed);
+ }
+ // Start with a fake length which won't affect computation.
+ // It will be updated later.
+ StringHasher hasher(String::kMaxArrayIndexSize, seed);
+ unsigned remaining = static_cast<unsigned>(vector_length);
+ const uint8_t* stream = reinterpret_cast<const uint8_t*>(chars.start());
+ int utf16_length = 0;
+ bool is_index = true;
+ ASSERT(hasher.is_array_index_);
+ while (remaining > 0) {
+ unsigned consumed = 0;
+ uint32_t c = unibrow::Utf8::ValueOf(stream, remaining, &consumed);
+ ASSERT(consumed > 0 && consumed <= remaining);
+ stream += consumed;
+ remaining -= consumed;
+ bool is_two_characters = c > unibrow::Utf16::kMaxNonSurrogateCharCode;
+ utf16_length += is_two_characters ? 2 : 1;
+ // No need to keep hashing. But we do need to calculate utf16_length.
+ if (utf16_length > String::kMaxHashCalcLength) continue;
+ if (is_two_characters) {
+ uint16_t c1 = unibrow::Utf16::LeadSurrogate(c);
+ uint16_t c2 = unibrow::Utf16::TrailSurrogate(c);
+ hasher.AddCharacter(c1);
+ hasher.AddCharacter(c2);
+ if (is_index) is_index = hasher.UpdateIndex(c1);
+ if (is_index) is_index = hasher.UpdateIndex(c2);
+ } else {
+ hasher.AddCharacter(c);
+ if (is_index) is_index = hasher.UpdateIndex(c);
+ }
}
-
+ *utf16_length_out = static_cast<int>(utf16_length);
+ // Must set length here so that hash computation is correct.
+ hasher.length_ = utf16_length;
return hasher.GetHashField();
}
@@ -7711,11 +7709,12 @@ static void TrimDescriptorArray(Heap* heap,
Map* map,
DescriptorArray* descriptors,
int number_of_own_descriptors) {
- int number_of_descriptors = descriptors->number_of_descriptors();
+ int number_of_descriptors = descriptors->number_of_descriptors_storage();
int to_trim = number_of_descriptors - number_of_own_descriptors;
- if (to_trim <= 0) return;
+ if (to_trim == 0) return;
- RightTrimFixedArray<FROM_GC>(heap, descriptors, to_trim);
+ RightTrimFixedArray<FROM_GC>(
+ heap, descriptors, to_trim * DescriptorArray::kDescriptorSize);
descriptors->SetNumberOfDescriptors(number_of_own_descriptors);
if (descriptors->HasEnumCache()) TrimEnumCache(heap, map, descriptors);
@@ -7887,11 +7886,6 @@ bool SharedFunctionInfo::CompileLazy(Handle<SharedFunctionInfo> shared,
}
-void SharedFunctionInfo::ClearOptimizedCodeMap() {
- set_optimized_code_map(Smi::FromInt(0));
-}
-
-
void SharedFunctionInfo::AddToOptimizedCodeMap(
Handle<SharedFunctionInfo> shared,
Handle<Context> native_context,
@@ -7949,7 +7943,6 @@ void SharedFunctionInfo::InstallFromOptimizedCodeMap(JSFunction* function,
ASSERT(code != NULL);
ASSERT(function->context()->native_context() == code_map->get(index - 1));
function->ReplaceCode(code);
- code->MakeYoung();
}
@@ -8156,12 +8149,13 @@ Context* JSFunction::NativeContextFromLiterals(FixedArray* literals) {
MaybeObject* Oddball::Initialize(const char* to_string,
Object* to_number,
byte kind) {
- String* symbol;
- { MaybeObject* maybe_symbol =
- Isolate::Current()->heap()->LookupAsciiSymbol(to_string);
- if (!maybe_symbol->To(&symbol)) return maybe_symbol;
+ String* internalized_to_string;
+ { MaybeObject* maybe_string =
+ Isolate::Current()->heap()->InternalizeUtf8String(
+ CStrVector(to_string));
+ if (!maybe_string->To(&internalized_to_string)) return maybe_string;
}
- set_to_string(symbol);
+ set_to_string(internalized_to_string);
set_to_number(to_number);
set_kind(kind);
return this;
@@ -8217,13 +8211,14 @@ bool SharedFunctionInfo::CanGenerateInlineConstructor(Object* prototype) {
return false;
}
- Heap* heap = GetHeap();
+ Isolate* isolate = GetIsolate();
+ Heap* heap = isolate->heap();
// Traverse the proposed prototype chain looking for properties of the
// same names as are set by the inline constructor.
for (Object* obj = prototype;
obj != heap->null_value();
- obj = obj->GetPrototype()) {
+ obj = obj->GetPrototype(isolate)) {
JSReceiver* receiver = JSReceiver::cast(obj);
for (int i = 0; i < this_property_assignments_count(); i++) {
LookupResult result(heap->isolate());
@@ -8382,7 +8377,7 @@ void SharedFunctionInfo::EnableDeoptimizationSupport(Code* recompiled) {
// old code, we have to replace it. We should try to avoid this
// altogether because it flushes valuable type feedback by
// effectively resetting all IC state.
- set_code(recompiled);
+ ReplaceCode(recompiled);
}
ASSERT(has_deoptimization_support());
}
@@ -8775,6 +8770,46 @@ Map* Code::FindFirstMap() {
}
+void Code::FindAllMaps(MapHandleList* maps) {
+ ASSERT(is_inline_cache_stub());
+ AssertNoAllocation no_allocation;
+ int mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
+ for (RelocIterator it(this, mask); !it.done(); it.next()) {
+ RelocInfo* info = it.rinfo();
+ Object* object = info->target_object();
+ if (object->IsMap()) maps->Add(Handle<Map>(Map::cast(object)));
+ }
+}
+
+
+Code* Code::FindFirstCode() {
+ ASSERT(is_inline_cache_stub());
+ AssertNoAllocation no_allocation;
+ int mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET);
+ for (RelocIterator it(this, mask); !it.done(); it.next()) {
+ RelocInfo* info = it.rinfo();
+ return Code::GetCodeFromTargetAddress(info->target_address());
+ }
+ return NULL;
+}
+
+
+void Code::FindAllCode(CodeHandleList* code_list, int length) {
+ ASSERT(is_inline_cache_stub());
+ AssertNoAllocation no_allocation;
+ int mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET);
+ int i = 0;
+ for (RelocIterator it(this, mask); !it.done(); it.next()) {
+ if (i++ == length) return;
+ RelocInfo* info = it.rinfo();
+ Code* code = Code::GetCodeFromTargetAddress(info->target_address());
+ ASSERT(code->is_load_stub());
+ code_list->Add(Handle<Code>(code));
+ }
+ UNREACHABLE();
+}
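
All three Find* helpers above share one pattern: iterate the code object's relocation info, filtered by a mode mask, and inspect each entry's target. A standalone sketch of that filtered-walk pattern over an invented (mode, payload) table:

#include <cassert>
#include <vector>

enum Mode { CODE_TARGET, EMBEDDED_OBJECT, COMMENT };
int ModeMask(Mode mode) { return 1 << mode; }

struct RelocEntry { Mode mode; int payload; };

// Visit only entries selected by `mask`, as RelocIterator does.
std::vector<int> CollectPayloads(const std::vector<RelocEntry>& relocs,
                                 int mask) {
  std::vector<int> out;
  for (const RelocEntry& entry : relocs) {
    if ((mask & ModeMask(entry.mode)) != 0) out.push_back(entry.payload);
  }
  return out;
}

int main() {
  std::vector<RelocEntry> relocs = {
      {CODE_TARGET, 10}, {COMMENT, 99}, {CODE_TARGET, 20}};
  std::vector<int> targets = CollectPayloads(relocs, ModeMask(CODE_TARGET));
  assert(targets.size() == 2 && targets[0] == 10 && targets[1] == 20);
  return 0;
}
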
+
+
void Code::ClearInlineCaches() {
int mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
RelocInfo::ModeMask(RelocInfo::CONSTRUCT_CALL) |
@@ -8791,6 +8826,7 @@ void Code::ClearInlineCaches() {
void Code::ClearTypeFeedbackCells(Heap* heap) {
+ if (kind() != FUNCTION) return;
Object* raw_info = type_feedback_info();
if (raw_info->IsTypeFeedbackInfo()) {
TypeFeedbackCells* type_feedback_cells =
@@ -8805,7 +8841,8 @@ void Code::ClearTypeFeedbackCells(Heap* heap) {
bool Code::allowed_in_shared_map_code_cache() {
return is_keyed_load_stub() || is_keyed_store_stub() ||
- (is_compare_ic_stub() && compare_state() == CompareIC::KNOWN_OBJECTS);
+ (is_compare_ic_stub() &&
+ ICCompareStub::CompareState(stub_info()) == CompareIC::KNOWN_OBJECT);
}
@@ -8814,14 +8851,6 @@ void Code::MakeCodeAgeSequenceYoung(byte* sequence) {
}
-void Code::MakeYoung() {
- byte* sequence = FindCodeAgeSequence();
- if (sequence != NULL) {
- PatchPlatformCodeAge(sequence, kNoAge, NO_MARKING_PARITY);
- }
-}
-
-
void Code::MakeOlder(MarkingParity current_parity) {
byte* sequence = FindCodeAgeSequence();
if (sequence != NULL) {
@@ -8848,11 +8877,10 @@ bool Code::IsOld() {
byte* Code::FindCodeAgeSequence() {
return FLAG_age_code &&
- strlen(FLAG_stop_at) == 0 &&
- !ProfileEntryHookStub::HasEntryHook() &&
+ prologue_offset() != kPrologueOffsetNotSet &&
(kind() == OPTIMIZED_FUNCTION ||
(kind() == FUNCTION && !has_debug_break_slots()))
- ? FindPlatformCodeAgeSequence()
+ ? instruction_start() + prologue_offset()
: NULL;
}
@@ -8902,6 +8930,49 @@ Code* Code::GetCodeAgeStub(Age age, MarkingParity parity) {
}
+void Code::PrintDeoptLocation(int bailout_id) {
+ const char* last_comment = NULL;
+ int mask = RelocInfo::ModeMask(RelocInfo::COMMENT)
+ | RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
+ for (RelocIterator it(this, mask); !it.done(); it.next()) {
+ RelocInfo* info = it.rinfo();
+ if (info->rmode() == RelocInfo::COMMENT) {
+ last_comment = reinterpret_cast<const char*>(info->data());
+ } else if (last_comment != NULL &&
+ bailout_id == Deoptimizer::GetDeoptimizationId(
+ info->target_address(), Deoptimizer::EAGER)) {
+ CHECK(info->rmode() == RelocInfo::RUNTIME_ENTRY);
+ PrintF(" %s\n", last_comment);
+ return;
+ }
+ }
+}
+
+
+// Identify kind of code.
+const char* Code::Kind2String(Kind kind) {
+ switch (kind) {
+ case FUNCTION: return "FUNCTION";
+ case OPTIMIZED_FUNCTION: return "OPTIMIZED_FUNCTION";
+ case COMPILED_STUB: return "COMPILED_STUB";
+ case STUB: return "STUB";
+ case BUILTIN: return "BUILTIN";
+ case LOAD_IC: return "LOAD_IC";
+ case KEYED_LOAD_IC: return "KEYED_LOAD_IC";
+ case STORE_IC: return "STORE_IC";
+ case KEYED_STORE_IC: return "KEYED_STORE_IC";
+ case CALL_IC: return "CALL_IC";
+ case KEYED_CALL_IC: return "KEYED_CALL_IC";
+ case UNARY_OP_IC: return "UNARY_OP_IC";
+ case BINARY_OP_IC: return "BINARY_OP_IC";
+ case COMPARE_IC: return "COMPARE_IC";
+ case TO_BOOLEAN_IC: return "TO_BOOLEAN_IC";
+ }
+ UNREACHABLE();
+ return NULL;
+}
+
+
#ifdef ENABLE_DISASSEMBLER
void DeoptimizationInputData::DeoptimizationInputDataPrint(FILE* out) {
@@ -8961,6 +9032,12 @@ void DeoptimizationInputData::DeoptimizationInputDataPrint(FILE* out) {
break;
}
+ case Translation::COMPILED_STUB_FRAME: {
+ Code::Kind stub_kind = static_cast<Code::Kind>(iterator.Next());
+ FPrintF(out, "{kind=%d}", stub_kind);
+ break;
+ }
+
case Translation::ARGUMENTS_ADAPTOR_FRAME:
case Translation::CONSTRUCT_STUB_FRAME: {
int function_id = iterator.Next();
@@ -9001,8 +9078,7 @@ void DeoptimizationInputData::DeoptimizationInputDataPrint(FILE* out) {
case Translation::UINT32_REGISTER: {
int reg_code = iterator.Next();
- FPrintF(out,
- "{input=%s (unsigned)}",
+ FPrintF(out, "{input=%s (unsigned)}",
converter.NameOfCPURegister(reg_code));
break;
}
@@ -9044,8 +9120,14 @@ void DeoptimizationInputData::DeoptimizationInputDataPrint(FILE* out) {
break;
}
- case Translation::ARGUMENTS_OBJECT:
+ case Translation::ARGUMENTS_OBJECT: {
+ bool args_known = iterator.Next();
+ int args_index = iterator.Next();
+ int args_length = iterator.Next();
+ FPrintF(out, "{index=%d, length=%d, known=%d}",
+ args_index, args_length, args_known);
break;
+ }
}
FPrintF(out, "\n");
}
@@ -9070,38 +9152,16 @@ void DeoptimizationOutputData::DeoptimizationOutputDataPrint(FILE* out) {
}
-// Identify kind of code.
-const char* Code::Kind2String(Kind kind) {
- switch (kind) {
- case FUNCTION: return "FUNCTION";
- case OPTIMIZED_FUNCTION: return "OPTIMIZED_FUNCTION";
- case STUB: return "STUB";
- case BUILTIN: return "BUILTIN";
- case LOAD_IC: return "LOAD_IC";
- case KEYED_LOAD_IC: return "KEYED_LOAD_IC";
- case STORE_IC: return "STORE_IC";
- case KEYED_STORE_IC: return "KEYED_STORE_IC";
- case CALL_IC: return "CALL_IC";
- case KEYED_CALL_IC: return "KEYED_CALL_IC";
- case UNARY_OP_IC: return "UNARY_OP_IC";
- case BINARY_OP_IC: return "BINARY_OP_IC";
- case COMPARE_IC: return "COMPARE_IC";
- case TO_BOOLEAN_IC: return "TO_BOOLEAN_IC";
- }
- UNREACHABLE();
- return NULL;
-}
-
-
const char* Code::ICState2String(InlineCacheState state) {
switch (state) {
case UNINITIALIZED: return "UNINITIALIZED";
case PREMONOMORPHIC: return "PREMONOMORPHIC";
case MONOMORPHIC: return "MONOMORPHIC";
case MONOMORPHIC_PROTOTYPE_FAILURE: return "MONOMORPHIC_PROTOTYPE_FAILURE";
+ case POLYMORPHIC: return "POLYMORPHIC";
case MEGAMORPHIC: return "MEGAMORPHIC";
- case DEBUG_BREAK: return "DEBUG_BREAK";
- case DEBUG_PREPARE_STEP_IN: return "DEBUG_PREPARE_STEP_IN";
+ case GENERIC: return "GENERIC";
+ case DEBUG_STUB: return "DEBUG_STUB";
}
UNREACHABLE();
return NULL;
@@ -9160,11 +9220,15 @@ void Code::Disassemble(const char* name, FILE* out) {
FPrintF(out, "argc = %d\n", arguments_count());
}
if (is_compare_ic_stub()) {
- CompareIC::State state = CompareIC::ComputeState(this);
- FPrintF(out, "compare_state = %s\n", CompareIC::GetStateName(state));
- }
- if (is_compare_ic_stub() && major_key() == CodeStub::CompareIC) {
- Token::Value op = CompareIC::ComputeOperation(this);
+ ASSERT(major_key() == CodeStub::CompareIC);
+ CompareIC::State left_state, right_state, handler_state;
+ Token::Value op;
+ ICCompareStub::DecodeMinorKey(stub_info(), &left_state, &right_state,
+ &handler_state, &op);
+ FPrintF(out, "compare_state = %s*%s -> %s\n",
+ CompareIC::GetStateName(left_state),
+ CompareIC::GetStateName(right_state),
+ CompareIC::GetStateName(handler_state));
FPrintF(out, "compare_operation = %s\n", Token::Name(op));
}
}
@@ -9190,7 +9254,7 @@ void Code::Disassemble(const char* name, FILE* out) {
}
PrintF("\n");
- if (kind() == OPTIMIZED_FUNCTION) {
+ if (kind() == OPTIMIZED_FUNCTION || kind() == COMPILED_STUB) {
SafepointTable table(this);
FPrintF(out, "Safepoints (size = %u)\n", table.size());
for (unsigned i = 0; i < table.length(); i++) {
@@ -9210,8 +9274,6 @@ void Code::Disassemble(const char* name, FILE* out) {
FPrintF(out, "\n");
}
FPrintF(out, "\n");
- // Just print if type feedback info is ever used for optimized code.
- ASSERT(type_feedback_info()->IsUndefined());
} else if (kind() == FUNCTION) {
unsigned offset = stack_check_table_offset();
// If there is no stack check table, the "table start" will at or after
@@ -9250,12 +9312,12 @@ MaybeObject* JSObject::SetFastElementsCapacityAndLength(
Heap* heap = GetHeap();
// We should never end in here with a pixel or external array.
ASSERT(!HasExternalArrayElements());
+ ASSERT(!map()->is_observed());
// Allocate a new fast elements backing store.
FixedArray* new_elements;
- { MaybeObject* maybe = heap->AllocateFixedArrayWithHoles(capacity);
- if (!maybe->To(&new_elements)) return maybe;
- }
+ MaybeObject* maybe = heap->AllocateUninitializedFixedArray(capacity);
+ if (!maybe->To(&new_elements)) return maybe;
ElementsKind elements_kind = GetElementsKind();
ElementsKind new_elements_kind;
@@ -9278,11 +9340,11 @@ MaybeObject* JSObject::SetFastElementsCapacityAndLength(
}
}
FixedArrayBase* old_elements = elements();
- ElementsAccessor* accessor = ElementsAccessor::ForKind(elements_kind);
- { MaybeObject* maybe_obj =
- accessor->CopyElements(this, new_elements, new_elements_kind);
- if (maybe_obj->IsFailure()) return maybe_obj;
- }
+ ElementsAccessor* accessor = ElementsAccessor::ForKind(new_elements_kind);
+ MaybeObject* maybe_obj =
+ accessor->CopyElements(this, new_elements, elements_kind);
+ if (maybe_obj->IsFailure()) return maybe_obj;
+
if (elements_kind != NON_STRICT_ARGUMENTS_ELEMENTS) {
Map* new_map = map();
if (new_elements_kind != elements_kind) {
@@ -9315,6 +9377,7 @@ MaybeObject* JSObject::SetFastDoubleElementsCapacityAndLength(
Heap* heap = GetHeap();
// We should never end in here with a pixel or external array.
ASSERT(!HasExternalArrayElements());
+ ASSERT(!map()->is_observed());
FixedArrayBase* elems;
{ MaybeObject* maybe_obj =
@@ -9337,9 +9400,9 @@ MaybeObject* JSObject::SetFastDoubleElementsCapacityAndLength(
}
FixedArrayBase* old_elements = elements();
- ElementsAccessor* accessor = ElementsAccessor::ForKind(elements_kind);
+ ElementsAccessor* accessor = ElementsAccessor::ForKind(FAST_DOUBLE_ELEMENTS);
{ MaybeObject* maybe_obj =
- accessor->CopyElements(this, elems, FAST_DOUBLE_ELEMENTS);
+ accessor->CopyElements(this, elems, elements_kind);
if (maybe_obj->IsFailure()) return maybe_obj;
}
if (elements_kind != NON_STRICT_ARGUMENTS_ELEMENTS) {
@@ -9363,19 +9426,10 @@ MaybeObject* JSObject::SetFastDoubleElementsCapacityAndLength(
}
-MaybeObject* JSArray::Initialize(int capacity) {
- Heap* heap = GetHeap();
+MaybeObject* JSArray::Initialize(int capacity, int length) {
ASSERT(capacity >= 0);
- set_length(Smi::FromInt(0));
- FixedArray* new_elements;
- if (capacity == 0) {
- new_elements = heap->empty_fixed_array();
- } else {
- MaybeObject* maybe_obj = heap->AllocateFixedArrayWithHoles(capacity);
- if (!maybe_obj->To(&new_elements)) return maybe_obj;
- }
- set_elements(new_elements);
- return this;
+ return GetHeap()->AllocateJSArrayStorage(this, length, capacity,
+ INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
}
@@ -9385,10 +9439,84 @@ void JSArray::Expand(int required_size) {
}
+// Returns false if the passed-in index is marked non-configurable,
+// which will cause the ES5 truncation operation to halt, and thus
+// no further old values need be collected.
+static bool GetOldValue(Isolate* isolate,
+ Handle<JSObject> object,
+ uint32_t index,
+ List<Handle<Object> >* old_values,
+ List<Handle<String> >* indices) {
+ PropertyAttributes attributes = object->GetLocalElementAttribute(index);
+ ASSERT(attributes != ABSENT);
+ if (attributes == DONT_DELETE) return false;
+ old_values->Add(object->GetLocalElementAccessorPair(index) == NULL
+ ? Object::GetElement(object, index)
+ : Handle<Object>::cast(isolate->factory()->the_hole_value()));
+ indices->Add(isolate->factory()->Uint32ToString(index));
+ return true;
+}
+
+
MaybeObject* JSArray::SetElementsLength(Object* len) {
// We should never end in here with a pixel or external array.
ASSERT(AllowsSetElementsLength());
- return GetElementsAccessor()->SetLength(this, len);
+ if (!(FLAG_harmony_observation && map()->is_observed()))
+ return GetElementsAccessor()->SetLength(this, len);
+
+ Isolate* isolate = GetIsolate();
+ HandleScope scope(isolate);
+ Handle<JSArray> self(this);
+ List<Handle<String> > indices;
+ List<Handle<Object> > old_values;
+ Handle<Object> old_length_handle(self->length(), isolate);
+ Handle<Object> new_length_handle(len, isolate);
+ uint32_t old_length = 0;
+ CHECK(old_length_handle->ToArrayIndex(&old_length));
+ uint32_t new_length = 0;
+ if (!new_length_handle->ToArrayIndex(&new_length))
+ return Failure::InternalError();
+
+ // Observed arrays should always be in dictionary mode;
+ // if they were in fast mode, the below is slower than necessary
+ // as it iterates over the array backing store multiple times.
+ ASSERT(self->HasDictionaryElements());
+ static const PropertyAttributes kNoAttrFilter = NONE;
+ int num_elements = self->NumberOfLocalElements(kNoAttrFilter);
+ if (num_elements > 0) {
+ if (old_length == static_cast<uint32_t>(num_elements)) {
+ // Simple case for arrays without holes.
+ for (uint32_t i = old_length - 1; i + 1 > new_length; --i) {
+ if (!GetOldValue(isolate, self, i, &old_values, &indices)) break;
+ }
+ } else {
+ // For sparse arrays, only iterate over existing elements.
+ Handle<FixedArray> keys = isolate->factory()->NewFixedArray(num_elements);
+ self->GetLocalElementKeys(*keys, kNoAttrFilter);
+ while (num_elements-- > 0) {
+ uint32_t index = NumberToUint32(keys->get(num_elements));
+ if (index < new_length) break;
+ if (!GetOldValue(isolate, self, index, &old_values, &indices)) break;
+ }
+ }
+ }
+
+ MaybeObject* result =
+ self->GetElementsAccessor()->SetLength(*self, *new_length_handle);
+ Handle<Object> hresult;
+ if (!result->ToHandle(&hresult, isolate)) return result;
+
+ CHECK(self->length()->ToArrayIndex(&new_length));
+ if (old_length != new_length) {
+ for (int i = 0; i < indices.length(); ++i) {
+ JSObject::EnqueueChangeRecord(
+ self, "deleted", indices[i], old_values[i]);
+ }
+ JSObject::EnqueueChangeRecord(
+ self, "updated", isolate->factory()->length_string(),
+ old_length_handle);
+ }
+ return *hresult;
}
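
For observed arrays, SetElementsLength first collects the values about to be truncated away (highest index first, stopping at the first non-configurable element), applies the length change, then enqueues one "deleted" record per removed index plus an "updated" record for length. A simplified standalone sketch in which every element is configurable (invented types, not V8's observation machinery):

#include <cassert>
#include <map>
#include <string>
#include <vector>

struct ChangeRecord { std::string type; std::string name; };

// Sparse array modeled as index -> value; returns the emitted records.
std::vector<ChangeRecord> TruncateObserved(std::map<unsigned, int>* elements,
                                           unsigned* length,
                                           unsigned new_length) {
  std::vector<ChangeRecord> records;
  // Walk existing elements from the highest index down, like the
  // sparse branch above that iterates the collected keys.
  for (auto it = elements->rbegin(); it != elements->rend(); ++it) {
    if (it->first < new_length) break;
    records.push_back({"deleted", std::to_string(it->first)});
  }
  elements->erase(elements->lower_bound(new_length), elements->end());
  if (*length != new_length) {
    *length = new_length;
    records.push_back({"updated", "length"});
  }
  return records;
}

int main() {
  std::map<unsigned, int> elements = {{0, 1}, {5, 2}, {9, 3}};
  unsigned length = 10;
  std::vector<ChangeRecord> records = TruncateObserved(&elements, &length, 4);
  assert(records.size() == 3);     // Two deletions plus the length update.
  assert(records[0].name == "9");  // Highest index first.
  assert(elements.size() == 1 && length == 4);
  return 0;
}
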
@@ -9468,13 +9596,116 @@ void Map::ZapPrototypeTransitions() {
}
+DependentCode::GroupStartIndexes::GroupStartIndexes(DependentCode* entries) {
+ Recompute(entries);
+}
+
+
+void DependentCode::GroupStartIndexes::Recompute(DependentCode* entries) {
+ start_indexes_[0] = 0;
+ for (int g = 1; g <= kGroupCount; g++) {
+ int count = entries->number_of_entries(static_cast<DependencyGroup>(g - 1));
+ start_indexes_[g] = start_indexes_[g - 1] + count;
+ }
+}
+
+
+Handle<DependentCode> DependentCode::Insert(Handle<DependentCode> entries,
+ DependencyGroup group,
+ Handle<Code> value) {
+ GroupStartIndexes starts(*entries);
+ int start = starts.at(group);
+ int end = starts.at(group + 1);
+ int number_of_entries = starts.number_of_entries();
+ if (start < end && entries->code_at(end - 1) == *value) {
+ // Do not append the code if it is already in the array.
+ // It is sufficient to just check only the last element because
+ // we process embedded maps of an optimized code in one batch.
+ return entries;
+ }
+ if (entries->length() < kCodesStartIndex + number_of_entries + 1) {
+ Factory* factory = entries->GetIsolate()->factory();
+ int capacity = kCodesStartIndex + number_of_entries + 1;
+ if (capacity > 5) capacity = capacity * 5 / 4;
+ Handle<DependentCode> new_entries = Handle<DependentCode>::cast(
+ factory->CopySizeFixedArray(entries, capacity));
+ // The number of codes can change after GC.
+ starts.Recompute(*entries);
+ start = starts.at(group);
+ end = starts.at(group + 1);
+ number_of_entries = starts.number_of_entries();
+ for (int i = 0; i < number_of_entries; i++) {
+ entries->clear_code_at(i);
+ }
+ // If the old fixed array was empty, we need to reset counters of the
+ // new array.
+ if (number_of_entries == 0) {
+ for (int g = 0; g < kGroupCount; g++) {
+ new_entries->set_number_of_entries(static_cast<DependencyGroup>(g), 0);
+ }
+ }
+ entries = new_entries;
+ }
+ entries->ExtendGroup(group);
+ entries->set_code_at(end, *value);
+ entries->set_number_of_entries(group, end + 1 - start);
+ return entries;
+}
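
DependentCode keeps all groups' entries back to back in one array with a per-group count; GroupStartIndexes is just the prefix sum of those counts, so inserting into group g means appending at that group's end offset. A standalone sketch of the grouped layout (a vector insert stands in for the grow-and-shift handling above):

#include <cassert>
#include <vector>

const int kGroupCount = 3;

struct GroupedCodes {
  int counts[kGroupCount] = {0, 0, 0};
  std::vector<int> codes;  // All groups, concatenated.

  // GroupStartIndexes::Recompute analogue: prefix sum of counts.
  int StartOf(int group) const {
    int start = 0;
    for (int g = 0; g < group; g++) start += counts[g];
    return start;
  }

  void Insert(int group, int code) {
    int end = StartOf(group) + counts[group];
    codes.insert(codes.begin() + end, code);  // Append at the group's end.
    counts[group]++;
  }
};

int main() {
  GroupedCodes dc;
  dc.Insert(0, 100);
  dc.Insert(2, 300);
  dc.Insert(0, 101);
  assert(dc.StartOf(1) == 2);  // Group 0 now holds two entries.
  assert((dc.codes == std::vector<int>{100, 101, 300}));
  return 0;
}
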
+
+
+bool DependentCode::Contains(DependencyGroup group, Code* code) {
+ GroupStartIndexes starts(this);
+ int number_of_entries = starts.at(kGroupCount);
+ for (int i = 0; i < number_of_entries; i++) {
+ if (code_at(i) == code) return true;
+ }
+ return false;
+}
+
+
+class DeoptimizeDependentCodeFilter : public OptimizedFunctionFilter {
+ public:
+ virtual bool TakeFunction(JSFunction* function) {
+ return function->code()->marked_for_deoptimization();
+ }
+};
+
+
+void DependentCode::DeoptimizeDependentCodeGroup(
+ DependentCode::DependencyGroup group) {
+ AssertNoAllocation no_allocation_scope;
+ DependentCode::GroupStartIndexes starts(this);
+ int start = starts.at(group);
+ int end = starts.at(group + 1);
+ int number_of_entries = starts.at(DependentCode::kGroupCount);
+ if (start == end) return;
+ for (int i = start; i < end; i++) {
+ Code* code = code_at(i);
+ code->set_marked_for_deoptimization(true);
+ }
+ // Compact the array by moving all subsequent groups to fill in the new holes.
+ for (int src = end, dst = start; src < number_of_entries; src++, dst++) {
+ set_code_at(dst, code_at(src));
+ }
+ // Now the holes are at the end of the array; zap them for the heap verifier.
+ int removed = end - start;
+ for (int i = number_of_entries - removed; i < number_of_entries; i++) {
+ clear_code_at(i);
+ }
+ set_number_of_entries(group, 0);
+ DeoptimizeDependentCodeFilter filter;
+ Deoptimizer::DeoptimizeAllFunctionsWith(&filter);
+}
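
After marking a group's code objects, the method compacts the array in place: later groups slide left over the hole and the freed tail slots are cleared for the heap verifier. A minimal sketch of that compaction:

#include <cassert>
#include <vector>

void RemoveRange(std::vector<int>* codes, int start, int end, int total) {
  for (int src = end, dst = start; src < total; src++, dst++) {
    (*codes)[dst] = (*codes)[src];  // Slide later groups left.
  }
  int removed = end - start;
  for (int i = total - removed; i < total; i++) {
    (*codes)[i] = 0;                // Zap the now-unused tail.
  }
}

int main() {
  std::vector<int> codes = {1, 2, 3, 4, 5};
  RemoveRange(&codes, 1, 3, 5);     // Drop the entries in [1, 3).
  assert((codes == std::vector<int>{1, 4, 5, 0, 0}));
  return 0;
}
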
+
+
MaybeObject* JSReceiver::SetPrototype(Object* value,
bool skip_hidden_prototypes) {
#ifdef DEBUG
int size = Size();
#endif
- Heap* heap = GetHeap();
+ Isolate* isolate = GetIsolate();
+ Heap* heap = isolate->heap();
// Silently ignore the change if value is not a JSObject or null.
// SpiderMonkey behaves this way.
if (!value->IsJSReceiver() && !value->IsNull()) return value;
@@ -9488,22 +9719,24 @@ MaybeObject* JSReceiver::SetPrototype(Object* value,
// or [[Extensible]] must not violate the invariants defined in the preceding
// paragraph.
if (!this->map()->is_extensible()) {
- HandleScope scope(heap->isolate());
- Handle<Object> handle(this, heap->isolate());
- return heap->isolate()->Throw(
- *FACTORY->NewTypeError("non_extensible_proto",
- HandleVector<Object>(&handle, 1)));
+ HandleScope scope(isolate);
+ Handle<Object> handle(this, isolate);
+ return isolate->Throw(
+ *isolate->factory()->NewTypeError("non_extensible_proto",
+ HandleVector<Object>(&handle, 1)));
}
// Before we can set the prototype we need to be sure
// prototype cycles are prevented.
// It is sufficient to validate that the receiver is not in the new prototype
// chain.
- for (Object* pt = value; pt != heap->null_value(); pt = pt->GetPrototype()) {
+ for (Object* pt = value;
+ pt != heap->null_value();
+ pt = pt->GetPrototype(isolate)) {
if (JSReceiver::cast(pt) == this) {
// Cycle detected.
- HandleScope scope(heap->isolate());
- return heap->isolate()->Throw(
+ HandleScope scope(isolate);
+ return isolate->Throw(
*FACTORY->NewError("cyclic_proto", HandleVector<Object>(NULL, 0)));
}
}
@@ -9517,7 +9750,7 @@ MaybeObject* JSReceiver::SetPrototype(Object* value,
while (current_proto->IsJSObject() &&
JSReceiver::cast(current_proto)->map()->is_hidden_prototype()) {
real_receiver = JSReceiver::cast(current_proto);
- current_proto = current_proto->GetPrototype();
+ current_proto = current_proto->GetPrototype(isolate);
}
}
@@ -9565,112 +9798,51 @@ MaybeObject* JSObject::EnsureCanContainElements(Arguments* args,
}
-JSObject::LocalElementType JSObject::GetLocalElementType(uint32_t index) {
- // Check access rights if needed.
- if (IsAccessCheckNeeded()) {
- Heap* heap = GetHeap();
- if (!heap->isolate()->MayIndexedAccess(this, index, v8::ACCESS_HAS)) {
- heap->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
- return UNDEFINED_ELEMENT;
- }
+PropertyType JSObject::GetLocalPropertyType(String* name) {
+ uint32_t index = 0;
+ if (name->AsArrayIndex(&index)) {
+ return GetLocalElementType(index);
}
+ LookupResult lookup(GetIsolate());
+ LocalLookup(name, &lookup, true);
+ return lookup.type();
+}
- if (IsJSGlobalProxy()) {
- Object* proto = GetPrototype();
- if (proto->IsNull()) return UNDEFINED_ELEMENT;
- ASSERT(proto->IsJSGlobalObject());
- return JSObject::cast(proto)->GetLocalElementType(index);
- }
- // Check for lookup interceptor
- if (HasIndexedInterceptor()) {
- return GetElementAttributeWithInterceptor(this, index, false) != ABSENT
- ? INTERCEPTED_ELEMENT : UNDEFINED_ELEMENT;
+PropertyType JSObject::GetLocalElementType(uint32_t index) {
+ return GetElementsAccessor()->GetType(this, this, index);
+}
+
+
+AccessorPair* JSObject::GetLocalPropertyAccessorPair(String* name) {
+ uint32_t index = 0;
+ if (name->AsArrayIndex(&index)) {
+ return GetLocalElementAccessorPair(index);
}
- // Handle [] on String objects.
- if (this->IsStringObjectWithCharacterAt(index)) {
- return STRING_CHARACTER_ELEMENT;
+ LookupResult lookup(GetIsolate());
+ LocalLookupRealNamedProperty(name, &lookup);
+
+ if (lookup.IsPropertyCallbacks() &&
+ lookup.GetCallbackObject()->IsAccessorPair()) {
+ return AccessorPair::cast(lookup.GetCallbackObject());
}
+ return NULL;
+}
- switch (GetElementsKind()) {
- case FAST_SMI_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_ELEMENTS: {
- uint32_t length = IsJSArray() ?
- static_cast<uint32_t>
- (Smi::cast(JSArray::cast(this)->length())->value()) :
- static_cast<uint32_t>(FixedArray::cast(elements())->length());
- if ((index < length) &&
- !FixedArray::cast(elements())->get(index)->IsTheHole()) {
- return FAST_ELEMENT;
- }
- break;
- }
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS: {
- uint32_t length = IsJSArray() ?
- static_cast<uint32_t>
- (Smi::cast(JSArray::cast(this)->length())->value()) :
- static_cast<uint32_t>(FixedDoubleArray::cast(elements())->length());
- if ((index < length) &&
- !FixedDoubleArray::cast(elements())->is_the_hole(index)) {
- return FAST_ELEMENT;
- }
- break;
- }
- case EXTERNAL_PIXEL_ELEMENTS: {
- ExternalPixelArray* pixels = ExternalPixelArray::cast(elements());
- if (index < static_cast<uint32_t>(pixels->length())) return FAST_ELEMENT;
- break;
- }
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS: {
- ExternalArray* array = ExternalArray::cast(elements());
- if (index < static_cast<uint32_t>(array->length())) return FAST_ELEMENT;
- break;
- }
- case DICTIONARY_ELEMENTS: {
- if (element_dictionary()->FindEntry(index) !=
- SeededNumberDictionary::kNotFound) {
- return DICTIONARY_ELEMENT;
- }
- break;
- }
- case NON_STRICT_ARGUMENTS_ELEMENTS: {
- // Aliased parameters and non-aliased elements in a fast backing store
- // behave as FAST_ELEMENT. Non-aliased elements in a dictionary
- // backing store behave as DICTIONARY_ELEMENT.
- FixedArray* parameter_map = FixedArray::cast(elements());
- uint32_t length = parameter_map->length();
- Object* probe =
- index < (length - 2) ? parameter_map->get(index + 2) : NULL;
- if (probe != NULL && !probe->IsTheHole()) return FAST_ELEMENT;
- // If not aliased, check the arguments.
- FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
- if (arguments->IsDictionary()) {
- SeededNumberDictionary* dictionary =
- SeededNumberDictionary::cast(arguments);
- if (dictionary->FindEntry(index) != SeededNumberDictionary::kNotFound) {
- return DICTIONARY_ELEMENT;
- }
- } else {
- length = arguments->length();
- probe = (index < length) ? arguments->get(index) : NULL;
- if (probe != NULL && !probe->IsTheHole()) return FAST_ELEMENT;
- }
- break;
- }
+
+AccessorPair* JSObject::GetLocalElementAccessorPair(uint32_t index) {
+ if (IsJSGlobalProxy()) {
+ Object* proto = GetPrototype();
+ if (proto->IsNull()) return NULL;
+ ASSERT(proto->IsJSGlobalObject());
+ return JSObject::cast(proto)->GetLocalElementAccessorPair(index);
}
- return UNDEFINED_ELEMENT;
+ // Check for lookup interceptor.
+ if (HasIndexedInterceptor()) return NULL;
+
+ return GetElementsAccessor()->GetAccessorPair(this, this, index);
}
@@ -9724,8 +9896,9 @@ MaybeObject* JSObject::GetElementWithCallback(Object* receiver,
ASSERT(!structure->IsForeign());
// api style callbacks.
- if (structure->IsAccessorInfo()) {
- Handle<AccessorInfo> data(AccessorInfo::cast(structure));
+ if (structure->IsExecutableAccessorInfo()) {
+ Handle<ExecutableAccessorInfo> data(
+ ExecutableAccessorInfo::cast(structure));
Object* fun_obj = data->getter();
v8::AccessorGetter call_fun = v8::ToCData<v8::AccessorGetter>(fun_obj);
if (call_fun == NULL) return isolate->heap()->undefined_value();
@@ -9761,6 +9934,11 @@ MaybeObject* JSObject::GetElementWithCallback(Object* receiver,
return isolate->heap()->undefined_value();
}
+ if (structure->IsDeclaredAccessorInfo()) {
+ // TODO(dcarney): Handle correctly.
+ return isolate->heap()->undefined_value();
+ }
+
UNREACHABLE();
return NULL;
}
@@ -9784,11 +9962,12 @@ MaybeObject* JSObject::SetElementWithCallback(Object* structure,
// callbacks should be phased out.
ASSERT(!structure->IsForeign());
- if (structure->IsAccessorInfo()) {
+ if (structure->IsExecutableAccessorInfo()) {
// api style callbacks
Handle<JSObject> self(this);
Handle<JSObject> holder_handle(JSObject::cast(holder));
- Handle<AccessorInfo> data(AccessorInfo::cast(structure));
+ Handle<ExecutableAccessorInfo> data(
+ ExecutableAccessorInfo::cast(structure));
Object* call_obj = data->setter();
v8::AccessorSetter call_fun = v8::ToCData<v8::AccessorSetter>(call_obj);
if (call_fun == NULL) return value;
@@ -9809,7 +9988,7 @@ MaybeObject* JSObject::SetElementWithCallback(Object* structure,
}
if (structure->IsAccessorPair()) {
- Handle<Object> setter(AccessorPair::cast(structure)->setter());
+ Handle<Object> setter(AccessorPair::cast(structure)->setter(), isolate);
if (setter->IsSpecFunction()) {
// TODO(rossberg): nicer would be to cast to some JSCallable here...
return SetPropertyWithDefinedSetter(JSReceiver::cast(*setter), value);
@@ -9826,6 +10005,9 @@ MaybeObject* JSObject::SetElementWithCallback(Object* structure,
}
}
+ // TODO(dcarney): Handle correctly.
+ if (structure->IsDeclaredAccessorInfo()) return value;
+
UNREACHABLE();
return NULL;
}
@@ -9931,6 +10113,14 @@ MaybeObject* JSObject::SetFastElement(uint32_t index,
}
// Convert to fast double elements if appropriate.
if (HasFastSmiElements() && !value->IsSmi() && value->IsNumber()) {
+ // Consider fixing the boilerplate as well if we have one.
+ ElementsKind to_kind = IsHoleyElementsKind(elements_kind)
+ ? FAST_HOLEY_DOUBLE_ELEMENTS
+ : FAST_DOUBLE_ELEMENTS;
+
+ MaybeObject* maybe_failure = UpdateAllocationSiteInfo(to_kind);
+ if (maybe_failure->IsFailure()) return maybe_failure;
+
MaybeObject* maybe =
SetFastDoubleElementsCapacityAndLength(new_capacity, array_length);
if (maybe->IsFailure()) return maybe;
@@ -9944,6 +10134,10 @@ MaybeObject* JSObject::SetFastElement(uint32_t index,
ElementsKind kind = HasFastHoleyElements()
? FAST_HOLEY_ELEMENTS
: FAST_ELEMENTS;
+
+ MaybeObject* maybe_failure = UpdateAllocationSiteInfo(kind);
+ if (maybe_failure->IsFailure()) return maybe_failure;
+
MaybeObject* maybe_new_map = GetElementsTransitionMap(GetIsolate(),
kind);
if (!maybe_new_map->To(&new_map)) return maybe_new_map;
@@ -9979,7 +10173,7 @@ MaybeObject* JSObject::SetFastElement(uint32_t index,
MaybeObject* JSObject::SetDictionaryElement(uint32_t index,
- Object* value,
+ Object* value_raw,
PropertyAttributes attributes,
StrictModeFlag strict_mode,
bool check_prototype,
@@ -9987,24 +10181,23 @@ MaybeObject* JSObject::SetDictionaryElement(uint32_t index,
ASSERT(HasDictionaryElements() || HasDictionaryArgumentsElements());
Isolate* isolate = GetIsolate();
Heap* heap = isolate->heap();
+ Handle<JSObject> self(this);
+ Handle<Object> value(value_raw, isolate);
// Insert element in the dictionary.
- FixedArray* elements = FixedArray::cast(this->elements());
+ Handle<FixedArray> elements(FixedArray::cast(this->elements()));
bool is_arguments =
(elements->map() == heap->non_strict_arguments_elements_map());
- SeededNumberDictionary* dictionary = NULL;
- if (is_arguments) {
- dictionary = SeededNumberDictionary::cast(elements->get(1));
- } else {
- dictionary = SeededNumberDictionary::cast(elements);
- }
+ Handle<SeededNumberDictionary> dictionary(is_arguments
+ ? SeededNumberDictionary::cast(elements->get(1))
+ : SeededNumberDictionary::cast(*elements));
int entry = dictionary->FindEntry(index);
if (entry != SeededNumberDictionary::kNotFound) {
Object* element = dictionary->ValueAt(entry);
PropertyDetails details = dictionary->DetailsAt(entry);
if (details.type() == CALLBACKS && set_mode == SET_PROPERTY) {
- return SetElementWithCallback(element, index, value, this, strict_mode);
+ return SetElementWithCallback(element, index, *value, this, strict_mode);
} else {
dictionary->UpdateMaxNumberKey(index);
// If a value has not been initialized we allow writing to it even if it
@@ -10018,7 +10211,7 @@ MaybeObject* JSObject::SetDictionaryElement(uint32_t index,
if (strict_mode == kNonStrictMode) {
return isolate->heap()->undefined_value();
} else {
- Handle<Object> holder(this);
+ Handle<Object> holder(this, isolate);
Handle<Object> number = isolate->factory()->NewNumberFromUint(index);
Handle<Object> args[2] = { number, holder };
Handle<Object> error =
@@ -10033,24 +10226,24 @@ MaybeObject* JSObject::SetDictionaryElement(uint32_t index,
Context* context = Context::cast(elements->get(0));
int context_index = entry->aliased_context_slot();
ASSERT(!context->get(context_index)->IsTheHole());
- context->set(context_index, value);
+ context->set(context_index, *value);
// For elements that are still writable we keep slow aliasing.
- if (!details.IsReadOnly()) value = element;
+ if (!details.IsReadOnly()) value = handle(element, isolate);
}
- dictionary->ValueAtPut(entry, value);
+ dictionary->ValueAtPut(entry, *value);
}
} else {
// Index not already used. Look for an accessor in the prototype chain.
+ // Can cause GC!
if (check_prototype) {
bool found;
- MaybeObject* result =
- SetElementWithCallbackSetterInPrototypes(
- index, value, &found, strict_mode);
+ MaybeObject* result = SetElementWithCallbackSetterInPrototypes(
+ index, *value, &found, strict_mode);
if (found) return result;
}
// When we set the is_extensible flag to false we always force the
// element into dictionary mode (and force them to stay there).
- if (!map()->is_extensible()) {
+ if (!self->map()->is_extensible()) {
if (strict_mode == kNonStrictMode) {
return isolate->heap()->undefined_value();
} else {
@@ -10065,30 +10258,31 @@ MaybeObject* JSObject::SetDictionaryElement(uint32_t index,
}
FixedArrayBase* new_dictionary;
PropertyDetails details = PropertyDetails(attributes, NORMAL);
- MaybeObject* maybe = dictionary->AddNumberEntry(index, value, details);
+ MaybeObject* maybe = dictionary->AddNumberEntry(index, *value, details);
if (!maybe->To(&new_dictionary)) return maybe;
- if (dictionary != SeededNumberDictionary::cast(new_dictionary)) {
+ if (*dictionary != SeededNumberDictionary::cast(new_dictionary)) {
if (is_arguments) {
elements->set(1, new_dictionary);
} else {
- set_elements(new_dictionary);
+ self->set_elements(new_dictionary);
}
- dictionary = SeededNumberDictionary::cast(new_dictionary);
+ dictionary =
+ handle(SeededNumberDictionary::cast(new_dictionary), isolate);
}
}
// Update the array length if this JSObject is an array.
- if (IsJSArray()) {
+ if (self->IsJSArray()) {
MaybeObject* result =
- JSArray::cast(this)->JSArrayUpdateLengthFromIndex(index, value);
+ JSArray::cast(*self)->JSArrayUpdateLengthFromIndex(index, *value);
if (result->IsFailure()) return result;
}
// Attempt to put this object back in fast case.
- if (ShouldConvertToFastElements()) {
+ if (self->ShouldConvertToFastElements()) {
uint32_t new_length = 0;
- if (IsJSArray()) {
- CHECK(JSArray::cast(this)->length()->ToArrayIndex(&new_length));
+ if (self->IsJSArray()) {
+ CHECK(JSArray::cast(*self)->length()->ToArrayIndex(&new_length));
} else {
new_length = dictionary->max_number_key() + 1;
}
@@ -10097,16 +10291,15 @@ MaybeObject* JSObject::SetDictionaryElement(uint32_t index,
: kDontAllowSmiElements;
bool has_smi_only_elements = false;
bool should_convert_to_fast_double_elements =
- ShouldConvertToFastDoubleElements(&has_smi_only_elements);
+ self->ShouldConvertToFastDoubleElements(&has_smi_only_elements);
if (has_smi_only_elements) {
smi_mode = kForceSmiElements;
}
MaybeObject* result = should_convert_to_fast_double_elements
- ? SetFastDoubleElementsCapacityAndLength(new_length, new_length)
- : SetFastElementsCapacityAndLength(new_length,
- new_length,
- smi_mode);
- ValidateElements();
+ ? self->SetFastDoubleElementsCapacityAndLength(new_length, new_length)
+ : self->SetFastElementsCapacityAndLength(
+ new_length, new_length, smi_mode);
+ self->ValidateElements();
if (result->IsFailure()) return result;
#ifdef DEBUG
if (FLAG_trace_normalization) {
@@ -10115,7 +10308,7 @@ MaybeObject* JSObject::SetDictionaryElement(uint32_t index,
}
#endif
}
- return value;
+ return *value;
}
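// Note on the handlification above: AddNumberEntry and the callback setter
// can allocate and thus trigger GC, which would invalidate raw Object*
// pointers such as `this` and `value_raw`. A minimal sketch of the pattern,
// with a purely illustrative allocation call:

MaybeObject* HandlificationSketch(Isolate* isolate, JSObject* raw_object) {
  HandleScope scope(isolate);
  Handle<JSObject> self(raw_object, isolate);  // GC-safe indirection
  MaybeObject* maybe = isolate->heap()->AllocateFixedArray(4);  // may GC
  Object* ignored;
  if (!maybe->ToObject(&ignored)) return maybe;
  // raw_object may now point at a moved object; *self is still valid.
  return self->elements();
}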
@@ -10277,25 +10470,21 @@ MaybeObject* JSObject::SetElement(uint32_t index,
bool check_prototype,
SetPropertyMode set_mode) {
Isolate* isolate = GetIsolate();
- HandleScope scope(isolate);
- Handle<JSObject> self(this);
- Handle<Object> value(value_raw);
// Check access rights if needed.
if (IsAccessCheckNeeded()) {
- Heap* heap = GetHeap();
- if (!heap->isolate()->MayIndexedAccess(*self, index, v8::ACCESS_SET)) {
- heap->isolate()->ReportFailedAccessCheck(*self, v8::ACCESS_SET);
- return *value;
+ if (!isolate->MayIndexedAccess(this, index, v8::ACCESS_SET)) {
+ isolate->ReportFailedAccessCheck(this, v8::ACCESS_SET);
+ return value_raw;
}
}
if (IsJSGlobalProxy()) {
Object* proto = GetPrototype();
- if (proto->IsNull()) return *value;
+ if (proto->IsNull()) return value_raw;
ASSERT(proto->IsJSGlobalObject());
return JSObject::cast(proto)->SetElement(index,
- *value,
+ value_raw,
attributes,
strict_mode,
check_prototype,
@@ -10305,7 +10494,7 @@ MaybeObject* JSObject::SetElement(uint32_t index,
// Don't allow element properties to be redefined for external arrays.
if (HasExternalArrayElements() && set_mode == DEFINE_PROPERTY) {
Handle<Object> number = isolate->factory()->NewNumberFromUint(index);
- Handle<Object> args[] = { self, number };
+ Handle<Object> args[] = { handle(this, isolate), number };
Handle<Object> error = isolate->factory()->NewTypeError(
"redef_external_array_element", HandleVector(args, ARRAY_SIZE(args)));
return isolate->Throw(*error);
@@ -10320,23 +10509,27 @@ MaybeObject* JSObject::SetElement(uint32_t index,
dictionary->set_requires_slow_elements();
}
+ if (!(FLAG_harmony_observation && map()->is_observed())) {
+ return HasIndexedInterceptor()
+ ? SetElementWithInterceptor(
+ index, value_raw, attributes, strict_mode, check_prototype, set_mode)
+ : SetElementWithoutInterceptor(
+ index, value_raw, attributes, strict_mode, check_prototype, set_mode);
+ }
+
// From here on, everything has to be handlified.
- Handle<String> name;
- Handle<Object> old_value(isolate->heap()->the_hole_value());
- Handle<Object> old_array_length;
- PropertyAttributes old_attributes = ABSENT;
- bool preexists = false;
- if (FLAG_harmony_observation && map()->is_observed()) {
- name = isolate->factory()->Uint32ToString(index);
- preexists = self->HasLocalElement(index);
- if (preexists) {
- old_attributes = self->GetLocalPropertyAttribute(*name);
- // TODO(observe): only read & set old_value if we have a data property
+ Handle<JSObject> self(this);
+ Handle<Object> value(value_raw, isolate);
+ PropertyAttributes old_attributes = self->GetLocalElementAttribute(index);
+ Handle<Object> old_value = isolate->factory()->the_hole_value();
+ Handle<Object> old_length;
+
+ if (old_attributes != ABSENT) {
+ if (self->GetLocalElementAccessorPair(index) == NULL)
old_value = Object::GetElement(self, index);
- } else if (self->IsJSArray()) {
- // Store old array length in case adding an element grows the array.
- old_array_length = handle(Handle<JSArray>::cast(self)->length());
- }
+ } else if (self->IsJSArray()) {
+ // Store old array length in case adding an element grows the array.
+ old_length = handle(Handle<JSArray>::cast(self)->length(), isolate);
}
// Check for lookup interceptor
@@ -10347,24 +10540,27 @@ MaybeObject* JSObject::SetElement(uint32_t index,
index, *value, attributes, strict_mode, check_prototype, set_mode);
Handle<Object> hresult;
- if (!result->ToHandle(&hresult)) return result;
+ if (!result->ToHandle(&hresult, isolate)) return result;
- if (FLAG_harmony_observation && map()->is_observed()) {
- PropertyAttributes new_attributes = self->GetLocalPropertyAttribute(*name);
- if (!preexists) {
- EnqueueChangeRecord(self, "new", name, old_value);
- if (self->IsJSArray() &&
- !old_array_length->SameValue(Handle<JSArray>::cast(self)->length())) {
- EnqueueChangeRecord(self, "updated",
- isolate->factory()->length_symbol(),
- old_array_length);
- }
- } else if (new_attributes != old_attributes || old_value->IsTheHole()) {
+ Handle<String> name = isolate->factory()->Uint32ToString(index);
+ PropertyAttributes new_attributes = self->GetLocalElementAttribute(index);
+ if (old_attributes == ABSENT) {
+ EnqueueChangeRecord(self, "new", name, old_value);
+ if (self->IsJSArray() &&
+ !old_length->SameValue(Handle<JSArray>::cast(self)->length())) {
+ EnqueueChangeRecord(
+ self, "updated", isolate->factory()->length_string(), old_length);
+ }
+ } else if (old_value->IsTheHole()) {
+ EnqueueChangeRecord(self, "reconfigured", name, old_value);
+ } else {
+ bool value_changed =
+ !old_value->SameValue(*Object::GetElement(self, index));
+ if (old_attributes != new_attributes) {
+ if (!value_changed) old_value = isolate->factory()->the_hole_value();
EnqueueChangeRecord(self, "reconfigured", name, old_value);
- } else {
- Handle<Object> new_value = Object::GetElement(self, index);
- if (!new_value->SameValue(*old_value))
- EnqueueChangeRecord(self, "updated", name, old_value);
+ } else if (value_changed) {
+ EnqueueChangeRecord(self, "updated", name, old_value);
}
}
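// The branching above reduces to a small decision table; a condensed sketch
// of which change record is enqueued (helper name hypothetical):

static const char* ChangeRecordType(bool was_present, bool was_hole,
                                    bool attributes_changed,
                                    bool value_changed) {
  if (!was_present) return "new";  // plus "updated" if an array grew
  if (was_hole) return "reconfigured";
  if (attributes_changed) return "reconfigured";
  return value_changed ? "updated" : NULL;  // NULL: nothing enqueued
}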
@@ -10382,6 +10578,16 @@ MaybeObject* JSObject::SetElementWithoutInterceptor(uint32_t index,
HasDictionaryArgumentsElements() ||
(attr & (DONT_DELETE | DONT_ENUM | READ_ONLY)) == 0);
Isolate* isolate = GetIsolate();
+ if (FLAG_trace_external_array_abuse &&
+ IsExternalArrayElementsKind(GetElementsKind())) {
+ CheckArrayAbuse(this, "external elements write", index);
+ }
+ if (FLAG_trace_js_array_abuse &&
+ !IsExternalArrayElementsKind(GetElementsKind())) {
+ if (IsJSArray()) {
+ CheckArrayAbuse(this, "elements write", index, true);
+ }
+ }
switch (GetElementsKind()) {
case FAST_SMI_ELEMENTS:
case FAST_ELEMENTS:
@@ -10477,13 +10683,69 @@ Handle<Object> JSObject::TransitionElementsKind(Handle<JSObject> object,
}
+MaybeObject* JSObject::UpdateAllocationSiteInfo(ElementsKind to_kind) {
+ if (!FLAG_track_allocation_sites || !IsJSArray()) {
+ return this;
+ }
+
+ AllocationSiteInfo* info = AllocationSiteInfo::FindForJSObject(this);
+ if (info == NULL) {
+ return this;
+ }
+
+ if (info->payload()->IsJSArray()) {
+ JSArray* payload = JSArray::cast(info->payload());
+ ElementsKind kind = payload->GetElementsKind();
+ if (AllocationSiteInfo::GetMode(kind, to_kind) == TRACK_ALLOCATION_SITE) {
+ // If the array is huge, it's not likely to be defined in a local
+ // function, so we shouldn't make new instances of it very often.
+ uint32_t length = 0;
+ CHECK(payload->length()->ToArrayIndex(&length));
+ if (length <= AllocationSiteInfo::kMaximumArrayBytesToPretransition) {
+ if (FLAG_trace_track_allocation_sites) {
+ PrintF(
+ "AllocationSiteInfo: JSArray %p boilerplate updated %s->%s\n",
+ reinterpret_cast<void*>(this),
+ ElementsKindToString(kind),
+ ElementsKindToString(to_kind));
+ }
+ return payload->TransitionElementsKind(to_kind);
+ }
+ }
+ } else if (info->payload()->IsJSGlobalPropertyCell()) {
+ JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(info->payload());
+ Object* cell_contents = cell->value();
+ if (cell_contents->IsSmi()) {
+ ElementsKind kind = static_cast<ElementsKind>(
+ Smi::cast(cell_contents)->value());
+ if (AllocationSiteInfo::GetMode(kind, to_kind) == TRACK_ALLOCATION_SITE) {
+ if (FLAG_trace_track_allocation_sites) {
+ PrintF("AllocationSiteInfo: JSArray %p info updated %s->%s\n",
+ reinterpret_cast<void*>(this),
+ ElementsKindToString(kind),
+ ElementsKindToString(to_kind));
+ }
+ cell->set_value(Smi::FromInt(to_kind));
+ }
+ }
+ }
+ return this;
+}
+
+
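// Caller-side contract for the new hook, as used in SetFastElement and
// TransitionElementsKind: invoke it before changing the elements kind and
// propagate any allocation failure (`object` stands for the receiver):

MaybeObject* maybe_failure = object->UpdateAllocationSiteInfo(to_kind);
if (maybe_failure->IsFailure()) return maybe_failure;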
MaybeObject* JSObject::TransitionElementsKind(ElementsKind to_kind) {
+ ASSERT(!map()->is_observed());
ElementsKind from_kind = map()->elements_kind();
if (IsFastHoleyElementsKind(from_kind)) {
to_kind = GetHoleyElementsKind(to_kind);
}
+ if (from_kind == to_kind) return this;
+
+ MaybeObject* maybe_failure = UpdateAllocationSiteInfo(to_kind);
+ if (maybe_failure->IsFailure()) return maybe_failure;
+
Isolate* isolate = GetIsolate();
if (elements() == isolate->heap()->empty_fixed_array() ||
(IsFastSmiOrObjectElementsKind(from_kind) &&
@@ -10732,6 +10994,9 @@ bool JSObject::ShouldConvertToFastElements() {
// An object requiring access checks is never allowed to have fast
// elements. If it had fast elements we would skip security checks.
if (IsAccessCheckNeeded()) return false;
+ // Observed objects may not go to fast mode because they rely on map checks,
+ // and for fast element accesses we sometimes check element kinds only.
+ if (FLAG_harmony_observation && map()->is_observed()) return false;
FixedArray* elements = FixedArray::cast(this->elements());
SeededNumberDictionary* dictionary = NULL;
@@ -10886,7 +11151,7 @@ MaybeObject* JSObject::GetPropertyWithInterceptor(
Isolate* isolate = GetIsolate();
InterceptorInfo* interceptor = GetNamedInterceptor();
HandleScope scope(isolate);
- Handle<Object> receiver_handle(receiver);
+ Handle<Object> receiver_handle(receiver, isolate);
Handle<JSObject> holder_handle(this);
Handle<String> name_handle(name);
@@ -11481,22 +11746,19 @@ class RegExpKey : public HashTableKey {
Smi* flags_;
};
-// Utf8SymbolKey carries a vector of chars as key.
-class Utf8SymbolKey : public HashTableKey {
+// Utf8StringKey carries a vector of chars as key.
+class Utf8StringKey : public HashTableKey {
public:
- explicit Utf8SymbolKey(Vector<const char> string, uint32_t seed)
+ explicit Utf8StringKey(Vector<const char> string, uint32_t seed)
: string_(string), hash_field_(0), seed_(seed) { }
bool IsMatch(Object* string) {
- return String::cast(string)->IsEqualTo(string_);
+ return String::cast(string)->IsUtf8EqualTo(string_);
}
uint32_t Hash() {
if (hash_field_ != 0) return hash_field_ >> String::kHashShift;
- unibrow::Utf8InputBuffer<> buffer(string_.start(),
- static_cast<unsigned>(string_.length()));
- chars_ = buffer.Utf16Length();
- hash_field_ = String::ComputeHashField(&buffer, chars_, seed_);
+ hash_field_ = StringHasher::ComputeUtf8Hash(string_, seed_, &chars_);
uint32_t result = hash_field_ >> String::kHashShift;
ASSERT(result != 0); // Ensure that the hash value of 0 is never computed.
return result;
@@ -11508,7 +11770,7 @@ class Utf8SymbolKey : public HashTableKey {
MaybeObject* AsObject() {
if (hash_field_ == 0) Hash();
- return Isolate::Current()->heap()->AllocateSymbol(
+ return Isolate::Current()->heap()->AllocateInternalizedStringFromUtf8(
string_, chars_, hash_field_);
}
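// All key classes in this file implement the same HashTableKey contract:
// IsMatch() compares against a stored entry, Hash()/HashForObject() supply
// hash codes, and AsObject() materializes the object stored on insertion.
// A minimal sketch with a hypothetical Smi-valued key:

class SmiKey : public HashTableKey {
 public:
  explicit SmiKey(int value) : value_(value) { }
  bool IsMatch(Object* other) {
    return other->IsSmi() && Smi::cast(other)->value() == value_;
  }
  uint32_t Hash() { return static_cast<uint32_t>(value_); }
  uint32_t HashForObject(Object* obj) {
    return static_cast<uint32_t>(Smi::cast(obj)->value());
  }
  MaybeObject* AsObject() { return Smi::FromInt(value_); }
 private:
  int value_;
};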
@@ -11520,35 +11782,15 @@ class Utf8SymbolKey : public HashTableKey {
template <typename Char>
-class SequentialSymbolKey : public HashTableKey {
+class SequentialStringKey : public HashTableKey {
public:
- explicit SequentialSymbolKey(Vector<const Char> string, uint32_t seed)
+ explicit SequentialStringKey(Vector<const Char> string, uint32_t seed)
: string_(string), hash_field_(0), seed_(seed) { }
uint32_t Hash() {
- StringHasher hasher(string_.length(), seed_);
-
- // Very long strings have a trivial hash that doesn't inspect the
- // string contents.
- if (hasher.has_trivial_hash()) {
- hash_field_ = hasher.GetHashField();
- } else {
- int i = 0;
- // Do the iterative array index computation as long as there is a
- // chance this is an array index.
- while (i < string_.length() && hasher.is_array_index()) {
- hasher.AddCharacter(static_cast<uc32>(string_[i]));
- i++;
- }
-
- // Process the remaining characters without updating the array
- // index.
- while (i < string_.length()) {
- hasher.AddCharacterNoIndex(static_cast<uc32>(string_[i]));
- i++;
- }
- hash_field_ = hasher.GetHashField();
- }
+ hash_field_ = StringHasher::HashSequentialString<Char>(string_.start(),
+ string_.length(),
+ seed_);
uint32_t result = hash_field_ >> String::kHashShift;
ASSERT(result != 0); // Ensure that the hash value of 0 is never computed.
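// The deleted loop lives on, centralized, behind HashSequentialString:
// characters are hashed while the prefix could still be an array index,
// then accumulated without index tracking. A condensed sketch of that
// shape, using the old StringHasher interface shown in the removal above:

template <typename Char>
uint32_t HashShapeSketch(const Char* chars, int length, StringHasher* h) {
  int i = 0;
  // Iterate as long as the prefix could still be an array index.
  while (i < length && h->is_array_index()) {
    h->AddCharacter(static_cast<uc32>(chars[i++]));
  }
  // Remaining characters no longer affect the array-index computation.
  while (i < length) {
    h->AddCharacterNoIndex(static_cast<uc32>(chars[i++]));
  }
  return h->GetHashField();
}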
@@ -11567,18 +11809,18 @@ class SequentialSymbolKey : public HashTableKey {
-class AsciiSymbolKey : public SequentialSymbolKey<char> {
+class OneByteStringKey : public SequentialStringKey<uint8_t> {
public:
- AsciiSymbolKey(Vector<const char> str, uint32_t seed)
- : SequentialSymbolKey<char>(str, seed) { }
+ OneByteStringKey(Vector<const uint8_t> str, uint32_t seed)
+ : SequentialStringKey<uint8_t>(str, seed) { }
bool IsMatch(Object* string) {
- return String::cast(string)->IsAsciiEqualTo(string_);
+ return String::cast(string)->IsOneByteEqualTo(string_);
}
MaybeObject* AsObject() {
if (hash_field_ == 0) Hash();
- MaybeObject *result = HEAP->AllocateAsciiSymbol(string_, hash_field_);
+ MaybeObject* result =
+ HEAP->AllocateOneByteInternalizedString(string_, hash_field_);
if (!result->IsFailure() && result->ToObjectUnchecked()->IsSeqString()) {
while (true) {
Atomic32 my_symbol_id = next_symbol_id;
@@ -11598,46 +11840,22 @@ class AsciiSymbolKey : public SequentialSymbolKey<char> {
static Atomic32 next_symbol_id;
};
-Atomic32 AsciiSymbolKey::next_symbol_id = 1;
+Atomic32 OneByteStringKey::next_symbol_id = 1;
-class SubStringAsciiSymbolKey : public HashTableKey {
+class SubStringOneByteStringKey : public HashTableKey {
public:
- explicit SubStringAsciiSymbolKey(Handle<SeqAsciiString> string,
- int from,
- int length,
- uint32_t seed)
- : string_(string), from_(from), length_(length), seed_(seed) { }
+ explicit SubStringOneByteStringKey(Handle<SeqOneByteString> string,
+ int from,
+ int length)
+ : string_(string), from_(from), length_(length) { }
uint32_t Hash() {
ASSERT(length_ >= 0);
ASSERT(from_ + length_ <= string_->length());
- StringHasher hasher(length_, string_->GetHeap()->HashSeed());
-
- // Very long strings have a trivial hash that doesn't inspect the
- // string contents.
- if (hasher.has_trivial_hash()) {
- hash_field_ = hasher.GetHashField();
- } else {
- int i = 0;
- // Do the iterative array index computation as long as there is a
- // chance this is an array index.
- while (i < length_ && hasher.is_array_index()) {
- hasher.AddCharacter(static_cast<uc32>(
- string_->SeqAsciiStringGet(i + from_)));
- i++;
- }
-
- // Process the remaining characters without updating the array
- // index.
- while (i < length_) {
- hasher.AddCharacterNoIndex(static_cast<uc32>(
- string_->SeqAsciiStringGet(i + from_)));
- i++;
- }
- hash_field_ = hasher.GetHashField();
- }
-
+ uint8_t* chars = string_->GetChars() + from_;
+ hash_field_ = StringHasher::HashSequentialString(
+ chars, length_, string_->GetHeap()->HashSeed());
uint32_t result = hash_field_ >> String::kHashShift;
ASSERT(result != 0); // Ensure that the hash value of 0 is never computed.
return result;
@@ -11649,29 +11867,28 @@ class SubStringAsciiSymbolKey : public HashTableKey {
}
bool IsMatch(Object* string) {
- Vector<const char> chars(string_->GetChars() + from_, length_);
- return String::cast(string)->IsAsciiEqualTo(chars);
+ Vector<const uint8_t> chars(string_->GetChars() + from_, length_);
+ return String::cast(string)->IsOneByteEqualTo(chars);
}
MaybeObject* AsObject() {
if (hash_field_ == 0) Hash();
- Vector<const char> chars(string_->GetChars() + from_, length_);
- return HEAP->AllocateAsciiSymbol(chars, hash_field_);
+ Vector<const uint8_t> chars(string_->GetChars() + from_, length_);
+ return HEAP->AllocateOneByteInternalizedString(chars, hash_field_);
}
private:
- Handle<SeqAsciiString> string_;
+ Handle<SeqOneByteString> string_;
int from_;
int length_;
uint32_t hash_field_;
- uint32_t seed_;
};
-class TwoByteSymbolKey : public SequentialSymbolKey<uc16> {
+class TwoByteStringKey : public SequentialStringKey<uc16> {
public:
- explicit TwoByteSymbolKey(Vector<const uc16> str, uint32_t seed)
- : SequentialSymbolKey<uc16>(str, seed) { }
+ explicit TwoByteStringKey(Vector<const uc16> str, uint32_t seed)
+ : SequentialStringKey<uc16>(str, seed) { }
bool IsMatch(Object* string) {
return String::cast(string)->IsTwoByteEqualTo(string_);
@@ -11679,15 +11896,15 @@ class TwoByteSymbolKey : public SequentialSymbolKey<uc16> {
MaybeObject* AsObject() {
if (hash_field_ == 0) Hash();
- return HEAP->AllocateTwoByteSymbol(string_, hash_field_);
+ return HEAP->AllocateTwoByteInternalizedString(string_, hash_field_);
}
};
-// SymbolKey carries a string/symbol object as key.
-class SymbolKey : public HashTableKey {
+// InternalizedStringKey carries a string/internalized-string object as key.
+class InternalizedStringKey : public HashTableKey {
public:
- explicit SymbolKey(String* string)
+ explicit InternalizedStringKey(String* string)
: string_(string) { }
bool IsMatch(Object* string) {
@@ -11701,23 +11918,21 @@ class SymbolKey : public HashTableKey {
}
MaybeObject* AsObject() {
- // Attempt to flatten the string, so that symbols will most often
- // be flat strings.
+ // Attempt to flatten the string, so that internalized strings will most
+ // often be flat strings.
string_ = string_->TryFlattenGetString();
Heap* heap = string_->GetHeap();
- // Transform string to symbol if possible.
- Map* map = heap->SymbolMapForString(string_);
+ // Internalize the string if possible.
+ Map* map = heap->InternalizedStringMapForString(string_);
if (map != NULL) {
string_->set_map_no_write_barrier(map);
- ASSERT(string_->IsSymbol());
+ ASSERT(string_->IsInternalizedString());
SeqString::cast(string_)->set_symbol_id(0);
return string_;
}
- // Otherwise allocate a new symbol.
- StringInputBuffer buffer(string_);
- return heap->AllocateInternalSymbol(&buffer,
- string_->length(),
- string_->hash_field());
+ // Otherwise allocate a new internalized string.
+ return heap->AllocateInternalizedStringImpl(
+ string_, string_->length(), string_->hash_field());
}
static uint32_t StringHash(Object* obj) {
@@ -11751,7 +11966,7 @@ MaybeObject* HashTable<Shape, Key>::Allocate(int at_least_space_for,
? at_least_space_for
: ComputeCapacity(at_least_space_for);
if (capacity > HashTable::kMaxCapacity) {
- return Failure::OutOfMemoryException();
+ return Failure::OutOfMemoryException(0x10);
}
Object* obj;
@@ -11768,18 +11983,18 @@ MaybeObject* HashTable<Shape, Key>::Allocate(int at_least_space_for,
// Find entry for key otherwise return kNotFound.
int StringDictionary::FindEntry(String* key) {
- if (!key->IsSymbol()) {
+ if (!key->IsInternalizedString()) {
return HashTable<StringDictionaryShape, String*>::FindEntry(key);
}
- // Optimized for symbol key. Knowledge of the key type allows:
- // 1. Move the check if the key is a symbol out of the loop.
- // 2. Avoid comparing hash codes in symbol to symbol comparison.
- // 3. Detect a case when a dictionary key is not a symbol but the key is.
- // In case of positive result the dictionary key may be replaced by
- // the symbol with minimal performance penalty. It gives a chance to
- // perform further lookups in code stubs (and significant performance boost
- // a certain style of code).
+ // Optimized for internalized string key. Knowledge of the key type allows:
+ // 1. Move the check if the key is internalized out of the loop.
+ // 2. Avoid comparing hash codes in internalized-to-internalized comparison.
+ // 3. Detect a case when a dictionary key is not internalized but the key is.
+ // In case of a positive result the dictionary key may be replaced by the
+ // internalized string with minimal performance penalty. It gives a chance
+ // to perform further lookups in code stubs (and a significant performance
+ // boost to a certain style of code).
// EnsureCapacity will guarantee the hash table is never full.
uint32_t capacity = Capacity();
@@ -11791,11 +12006,11 @@ int StringDictionary::FindEntry(String* key) {
Object* element = get(index);
if (element->IsUndefined()) break; // Empty entry.
if (key == element) return entry;
- if (!element->IsSymbol() &&
+ if (!element->IsInternalizedString() &&
!element->IsTheHole() &&
String::cast(element)->Equals(key)) {
- // Replace a non-symbol key by the equivalent symbol for faster further
- // lookups.
+ // Replace a key that is not an internalized string by the equivalent
+ // internalized string for faster further lookups.
set(index, key);
return entry;
}
@@ -11916,7 +12131,7 @@ uint32_t HashTable<Shape, Key>::FindInsertionEntry(uint32_t hash) {
// Force instantiation of template instances class.
// Please note this list is compiler dependent.
-template class HashTable<SymbolTableShape, HashTableKey*>;
+template class HashTable<StringTableShape, HashTableKey*>;
template class HashTable<CompilationCacheShape, HashTableKey*>;
@@ -12066,8 +12281,9 @@ MaybeObject* JSObject::PrepareSlowElementsForSort(uint32_t limit) {
ASSERT(!k->IsHeapNumber() || HeapNumber::cast(k)->value() <= kMaxUInt32);
Object* value = dict->ValueAt(i);
PropertyDetails details = dict->DetailsAt(i);
- if (details.type() == CALLBACKS) {
+ if (details.type() == CALLBACKS || details.IsReadOnly()) {
// Bail out and do the sorting of undefineds and array holes in JS.
+ // Also bail out if the element is not supposed to be moved.
return Smi::FromInt(-1);
}
uint32_t key = NumberToUint32(k);
@@ -12473,20 +12689,20 @@ MaybeObject* GlobalObject::EnsurePropertyCell(String* name) {
}
-MaybeObject* SymbolTable::LookupString(String* string, Object** s) {
- SymbolKey key(string);
+MaybeObject* StringTable::LookupString(String* string, Object** s) {
+ InternalizedStringKey key(string);
return LookupKey(&key, s);
}
-// This class is used for looking up two character strings in the symbol table.
+// This class is used for looking up two character strings in the string table.
// If we don't have a hit, we don't want to waste much time, so we unroll the
// string hash calculation loop here for speed. Doesn't work if the two
// characters form a decimal integer, since such strings have a different hash
// algorithm.
class TwoCharHashTableKey : public HashTableKey {
public:
- TwoCharHashTableKey(uint32_t c1, uint32_t c2, uint32_t seed)
+ TwoCharHashTableKey(uint16_t c1, uint16_t c2, uint32_t seed)
: c1_(c1), c2_(c2) {
// Char 1.
uint32_t hash = seed;
@@ -12502,17 +12718,17 @@ class TwoCharHashTableKey : public HashTableKey {
hash ^= hash >> 11;
hash += hash << 15;
if ((hash & String::kHashBitMask) == 0) hash = StringHasher::kZeroHash;
+ hash_ = hash;
#ifdef DEBUG
- StringHasher hasher(2, seed);
- hasher.AddCharacter(c1);
- hasher.AddCharacter(c2);
// If this assert fails then we failed to reproduce the two-character
// version of the string hashing algorithm above. One reason could be
// that we were passed two digits as characters, since the hash
// algorithm is different in that case.
- ASSERT_EQ(static_cast<int>(hasher.GetHash()), static_cast<int>(hash));
+ uint16_t chars[2] = {c1, c2};
+ uint32_t check_hash = StringHasher::HashSequentialString(chars, 2, seed);
+ hash = (hash << String::kHashShift) | String::kIsNotArrayIndexMask;
+ ASSERT_EQ(static_cast<int32_t>(hash), static_cast<int32_t>(check_hash));
#endif
- hash_ = hash;
}
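// The unrolled computation above is the usual one-at-a-time string hash
// applied to exactly two characters. The same arithmetic as a standalone
// function (assuming the standard per-character mixing steps elided from
// the hunk above, and omitting the zero-hash fixup):

uint32_t TwoCharHashSketch(uint16_t c1, uint16_t c2, uint32_t seed) {
  uint32_t hash = seed;
  hash += c1;           // add char 1
  hash += hash << 10;
  hash ^= hash >> 6;
  hash += c2;           // add char 2
  hash += hash << 10;
  hash ^= hash >> 6;
  hash += hash << 3;    // finalize
  hash ^= hash >> 11;
  hash += hash << 15;
  return hash;
}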
bool IsMatch(Object* o) {
@@ -12530,109 +12746,108 @@ class TwoCharHashTableKey : public HashTableKey {
}
Object* AsObject() {
- // The TwoCharHashTableKey is only used for looking in the symbol
+ // The TwoCharHashTableKey is only used for looking in the string
// table, not for adding to it.
UNREACHABLE();
return NULL;
}
private:
- uint32_t c1_;
- uint32_t c2_;
+ uint16_t c1_;
+ uint16_t c2_;
uint32_t hash_;
};
-bool SymbolTable::LookupSymbolIfExists(String* string, String** symbol) {
- SymbolKey key(string);
+bool StringTable::LookupStringIfExists(String* string, String** result) {
+ InternalizedStringKey key(string);
int entry = FindEntry(&key);
if (entry == kNotFound) {
return false;
} else {
- String* result = String::cast(KeyAt(entry));
- ASSERT(StringShape(result).IsSymbol());
- *symbol = result;
+ *result = String::cast(KeyAt(entry));
+ ASSERT(StringShape(*result).IsInternalized());
return true;
}
}
-bool SymbolTable::LookupTwoCharsSymbolIfExists(uint32_t c1,
- uint32_t c2,
- String** symbol) {
+bool StringTable::LookupTwoCharsStringIfExists(uint16_t c1,
+ uint16_t c2,
+ String** result) {
TwoCharHashTableKey key(c1, c2, GetHeap()->HashSeed());
int entry = FindEntry(&key);
if (entry == kNotFound) {
return false;
} else {
- String* result = String::cast(KeyAt(entry));
- ASSERT(StringShape(result).IsSymbol());
- *symbol = result;
+ *result = String::cast(KeyAt(entry));
+ ASSERT(StringShape(*result).IsInternalized());
return true;
}
}
-MaybeObject* SymbolTable::LookupSymbol(Vector<const char> str,
- Object** s) {
- Utf8SymbolKey key(str, GetHeap()->HashSeed());
+MaybeObject* StringTable::LookupUtf8String(Vector<const char> str,
+ Object** s) {
+ Utf8StringKey key(str, GetHeap()->HashSeed());
return LookupKey(&key, s);
}
-MaybeObject* SymbolTable::LookupAsciiSymbol(Vector<const char> str,
- Object** s) {
- AsciiSymbolKey key(str, GetHeap()->HashSeed());
+MaybeObject* StringTable::LookupOneByteString(Vector<const uint8_t> str,
+ Object** s) {
+ OneByteStringKey key(str, GetHeap()->HashSeed());
return LookupKey(&key, s);
}
-MaybeObject* SymbolTable::LookupSubStringAsciiSymbol(Handle<SeqAsciiString> str,
- int from,
- int length,
- Object** s) {
- SubStringAsciiSymbolKey key(str, from, length, GetHeap()->HashSeed());
+MaybeObject* StringTable::LookupSubStringOneByteString(
+ Handle<SeqOneByteString> str,
+ int from,
+ int length,
+ Object** s) {
+ SubStringOneByteStringKey key(str, from, length);
return LookupKey(&key, s);
}
-MaybeObject* SymbolTable::LookupTwoByteSymbol(Vector<const uc16> str,
+MaybeObject* StringTable::LookupTwoByteString(Vector<const uc16> str,
Object** s) {
- TwoByteSymbolKey key(str, GetHeap()->HashSeed());
+ TwoByteStringKey key(str, GetHeap()->HashSeed());
return LookupKey(&key, s);
}
-MaybeObject* SymbolTable::LookupKey(HashTableKey* key, Object** s) {
+MaybeObject* StringTable::LookupKey(HashTableKey* key, Object** s) {
int entry = FindEntry(key);
- // Symbol already in table.
+ // String already in table.
if (entry != kNotFound) {
*s = KeyAt(entry);
return this;
}
- // Adding new symbol. Grow table if needed.
+ // Adding new string. Grow table if needed.
Object* obj;
{ MaybeObject* maybe_obj = EnsureCapacity(1, key);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
- // Create symbol object.
- Object* symbol;
- { MaybeObject* maybe_symbol = key->AsObject();
- if (!maybe_symbol->ToObject(&symbol)) return maybe_symbol;
+ // Create string object.
+ Object* string;
+ { MaybeObject* maybe_string = key->AsObject();
+ if (!maybe_string->ToObject(&string)) return maybe_string;
}
- // If the symbol table grew as part of EnsureCapacity, obj is not
- // the current symbol table and therefore we cannot use
- // SymbolTable::cast here.
- SymbolTable* table = reinterpret_cast<SymbolTable*>(obj);
+ // If the string table grew as part of EnsureCapacity, obj is not
+ // the current string table and therefore we cannot use
+ // StringTable::cast here.
+ StringTable* table = reinterpret_cast<StringTable*>(obj);
- // Add the new symbol and return it along with the symbol table.
+ // Add the new string and return it along with the string table.
entry = table->FindInsertionEntry(key->Hash());
- table->set(EntryToIndex(entry), symbol);
+ table->set(EntryToIndex(entry), string);
table->ElementAdded();
- *s = symbol;
+ *s = string;
return table;
}
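// Caller-side sketch for LookupKey: EnsureCapacity may reallocate, so the
// object returned on success replaces the previous table.
// `AdoptStringTable` is a hypothetical stand-in for the heap's root update.

Object* string;
MaybeObject* maybe_table = table->LookupKey(&key, &string);
Object* new_table;
if (!maybe_table->ToObject(&new_table)) return maybe_table;
AdoptStringTable(new_table);  // hypothetical root update
// `string` now holds the internalized string.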
@@ -12772,42 +12987,42 @@ void CompilationCacheTable::Remove(Object* value) {
}
-// SymbolsKey used for HashTable where key is array of symbols.
-class SymbolsKey : public HashTableKey {
+// StringsKey used for HashTable where key is an array of internalized
+// strings.
+class StringsKey : public HashTableKey {
public:
- explicit SymbolsKey(FixedArray* symbols) : symbols_(symbols) { }
+ explicit StringsKey(FixedArray* strings) : strings_(strings) { }
- bool IsMatch(Object* symbols) {
- FixedArray* o = FixedArray::cast(symbols);
- int len = symbols_->length();
+ bool IsMatch(Object* strings) {
+ FixedArray* o = FixedArray::cast(strings);
+ int len = strings_->length();
if (o->length() != len) return false;
for (int i = 0; i < len; i++) {
- if (o->get(i) != symbols_->get(i)) return false;
+ if (o->get(i) != strings_->get(i)) return false;
}
return true;
}
- uint32_t Hash() { return HashForObject(symbols_); }
+ uint32_t Hash() { return HashForObject(strings_); }
uint32_t HashForObject(Object* obj) {
- FixedArray* symbols = FixedArray::cast(obj);
- int len = symbols->length();
+ FixedArray* strings = FixedArray::cast(obj);
+ int len = strings->length();
uint32_t hash = 0;
for (int i = 0; i < len; i++) {
- hash ^= String::cast(symbols->get(i))->Hash();
+ hash ^= String::cast(strings->get(i))->Hash();
}
return hash;
}
- Object* AsObject() { return symbols_; }
+ Object* AsObject() { return strings_; }
private:
- FixedArray* symbols_;
+ FixedArray* strings_;
};
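// Note: the XOR combination above makes the hash order-independent, e.g.
// Hash({a, b}) == Hash({b, a}); that is harmless here because IsMatch
// still compares the arrays element for element.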
Object* MapCache::Lookup(FixedArray* array) {
- SymbolsKey key(array);
+ StringsKey key(array);
int entry = FindEntry(&key);
if (entry == kNotFound) return GetHeap()->undefined_value();
return get(EntryToIndex(entry) + 1);
@@ -12815,7 +13030,7 @@ Object* MapCache::Lookup(FixedArray* array) {
MaybeObject* MapCache::Put(FixedArray* array, Map* value) {
- SymbolsKey key(array);
+ StringsKey key(array);
Object* obj;
{ MaybeObject* maybe_obj = EnsureCapacity(1, &key);
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
@@ -13279,8 +13494,7 @@ MaybeObject* StringDictionary::TransformPropertiesToFastFor(
PropertyType type = DetailsAt(i).type();
ASSERT(type != FIELD);
instance_descriptor_length++;
- if (type == NORMAL &&
- (!value->IsJSFunction() || heap->InNewSpace(value))) {
+ if (type == NORMAL && !value->IsJSFunction()) {
number_of_fields += 1;
}
}
@@ -13335,9 +13549,10 @@ MaybeObject* StringDictionary::TransformPropertiesToFastFor(
Object* k = KeyAt(i);
if (IsKey(k)) {
Object* value = ValueAt(i);
- // Ensure the key is a symbol before writing into the instance descriptor.
+ // Ensure the key is an internalized string before writing into the
+ // instance descriptor.
String* key;
- MaybeObject* maybe_key = heap->LookupSymbol(String::cast(k));
+ MaybeObject* maybe_key = heap->InternalizeString(String::cast(k));
if (!maybe_key->To(&key)) return maybe_key;
PropertyDetails details = DetailsAt(i);
@@ -13345,7 +13560,7 @@ MaybeObject* StringDictionary::TransformPropertiesToFastFor(
int enumeration_index = details.descriptor_index();
PropertyType type = details.type();
- if (value->IsJSFunction() && !heap->InNewSpace(value)) {
+ if (value->IsJSFunction()) {
ConstantFunctionDescriptor d(key,
JSFunction::cast(value),
details.attributes(),
@@ -13545,7 +13760,8 @@ Object* DebugInfo::GetBreakPointInfo(int code_position) {
void DebugInfo::ClearBreakPoint(Handle<DebugInfo> debug_info,
int code_position,
Handle<Object> break_point_object) {
- Handle<Object> break_point_info(debug_info->GetBreakPointInfo(code_position));
+ Handle<Object> break_point_info(debug_info->GetBreakPointInfo(code_position),
+ Isolate::Current());
if (break_point_info->IsUndefined()) return;
BreakPointInfo::ClearBreakPoint(
Handle<BreakPointInfo>::cast(break_point_info),
@@ -13559,7 +13775,8 @@ void DebugInfo::SetBreakPoint(Handle<DebugInfo> debug_info,
int statement_position,
Handle<Object> break_point_object) {
Isolate* isolate = Isolate::Current();
- Handle<Object> break_point_info(debug_info->GetBreakPointInfo(code_position));
+ Handle<Object> break_point_info(debug_info->GetBreakPointInfo(code_position),
+ isolate);
if (!break_point_info->IsUndefined()) {
BreakPointInfo::SetBreakPoint(
Handle<BreakPointInfo>::cast(break_point_info),
diff --git a/src/3rdparty/v8/src/objects.h b/src/3rdparty/v8/src/objects.h
index fe9655a..07bb288 100644
--- a/src/3rdparty/v8/src/objects.h
+++ b/src/3rdparty/v8/src/objects.h
@@ -77,7 +77,7 @@
// - DescriptorArray
// - HashTable
// - Dictionary
-// - SymbolTable
+// - StringTable
// - CompilationCacheTable
// - CodeCacheHashTable
// - MapCache
@@ -95,15 +95,25 @@
// - ExternalIntArray
// - ExternalUnsignedIntArray
// - ExternalFloatArray
-// - String
-// - SeqString
-// - SeqAsciiString
-// - SeqTwoByteString
-// - SlicedString
-// - ConsString
-// - ExternalString
-// - ExternalAsciiString
-// - ExternalTwoByteString
+// - Name
+// - String
+// - SeqString
+// - SeqOneByteString
+// - SeqTwoByteString
+// - SlicedString
+// - ConsString
+// - ExternalString
+// - ExternalAsciiString
+// - ExternalTwoByteString
+// - InternalizedString
+// - SeqInternalizedString
+// - SeqOneByteInternalizedString
+// - SeqTwoByteInternalizedString
+// - ConsInternalizedString
+// - ExternalInternalizedString
+// - ExternalAsciiInternalizedString
+// - ExternalTwoByteInternalizedString
+// - Symbol
// - HeapNumber
// - Code
// - Map
@@ -111,7 +121,10 @@
// - Foreign
// - SharedFunctionInfo
// - Struct
+// - DeclaredAccessorDescriptor
// - AccessorInfo
+// - DeclaredAccessorInfo
+// - ExecutableAccessorInfo
// - AccessorPair
// - AccessCheckInfo
// - InterceptorInfo
@@ -178,6 +191,12 @@ enum TransitionFlag {
};
+enum DebugExtraICState {
+ DEBUG_BREAK,
+ DEBUG_PREPARE_STEP_IN
+};
+
+
// Indicates whether the transition is simple: the target map of the transition
// either extends the current map with a new property, or it modifies the
// property that was added last to the current map.
@@ -225,8 +244,8 @@ const int kStubMinorKeyBits = kBitsPerInt - kSmiTagSize - kStubMajorKeyBits;
// encoding is mentioned explicitly in the name. Likewise, the default
// representation is considered sequential. It is not mentioned in the
// name. The other representations (e.g. CONS, EXTERNAL) are explicitly
-// mentioned. Finally, the string is either a SYMBOL_TYPE (if it is a
-// symbol) or a STRING_TYPE (if it is not a symbol).
+// mentioned. Finally, the string is either a STRING_TYPE (if it is a normal
+// string) or an INTERNALIZED_STRING_TYPE (if it is an internalized string).
//
// NOTE: The following things are some that depend on the string types having
// instance_types that are less than those of all other types:
@@ -237,29 +256,30 @@ const int kStubMinorKeyBits = kBitsPerInt - kSmiTagSize - kStubMajorKeyBits;
// JSObject for GC purposes. The first four entries here have typeof
// 'object', whereas JS_FUNCTION_TYPE has typeof 'function'.
#define INSTANCE_TYPE_LIST_ALL(V) \
- V(SYMBOL_TYPE) \
- V(ASCII_SYMBOL_TYPE) \
- V(CONS_SYMBOL_TYPE) \
- V(CONS_ASCII_SYMBOL_TYPE) \
- V(EXTERNAL_SYMBOL_TYPE) \
- V(EXTERNAL_SYMBOL_WITH_ASCII_DATA_TYPE) \
- V(EXTERNAL_ASCII_SYMBOL_TYPE) \
- V(SHORT_EXTERNAL_SYMBOL_TYPE) \
- V(SHORT_EXTERNAL_SYMBOL_WITH_ASCII_DATA_TYPE) \
- V(SHORT_EXTERNAL_ASCII_SYMBOL_TYPE) \
V(STRING_TYPE) \
V(ASCII_STRING_TYPE) \
V(CONS_STRING_TYPE) \
V(CONS_ASCII_STRING_TYPE) \
V(SLICED_STRING_TYPE) \
V(EXTERNAL_STRING_TYPE) \
- V(EXTERNAL_STRING_WITH_ASCII_DATA_TYPE) \
V(EXTERNAL_ASCII_STRING_TYPE) \
+ V(EXTERNAL_STRING_WITH_ASCII_DATA_TYPE) \
V(SHORT_EXTERNAL_STRING_TYPE) \
- V(SHORT_EXTERNAL_STRING_WITH_ASCII_DATA_TYPE) \
V(SHORT_EXTERNAL_ASCII_STRING_TYPE) \
- V(PRIVATE_EXTERNAL_ASCII_STRING_TYPE) \
+ V(SHORT_EXTERNAL_STRING_WITH_ASCII_DATA_TYPE) \
\
+ V(INTERNALIZED_STRING_TYPE) \
+ V(ASCII_INTERNALIZED_STRING_TYPE) \
+ V(CONS_INTERNALIZED_STRING_TYPE) \
+ V(CONS_ASCII_INTERNALIZED_STRING_TYPE) \
+ V(EXTERNAL_INTERNALIZED_STRING_TYPE) \
+ V(EXTERNAL_ASCII_INTERNALIZED_STRING_TYPE) \
+ V(EXTERNAL_INTERNALIZED_STRING_WITH_ASCII_DATA_TYPE) \
+ V(SHORT_EXTERNAL_INTERNALIZED_STRING_TYPE) \
+ V(SHORT_EXTERNAL_ASCII_INTERNALIZED_STRING_TYPE) \
+ V(SHORT_EXTERNAL_INTERNALIZED_STRING_WITH_ASCII_DATA_TYPE) \
+ \
+ V(SYMBOL_TYPE) \
V(MAP_TYPE) \
V(CODE_TYPE) \
V(ODDBALL_TYPE) \
@@ -282,7 +302,9 @@ const int kStubMinorKeyBits = kBitsPerInt - kSmiTagSize - kStubMajorKeyBits;
V(EXTERNAL_PIXEL_ARRAY_TYPE) \
V(FILLER_TYPE) \
\
- V(ACCESSOR_INFO_TYPE) \
+ V(DECLARED_ACCESSOR_DESCRIPTOR_TYPE) \
+ V(DECLARED_ACCESSOR_INFO_TYPE) \
+ V(EXECUTABLE_ACCESSOR_INFO_TYPE) \
V(ACCESSOR_PAIR_TYPE) \
V(ACCESS_CHECK_INFO_TYPE) \
V(INTERCEPTOR_INFO_TYPE) \
@@ -291,6 +313,7 @@ const int kStubMinorKeyBits = kBitsPerInt - kSmiTagSize - kStubMajorKeyBits;
V(OBJECT_TEMPLATE_INFO_TYPE) \
V(SIGNATURE_INFO_TYPE) \
V(TYPE_SWITCH_INFO_TYPE) \
+ V(ALLOCATION_SITE_INFO_TYPE) \
V(SCRIPT_TYPE) \
V(CODE_CACHE_TYPE) \
V(POLYMORPHIC_CODE_CACHE_TYPE) \
@@ -335,46 +358,6 @@ const int kStubMinorKeyBits = kBitsPerInt - kSmiTagSize - kStubMajorKeyBits;
// Since string types are not consecutive, this macro is used to
// iterate over them.
#define STRING_TYPE_LIST(V) \
- V(SYMBOL_TYPE, \
- kVariableSizeSentinel, \
- symbol, \
- Symbol) \
- V(ASCII_SYMBOL_TYPE, \
- kVariableSizeSentinel, \
- ascii_symbol, \
- AsciiSymbol) \
- V(CONS_SYMBOL_TYPE, \
- ConsString::kSize, \
- cons_symbol, \
- ConsSymbol) \
- V(CONS_ASCII_SYMBOL_TYPE, \
- ConsString::kSize, \
- cons_ascii_symbol, \
- ConsAsciiSymbol) \
- V(EXTERNAL_SYMBOL_TYPE, \
- ExternalTwoByteString::kSize, \
- external_symbol, \
- ExternalSymbol) \
- V(EXTERNAL_SYMBOL_WITH_ASCII_DATA_TYPE, \
- ExternalTwoByteString::kSize, \
- external_symbol_with_ascii_data, \
- ExternalSymbolWithAsciiData) \
- V(EXTERNAL_ASCII_SYMBOL_TYPE, \
- ExternalAsciiString::kSize, \
- external_ascii_symbol, \
- ExternalAsciiSymbol) \
- V(SHORT_EXTERNAL_SYMBOL_TYPE, \
- ExternalTwoByteString::kShortSize, \
- short_external_symbol, \
- ShortExternalSymbol) \
- V(SHORT_EXTERNAL_SYMBOL_WITH_ASCII_DATA_TYPE, \
- ExternalTwoByteString::kShortSize, \
- short_external_symbol_with_ascii_data, \
- ShortExternalSymbolWithAsciiData) \
- V(SHORT_EXTERNAL_ASCII_SYMBOL_TYPE, \
- ExternalAsciiString::kShortSize, \
- short_external_ascii_symbol, \
- ShortExternalAsciiSymbol) \
V(STRING_TYPE, \
kVariableSizeSentinel, \
string, \
@@ -403,26 +386,67 @@ const int kStubMinorKeyBits = kBitsPerInt - kSmiTagSize - kStubMajorKeyBits;
ExternalTwoByteString::kSize, \
external_string, \
ExternalString) \
- V(EXTERNAL_STRING_WITH_ASCII_DATA_TYPE, \
- ExternalTwoByteString::kSize, \
- external_string_with_ascii_data, \
- ExternalStringWithAsciiData) \
V(EXTERNAL_ASCII_STRING_TYPE, \
ExternalAsciiString::kSize, \
external_ascii_string, \
ExternalAsciiString) \
+ V(EXTERNAL_STRING_WITH_ASCII_DATA_TYPE, \
+ ExternalTwoByteString::kSize, \
+ external_string_with_ascii_data, \
+ ExternalStringWithAsciiData) \
V(SHORT_EXTERNAL_STRING_TYPE, \
ExternalTwoByteString::kShortSize, \
short_external_string, \
ShortExternalString) \
+ V(SHORT_EXTERNAL_ASCII_STRING_TYPE, \
+ ExternalAsciiString::kShortSize, \
+ short_external_ascii_string, \
+ ShortExternalAsciiString) \
V(SHORT_EXTERNAL_STRING_WITH_ASCII_DATA_TYPE, \
ExternalTwoByteString::kShortSize, \
short_external_string_with_ascii_data, \
ShortExternalStringWithAsciiData) \
- V(SHORT_EXTERNAL_ASCII_STRING_TYPE, \
+ \
+ V(INTERNALIZED_STRING_TYPE, \
+ kVariableSizeSentinel, \
+ internalized_string, \
+ InternalizedString) \
+ V(ASCII_INTERNALIZED_STRING_TYPE, \
+ kVariableSizeSentinel, \
+ ascii_internalized_string, \
+ AsciiInternalizedString) \
+ V(CONS_INTERNALIZED_STRING_TYPE, \
+ ConsString::kSize, \
+ cons_internalized_string, \
+ ConsInternalizedString) \
+ V(CONS_ASCII_INTERNALIZED_STRING_TYPE, \
+ ConsString::kSize, \
+ cons_ascii_internalized_string, \
+ ConsAsciiInternalizedString) \
+ V(EXTERNAL_INTERNALIZED_STRING_TYPE, \
+ ExternalTwoByteString::kSize, \
+ external_internalized_string, \
+ ExternalInternalizedString) \
+ V(EXTERNAL_ASCII_INTERNALIZED_STRING_TYPE, \
+ ExternalAsciiString::kSize, \
+ external_ascii_internalized_string, \
+ ExternalAsciiInternalizedString) \
+ V(EXTERNAL_INTERNALIZED_STRING_WITH_ASCII_DATA_TYPE, \
+ ExternalTwoByteString::kSize, \
+ external_internalized_string_with_ascii_data, \
+ ExternalInternalizedStringWithAsciiData) \
+ V(SHORT_EXTERNAL_INTERNALIZED_STRING_TYPE, \
+ ExternalTwoByteString::kShortSize, \
+ short_external_internalized_string, \
+ ShortExternalInternalizedString) \
+ V(SHORT_EXTERNAL_ASCII_INTERNALIZED_STRING_TYPE, \
ExternalAsciiString::kShortSize, \
- short_external_ascii_string, \
- ShortExternalAsciiString)
+ short_external_ascii_internalized_string, \
+ ShortExternalAsciiInternalizedString) \
+ V(SHORT_EXTERNAL_INTERNALIZED_STRING_WITH_ASCII_DATA_TYPE, \
+ ExternalTwoByteString::kShortSize, \
+ short_external_internalized_string_with_ascii_data, \
+ ShortExternalInternalizedStringWithAsciiData) \
// A struct is a simple object with a set of object-valued fields. Including an
// object type in this causes the compiler to generate most of the boilerplate
@@ -434,7 +458,11 @@ const int kStubMinorKeyBits = kBitsPerInt - kSmiTagSize - kStubMajorKeyBits;
// type tags, elements in this list have to be added to the INSTANCE_TYPE_LIST
// manually.
#define STRUCT_LIST_ALL(V) \
- V(ACCESSOR_INFO, AccessorInfo, accessor_info) \
+ V(DECLARED_ACCESSOR_DESCRIPTOR, \
+ DeclaredAccessorDescriptor, \
+ declared_accessor_descriptor) \
+ V(DECLARED_ACCESSOR_INFO, DeclaredAccessorInfo, declared_accessor_info) \
+ V(EXECUTABLE_ACCESSOR_INFO, ExecutableAccessorInfo, executable_accessor_info)\
V(ACCESSOR_PAIR, AccessorPair, accessor_pair) \
V(ACCESS_CHECK_INFO, AccessCheckInfo, access_check_info) \
V(INTERCEPTOR_INFO, InterceptorInfo, interceptor_info) \
@@ -444,6 +472,7 @@ const int kStubMinorKeyBits = kBitsPerInt - kSmiTagSize - kStubMajorKeyBits;
V(SIGNATURE_INFO, SignatureInfo, signature_info) \
V(TYPE_SWITCH_INFO, TypeSwitchInfo, type_switch_info) \
V(SCRIPT, Script, script) \
+ V(ALLOCATION_SITE_INFO, AllocationSiteInfo, allocation_site_info) \
V(CODE_CACHE, CodeCache, code_cache) \
V(POLYMORPHIC_CODE_CACHE, PolymorphicCodeCache, polymorphic_code_cache) \
V(TYPE_FEEDBACK_INFO, TypeFeedbackInfo, type_feedback_info) \
@@ -468,12 +497,12 @@ const uint32_t kIsNotStringMask = 0x80;
const uint32_t kStringTag = 0x0;
const uint32_t kNotStringTag = 0x80;
-// Bit 6 indicates that the object is a symbol (if set) or not (if cleared).
+// Bit 6 indicates that the object is an internalized string (if set) or not.
// There are few enough non-string types (with bit 7 set) that none of them
// has bit 6 set as well.
-const uint32_t kIsSymbolMask = 0x40;
-const uint32_t kNotSymbolTag = 0x0;
-const uint32_t kSymbolTag = 0x40;
+const uint32_t kIsInternalizedMask = 0x40;
+const uint32_t kNotInternalizedTag = 0x0;
+const uint32_t kInternalizedTag = 0x40;
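// With these tags, an internalized-string check is a two-bit mask test; a
// sketch equivalent to the InstanceType predicates used elsewhere:

static inline bool IsInternalizedStringType(uint32_t type) {
  return (type & (kIsNotStringMask | kIsInternalizedMask)) ==
         (kStringTag | kInternalizedTag);
}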
// If bit 7 is clear then bit 2 indicates whether the string consists of
// two-byte characters or one-byte characters.
@@ -516,57 +545,57 @@ const uint32_t kShortExternalStringTag = 0x10;
// A ConsString with an empty string as the right side is a candidate
-// for being shortcut by the garbage collector unless it is a
-// symbol. It's not common to have non-flat symbols, so we do not
-// shortcut them thereby avoiding turning symbols into strings. See
-// heap.cc and mark-compact.cc.
+// for being shortcut by the garbage collector unless it is internalized.
+// It's not common to have non-flat internalized strings, so we do not
+// shortcut them thereby avoiding turning internalized strings into strings.
+// See heap.cc and mark-compact.cc.
const uint32_t kShortcutTypeMask =
kIsNotStringMask |
- kIsSymbolMask |
+ kIsInternalizedMask |
kStringRepresentationMask;
const uint32_t kShortcutTypeTag = kConsStringTag;
enum InstanceType {
// String types.
- SYMBOL_TYPE = kTwoByteStringTag | kSymbolTag | kSeqStringTag,
- ASCII_SYMBOL_TYPE = kOneByteStringTag | kSymbolTag | kSeqStringTag,
- CONS_SYMBOL_TYPE = kTwoByteStringTag | kSymbolTag | kConsStringTag,
- CONS_ASCII_SYMBOL_TYPE = kOneByteStringTag | kSymbolTag | kConsStringTag,
- SHORT_EXTERNAL_SYMBOL_TYPE = kTwoByteStringTag | kSymbolTag |
- kExternalStringTag | kShortExternalStringTag,
- SHORT_EXTERNAL_SYMBOL_WITH_ASCII_DATA_TYPE =
- kTwoByteStringTag | kSymbolTag | kExternalStringTag |
- kAsciiDataHintTag | kShortExternalStringTag,
- SHORT_EXTERNAL_ASCII_SYMBOL_TYPE = kOneByteStringTag | kExternalStringTag |
- kSymbolTag | kShortExternalStringTag,
- EXTERNAL_SYMBOL_TYPE = kTwoByteStringTag | kSymbolTag | kExternalStringTag,
- EXTERNAL_SYMBOL_WITH_ASCII_DATA_TYPE =
- kTwoByteStringTag | kSymbolTag | kExternalStringTag | kAsciiDataHintTag,
- EXTERNAL_ASCII_SYMBOL_TYPE =
- kOneByteStringTag | kSymbolTag | kExternalStringTag,
STRING_TYPE = kTwoByteStringTag | kSeqStringTag,
ASCII_STRING_TYPE = kOneByteStringTag | kSeqStringTag,
CONS_STRING_TYPE = kTwoByteStringTag | kConsStringTag,
CONS_ASCII_STRING_TYPE = kOneByteStringTag | kConsStringTag,
SLICED_STRING_TYPE = kTwoByteStringTag | kSlicedStringTag,
SLICED_ASCII_STRING_TYPE = kOneByteStringTag | kSlicedStringTag,
- SHORT_EXTERNAL_STRING_TYPE =
- kTwoByteStringTag | kExternalStringTag | kShortExternalStringTag,
- SHORT_EXTERNAL_STRING_WITH_ASCII_DATA_TYPE =
- kTwoByteStringTag | kExternalStringTag |
- kAsciiDataHintTag | kShortExternalStringTag,
- SHORT_EXTERNAL_ASCII_STRING_TYPE =
- kOneByteStringTag | kExternalStringTag | kShortExternalStringTag,
EXTERNAL_STRING_TYPE = kTwoByteStringTag | kExternalStringTag,
- EXTERNAL_STRING_WITH_ASCII_DATA_TYPE =
- kTwoByteStringTag | kExternalStringTag | kAsciiDataHintTag,
- // LAST_STRING_TYPE
EXTERNAL_ASCII_STRING_TYPE = kOneByteStringTag | kExternalStringTag,
- PRIVATE_EXTERNAL_ASCII_STRING_TYPE = EXTERNAL_ASCII_STRING_TYPE,
+ EXTERNAL_STRING_WITH_ASCII_DATA_TYPE =
+ EXTERNAL_STRING_TYPE | kAsciiDataHintTag,
+ SHORT_EXTERNAL_STRING_TYPE = EXTERNAL_STRING_TYPE | kShortExternalStringTag,
+ SHORT_EXTERNAL_ASCII_STRING_TYPE =
+ EXTERNAL_ASCII_STRING_TYPE | kShortExternalStringTag,
+ SHORT_EXTERNAL_STRING_WITH_ASCII_DATA_TYPE =
+ EXTERNAL_STRING_WITH_ASCII_DATA_TYPE | kShortExternalStringTag,
+
+ INTERNALIZED_STRING_TYPE = STRING_TYPE | kInternalizedTag,
+ ASCII_INTERNALIZED_STRING_TYPE = ASCII_STRING_TYPE | kInternalizedTag,
+ CONS_INTERNALIZED_STRING_TYPE = CONS_STRING_TYPE | kInternalizedTag,
+ CONS_ASCII_INTERNALIZED_STRING_TYPE =
+ CONS_ASCII_STRING_TYPE | kInternalizedTag,
+ EXTERNAL_INTERNALIZED_STRING_TYPE = EXTERNAL_STRING_TYPE | kInternalizedTag,
+ EXTERNAL_ASCII_INTERNALIZED_STRING_TYPE =
+ EXTERNAL_ASCII_STRING_TYPE | kInternalizedTag,
+ EXTERNAL_INTERNALIZED_STRING_WITH_ASCII_DATA_TYPE =
+ EXTERNAL_STRING_WITH_ASCII_DATA_TYPE | kInternalizedTag,
+ SHORT_EXTERNAL_INTERNALIZED_STRING_TYPE =
+ SHORT_EXTERNAL_STRING_TYPE | kInternalizedTag,
+ SHORT_EXTERNAL_ASCII_INTERNALIZED_STRING_TYPE =
+ SHORT_EXTERNAL_ASCII_STRING_TYPE | kInternalizedTag,
+ SHORT_EXTERNAL_INTERNALIZED_STRING_WITH_ASCII_DATA_TYPE =
+ SHORT_EXTERNAL_STRING_WITH_ASCII_DATA_TYPE | kInternalizedTag,
+
+ // Non-string names
+ SYMBOL_TYPE = kNotStringTag, // LAST_NAME_TYPE, FIRST_NONSTRING_TYPE
// Objects allocated in their own spaces (never in new space).
- MAP_TYPE = kNotStringTag, // FIRST_NONSTRING_TYPE
+ MAP_TYPE,
CODE_TYPE,
ODDBALL_TYPE,
JS_GLOBAL_PROPERTY_CELL_TYPE,
@@ -590,7 +619,9 @@ enum InstanceType {
FILLER_TYPE, // LAST_DATA_TYPE
// Structs.
- ACCESSOR_INFO_TYPE,
+ DECLARED_ACCESSOR_DESCRIPTOR_TYPE,
+ DECLARED_ACCESSOR_INFO_TYPE,
+ EXECUTABLE_ACCESSOR_INFO_TYPE,
ACCESSOR_PAIR_TYPE,
ACCESS_CHECK_INFO_TYPE,
INTERCEPTOR_INFO_TYPE,
@@ -599,6 +630,7 @@ enum InstanceType {
OBJECT_TEMPLATE_INFO_TYPE,
SIGNATURE_INFO_TYPE,
TYPE_SWITCH_INFO_TYPE,
+ ALLOCATION_SITE_INFO_TYPE,
SCRIPT_TYPE,
CODE_CACHE_TYPE,
POLYMORPHIC_CODE_CACHE_TYPE,
@@ -645,7 +677,11 @@ enum InstanceType {
FIRST_TYPE = 0x0,
LAST_TYPE = JS_FUNCTION_TYPE,
INVALID_TYPE = FIRST_TYPE - 1,
- FIRST_NONSTRING_TYPE = MAP_TYPE,
+ FIRST_NAME_TYPE = FIRST_TYPE,
+ LAST_NAME_TYPE = SYMBOL_TYPE,
+ FIRST_UNIQUE_NAME_TYPE = INTERNALIZED_STRING_TYPE,
+ LAST_UNIQUE_NAME_TYPE = SYMBOL_TYPE,
+ FIRST_NONSTRING_TYPE = SYMBOL_TYPE,
// Boundaries for testing for an external array.
FIRST_EXTERNAL_ARRAY_TYPE = EXTERNAL_BYTE_ARRAY_TYPE,
LAST_EXTERNAL_ARRAY_TYPE = EXTERNAL_PIXEL_ARRAY_TYPE,
@@ -691,7 +727,7 @@ STATIC_CHECK(FOREIGN_TYPE == Internals::kForeignType);
V(DICTIONARY_PROPERTIES_SUB_TYPE) \
V(MAP_CODE_CACHE_SUB_TYPE) \
V(SCOPE_INFO_SUB_TYPE) \
- V(SYMBOL_TABLE_SUB_TYPE) \
+ V(STRING_TABLE_SUB_TYPE) \
V(DESCRIPTOR_ARRAY_SUB_TYPE) \
V(TRANSITION_ARRAY_SUB_TYPE)
@@ -749,6 +785,12 @@ template <class C> static inline bool Is(Object* obj);
#define DECLARE_VERIFIER(Name)
#endif
+#ifdef OBJECT_PRINT
+#define DECLARE_PRINTER(Name) void Name##Print(FILE* out = stdout);
+#else
+#define DECLARE_PRINTER(Name)
+#endif
+
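// For example, DECLARE_PRINTER(JSObject) expands to
//   void JSObjectPrint(FILE* out = stdout);
// under OBJECT_PRINT and to nothing otherwise, replacing the hand-written
// overload pair that each class previously declared (see JSObject below).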
class MaybeObject BASE_EMBEDDED {
public:
inline bool IsFailure();
@@ -766,7 +808,9 @@ class MaybeObject BASE_EMBEDDED {
return reinterpret_cast<Failure*>(this);
}
inline Object* ToObjectUnchecked() {
- ASSERT(!IsFailure());
+ // TODO(jkummerow): Turn this back into an ASSERT when we can be certain
+ // that it never fires in Release mode in the wild.
+ CHECK(!IsFailure());
return reinterpret_cast<Object*>(this);
}
inline Object* ToObjectChecked() {
@@ -782,9 +826,9 @@ class MaybeObject BASE_EMBEDDED {
}
template<typename T>
- inline bool ToHandle(Handle<T>* obj) {
+ inline bool ToHandle(Handle<T>* obj, Isolate* isolate) {
if (IsFailure()) return false;
- *obj = handle(T::cast(reinterpret_cast<Object*>(this)));
+ *obj = handle(T::cast(reinterpret_cast<Object*>(this)), isolate);
return true;
}
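// Usage sketch for the isolate-threaded ToHandle; the allocation call is
// illustrative only:

Handle<FixedArray> array;
MaybeObject* maybe = heap->AllocateFixedArray(16);
if (!maybe->ToHandle(&array, isolate)) return maybe;  // propagate failure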
@@ -813,8 +857,9 @@ class MaybeObject BASE_EMBEDDED {
#define HEAP_OBJECT_TYPE_LIST(V) \
V(HeapNumber) \
+ V(Name) \
+ V(UniqueName) \
V(String) \
- V(Symbol) \
V(SeqString) \
V(ExternalString) \
V(ConsString) \
@@ -822,7 +867,9 @@ class MaybeObject BASE_EMBEDDED {
V(ExternalTwoByteString) \
V(ExternalAsciiString) \
V(SeqTwoByteString) \
- V(SeqAsciiString) \
+ V(SeqOneByteString) \
+ V(InternalizedString) \
+ V(Symbol) \
\
V(ExternalArray) \
V(ExternalByteArray) \
@@ -845,6 +892,7 @@ class MaybeObject BASE_EMBEDDED {
V(TransitionArray) \
V(DeoptimizationInputData) \
V(DeoptimizationOutputData) \
+ V(DependentCode) \
V(TypeFeedbackCells) \
V(FixedArray) \
V(FixedDoubleArray) \
@@ -870,7 +918,7 @@ class MaybeObject BASE_EMBEDDED {
V(JSRegExp) \
V(HashTable) \
V(Dictionary) \
- V(SymbolTable) \
+ V(StringTable) \
V(JSFunctionResultCache) \
V(NormalizedMapCache) \
V(CompilationCacheTable) \
@@ -907,6 +955,8 @@ class Object : public MaybeObject {
#undef IS_TYPE_FUNCTION_DECL
inline bool IsFixedArrayBase();
+ inline bool IsExternal();
+ inline bool IsAccessorInfo();
// Returns true if this object is an instance of the specified
// function template.
@@ -965,6 +1015,7 @@ class Object : public MaybeObject {
String* key,
PropertyAttributes* attributes);
+ static Handle<Object> GetProperty(Handle<Object> object, Handle<String> key);
static Handle<Object> GetProperty(Handle<Object> object,
Handle<Object> receiver,
LookupResult* result,
@@ -987,7 +1038,10 @@ class Object : public MaybeObject {
uint32_t index);
// Return the object's prototype (might be Heap::null_value()).
- Object* GetPrototype();
+ Object* GetPrototype(Isolate* isolate);
+
+ // Return the prototype, or the method holder for a value-like object.
+ Object* GetDelegate(Isolate* isolate);
// Returns the permanent hash code associated with this object depending on
// the actual object type. Might return a failure in case no hash was
@@ -1124,7 +1178,9 @@ class Failure: public MaybeObject {
static inline Failure* RetryAfterGC(); // NEW_SPACE
static inline Failure* Exception();
static inline Failure* InternalError();
- static inline Failure* OutOfMemoryException();
+ // TODO(jkummerow): The value is temporary instrumentation. Remove it
+ // when it has served its purpose.
+ static inline Failure* OutOfMemoryException(intptr_t value);
// Casting.
static inline Failure* cast(MaybeObject* object);
@@ -1509,11 +1565,10 @@ class JSReceiver: public HeapObject {
// Lookup a property. If found, the result is valid and has
// detailed information.
- void LocalLookup(String* name,
- LookupResult* result,
+ void LocalLookup(String* name, LookupResult* result,
+ bool search_hidden_prototypes = false,
bool skip_fallback_interceptor = false);
- void Lookup(String* name,
- LookupResult* result,
+ void Lookup(String* name, LookupResult* result,
bool skip_fallback_interceptor = false);
protected:
@@ -1572,6 +1627,8 @@ class JSObject: public JSReceiver {
// Returns true if an object has elements of FAST_ELEMENTS or
// FAST_SMI_ONLY_ELEMENTS.
inline bool HasFastSmiOrObjectElements();
+ // Returns true if an object has any of the fast elements kinds.
+ inline bool HasFastElements();
// Returns true if an object has elements of FAST_DOUBLE_ELEMENTS
// ElementsKind.
inline bool HasFastDoubleElements();
@@ -1767,7 +1824,7 @@ class JSObject: public JSReceiver {
//
// Hidden properties are not local properties of the object itself.
// Instead they are stored in an auxiliary structure kept as a local
- // property with a special name Heap::hidden_symbol(). But if the
+ // property with a special name Heap::hidden_string(). But if the
// receiver is a JSGlobalProxy then the auxiliary object is a property
// of its prototype, and if it's a detached proxy, then you can't have
// hidden properties.
@@ -1786,7 +1843,7 @@ class JSObject: public JSReceiver {
// Deletes a hidden property. Deleting a non-existing property is
// considered successful.
void DeleteHiddenProperty(String* key);
- // Returns true if the object has a property with the hidden symbol as name.
+ // Returns true if the object has a property with the hidden string as name.
bool HasHiddenProperties();
static int GetIdentityHash(Handle<JSObject> obj);
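// Illustrative sketch (not part of this patch): conceptually the hidden
// property store is a per-object side table reached through the special
// Heap::hidden_string() name, e.g. (SetHiddenProperty/GetHiddenProperty are
// assumed companions of the declarations shown here):
//
//   obj->SetHiddenProperty(key, value);       // Goes into the auxiliary
//   Object* v = obj->GetHiddenProperty(key);  // structure, not the object's
//   obj->DeleteHiddenProperty(key);           // regular local properties.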
@@ -1840,25 +1897,12 @@ class JSObject: public JSReceiver {
return old_capacity + (old_capacity >> 1) + 16;
}
- // Tells whether the index'th element is present and how it is stored.
- enum LocalElementType {
- // There is no element with given index.
- UNDEFINED_ELEMENT,
-
- // Element with given index is handled by interceptor.
- INTERCEPTED_ELEMENT,
-
- // Element with given index is character in string.
- STRING_CHARACTER_ELEMENT,
+ PropertyType GetLocalPropertyType(String* name);
+ PropertyType GetLocalElementType(uint32_t index);
- // Element with given index is stored in fast backing store.
- FAST_ELEMENT,
-
- // Element with given index is stored in slow backing store.
- DICTIONARY_ELEMENT
- };
-
- LocalElementType GetLocalElementType(uint32_t index);
+ // These methods do not perform access checks!
+ AccessorPair* GetLocalPropertyAccessorPair(String* name);
+ AccessorPair* GetLocalElementAccessorPair(uint32_t index);
MUST_USE_RESULT MaybeObject* SetFastElement(uint32_t index,
Object* value,
@@ -1885,7 +1929,7 @@ class JSObject: public JSReceiver {
StrictModeFlag strict_mode);
// Empty handle is returned if the element cannot be set to the given value.
- static MUST_USE_RESULT Handle<Object> SetElement(
+ static Handle<Object> SetElement(
Handle<JSObject> object,
uint32_t index,
Handle<Object> value,
@@ -1944,7 +1988,7 @@ class JSObject: public JSReceiver {
inline void SetInternalField(int index, Object* value);
inline void SetInternalField(int index, Smi* value);
- inline void SetExternalResourceObject(Object *);
+ inline void SetExternalResourceObject(Object* value);
inline Object* GetExternalResourceObject();
// The following lookup functions skip interceptors.
@@ -2014,6 +2058,8 @@ class JSObject: public JSReceiver {
ElementsKind to_kind);
MUST_USE_RESULT MaybeObject* TransitionElementsKind(ElementsKind to_kind);
+ MUST_USE_RESULT MaybeObject* UpdateAllocationSiteInfo(
+ ElementsKind to_kind);
// Replaces an existing transition with a transition to a map with a FIELD.
MUST_USE_RESULT MaybeObject* ConvertTransitionToMapTransition(
@@ -2117,12 +2163,7 @@ class JSObject: public JSReceiver {
// Dispatched behavior.
void JSObjectShortPrint(StringStream* accumulator);
-#ifdef OBJECT_PRINT
- inline void JSObjectPrint() {
- JSObjectPrint(stdout);
- }
- void JSObjectPrint(FILE* out);
-#endif
+ DECLARE_PRINTER(JSObject)
DECLARE_VERIFIER(JSObject)
#ifdef OBJECT_PRINT
inline void PrintProperties() {
@@ -2380,12 +2421,12 @@ class FixedArray: public FixedArrayBase {
inline void set_unchecked(Heap* heap, int index, Object* value,
WriteBarrierMode mode);
- // Gives access to raw memory which stores the array's data.
- inline Object** data_start();
-
inline Object** GetFirstElementAddress();
inline bool ContainsOnlySmisOrHoles();
+ // Gives access to raw memory which stores the array's data.
+ inline Object** data_start();
+
// Copy operations.
MUST_USE_RESULT inline MaybeObject* Copy();
MUST_USE_RESULT MaybeObject* CopySize(int new_length);
@@ -2416,12 +2457,7 @@ class FixedArray: public FixedArrayBase {
static const int kMaxLength = (kMaxSize - kHeaderSize) / kPointerSize;
// Dispatched behavior.
-#ifdef OBJECT_PRINT
- inline void FixedArrayPrint() {
- FixedArrayPrint(stdout);
- }
- void FixedArrayPrint(FILE* out);
-#endif
+ DECLARE_PRINTER(FixedArray)
DECLARE_VERIFIER(FixedArray)
#ifdef DEBUG
// Checks if two FixedArrays have identical contents.
@@ -2460,6 +2496,8 @@ class FixedArray: public FixedArrayBase {
Object* value);
private:
+ STATIC_CHECK(kHeaderSize == Internals::kFixedArrayHeaderSize);
+
DISALLOW_IMPLICIT_CONSTRUCTORS(FixedArray);
};
@@ -2485,6 +2523,9 @@ class FixedDoubleArray: public FixedArrayBase {
return kHeaderSize + length * kDoubleSize;
}
+ // Gives access to raw memory which stores the array's data.
+ inline double* data_start();
+
// Code Generation support.
static int OffsetOfElementAt(int index) { return SizeFor(index); }
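// Illustrative arithmetic (not part of this patch): since SizeFor(length)
// == kHeaderSize + length * kDoubleSize, the byte offset of element i is
//
//   OffsetOfElementAt(i) == kHeaderSize + i * kDoubleSize
//
// e.g. with a hypothetical 8-byte header, element 2 sits at 8 + 2 * 8 == 24.
// (The actual header size depends on the pointer size; 8 is illustrative.)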
@@ -2503,12 +2544,7 @@ class FixedDoubleArray: public FixedArrayBase {
static const int kMaxLength = (kMaxSize - kHeaderSize) / kDoubleSize;
// Dispatched behavior.
-#ifdef OBJECT_PRINT
- inline void FixedDoubleArrayPrint() {
- FixedDoubleArrayPrint(stdout);
- }
- void FixedDoubleArrayPrint(FILE* out);
-#endif
+ DECLARE_PRINTER(FixedDoubleArray)
DECLARE_VERIFIER(FixedDoubleArray)
private:
@@ -2614,6 +2650,8 @@ class DescriptorArray: public FixedArray {
inline Object** GetKeySlot(int descriptor_number);
inline Object* GetValue(int descriptor_number);
inline Object** GetValueSlot(int descriptor_number);
+ inline Object** GetDescriptorStartSlot(int descriptor_number);
+ inline Object** GetDescriptorEndSlot(int descriptor_number);
inline PropertyDetails GetDetails(int descriptor_number);
inline PropertyType GetType(int descriptor_number);
inline int GetFieldIndex(int descriptor_number);
@@ -3007,7 +3045,7 @@ class HashTableKey {
};
-class SymbolTableShape : public BaseShape<HashTableKey*> {
+class StringTableShape : public BaseShape<HashTableKey*> {
public:
static inline bool IsMatch(HashTableKey* key, Object* value) {
return key->IsMatch(value);
@@ -3026,45 +3064,49 @@ class SymbolTableShape : public BaseShape<HashTableKey*> {
static const int kEntrySize = 1;
};
-class SeqAsciiString;
+class SeqOneByteString;
-// SymbolTable.
+// StringTable.
//
// No special elements in the prefix and the element size is 1
-// because only the symbol itself (the key) needs to be stored.
-class SymbolTable: public HashTable<SymbolTableShape, HashTableKey*> {
+// because only the string itself (the key) needs to be stored.
+class StringTable: public HashTable<StringTableShape, HashTableKey*> {
public:
- // Find symbol in the symbol table. If it is not there yet, it is
- // added. The return value is the symbol table which might have
- // been enlarged. If the return value is not a failure, the symbol
- // pointer *s is set to the symbol found.
- MUST_USE_RESULT MaybeObject* LookupSymbol(Vector<const char> str, Object** s);
- MUST_USE_RESULT MaybeObject* LookupAsciiSymbol(Vector<const char> str,
- Object** s);
- MUST_USE_RESULT MaybeObject* LookupSubStringAsciiSymbol(
- Handle<SeqAsciiString> str,
+ // Find string in the string table. If it is not there yet, it is
+ // added. The return value is the string table which might have
+ // been enlarged. If the return value is not a failure, the string
+ // pointer *s is set to the string found.
+ MUST_USE_RESULT MaybeObject* LookupUtf8String(
+ Vector<const char> str,
+ Object** s);
+ MUST_USE_RESULT MaybeObject* LookupOneByteString(
+ Vector<const uint8_t> str,
+ Object** s);
+ MUST_USE_RESULT MaybeObject* LookupSubStringOneByteString(
+ Handle<SeqOneByteString> str,
int from,
int length,
Object** s);
- MUST_USE_RESULT MaybeObject* LookupTwoByteSymbol(Vector<const uc16> str,
- Object** s);
+ MUST_USE_RESULT MaybeObject* LookupTwoByteString(
+ Vector<const uc16> str,
+ Object** s);
MUST_USE_RESULT MaybeObject* LookupString(String* key, Object** s);
- // Looks up a symbol that is equal to the given string and returns
- // true if it is found, assigning the symbol to the given output
+ // Looks up a string that is equal to the given string and returns
+ // true if it is found, assigning the string to the given output
// parameter.
- bool LookupSymbolIfExists(String* str, String** symbol);
- bool LookupTwoCharsSymbolIfExists(uint32_t c1, uint32_t c2, String** symbol);
+ bool LookupStringIfExists(String* str, String** result);
+ bool LookupTwoCharsStringIfExists(uint16_t c1, uint16_t c2, String** result);
// Casting.
- static inline SymbolTable* cast(Object* obj);
+ static inline StringTable* cast(Object* obj);
private:
MUST_USE_RESULT MaybeObject* LookupKey(HashTableKey* key, Object** s);
template <bool seq_ascii> friend class JsonParser;
- DISALLOW_IMPLICIT_CONSTRUCTORS(SymbolTable);
+ DISALLOW_IMPLICIT_CONSTRUCTORS(StringTable);
};
@@ -3092,7 +3134,7 @@ class MapCacheShape : public BaseShape<HashTableKey*> {
// MapCache.
//
-// Maps keys that are a fixed array of symbols to a map.
+// Maps keys that are a fixed array of internalized strings to a map.
// Used for canonicalize maps for object literals.
class MapCache: public HashTable<MapCacheShape, HashTableKey*> {
public:
@@ -3528,13 +3570,13 @@ class ScopeInfo : public FixedArray {
// Lookup support for serialized scope info. Returns the
// the stack slot index for a given slot name if the slot is
- // present; otherwise returns a value < 0. The name must be a symbol
- // (canonicalized).
+ // present; otherwise returns a value < 0. The name must be an internalized
+ // string.
int StackSlotIndex(String* name);
// Lookup support for serialized scope info. Returns the
// context slot index for a given slot name if the slot is present; otherwise
- // returns a value < 0. The name must be a symbol (canonicalized).
+ // returns a value < 0. The name must be an internalized string.
// If the slot is present and mode != NULL, sets *mode to the corresponding
// mode for that variable.
int ContextSlotIndex(String* name,
@@ -3543,19 +3585,26 @@ class ScopeInfo : public FixedArray {
// Lookup support for serialized scope info. Returns the
// parameter index for a given parameter name if the parameter is present;
- // otherwise returns a value < 0. The name must be a symbol (canonicalized).
+ // otherwise returns a value < 0. The name must be an internalized string.
int ParameterIndex(String* name);
// Lookup support for serialized scope info. Returns the function context
// slot index if the function name is present and context-allocated (named
// function expressions, only), otherwise returns a value < 0. The name
- // must be a symbol (canonicalized).
+ // must be an internalized string.
int FunctionContextSlotIndex(String* name, VariableMode* mode);
+
+ // Copies all the context locals into an object used to materialize a scope.
+ bool CopyContextLocalsToScopeObject(Isolate* isolate,
+ Handle<Context> context,
+ Handle<JSObject> scope_object);
+
static Handle<ScopeInfo> Create(Scope* scope, Zone* zone);
// Serializes empty scope info.
- static ScopeInfo* Empty();
+ static ScopeInfo* Empty(Isolate* isolate);
#ifdef DEBUG
void Print();
@@ -3715,12 +3764,7 @@ class ByteArray: public FixedArrayBase {
inline int ByteArraySize() {
return SizeFor(this->length());
}
-#ifdef OBJECT_PRINT
- inline void ByteArrayPrint() {
- ByteArrayPrint(stdout);
- }
- void ByteArrayPrint(FILE* out);
-#endif
+ DECLARE_PRINTER(ByteArray)
DECLARE_VERIFIER(ByteArray)
// Layout description.
@@ -3749,12 +3793,8 @@ class FreeSpace: public HeapObject {
// Casting.
static inline FreeSpace* cast(Object* obj);
-#ifdef OBJECT_PRINT
- inline void FreeSpacePrint() {
- FreeSpacePrint(stdout);
- }
- void FreeSpacePrint(FILE* out);
-#endif
+ // Dispatched behavior.
+ DECLARE_PRINTER(FreeSpace)
DECLARE_VERIFIER(FreeSpace)
// Layout description.
@@ -3829,12 +3869,8 @@ class ExternalPixelArray: public ExternalArray {
// Casting.
static inline ExternalPixelArray* cast(Object* obj);
-#ifdef OBJECT_PRINT
- inline void ExternalPixelArrayPrint() {
- ExternalPixelArrayPrint(stdout);
- }
- void ExternalPixelArrayPrint(FILE* out);
-#endif
+ // Dispatched behavior.
+ DECLARE_PRINTER(ExternalPixelArray)
DECLARE_VERIFIER(ExternalPixelArray)
private:
@@ -3856,12 +3892,8 @@ class ExternalByteArray: public ExternalArray {
// Casting.
static inline ExternalByteArray* cast(Object* obj);
-#ifdef OBJECT_PRINT
- inline void ExternalByteArrayPrint() {
- ExternalByteArrayPrint(stdout);
- }
- void ExternalByteArrayPrint(FILE* out);
-#endif
+ // Dispatched behavior.
+ DECLARE_PRINTER(ExternalByteArray)
DECLARE_VERIFIER(ExternalByteArray)
private:
@@ -3883,12 +3915,8 @@ class ExternalUnsignedByteArray: public ExternalArray {
// Casting.
static inline ExternalUnsignedByteArray* cast(Object* obj);
-#ifdef OBJECT_PRINT
- inline void ExternalUnsignedByteArrayPrint() {
- ExternalUnsignedByteArrayPrint(stdout);
- }
- void ExternalUnsignedByteArrayPrint(FILE* out);
-#endif
+ // Dispatched behavior.
+ DECLARE_PRINTER(ExternalUnsignedByteArray)
DECLARE_VERIFIER(ExternalUnsignedByteArray)
private:
@@ -3910,12 +3938,8 @@ class ExternalShortArray: public ExternalArray {
// Casting.
static inline ExternalShortArray* cast(Object* obj);
-#ifdef OBJECT_PRINT
- inline void ExternalShortArrayPrint() {
- ExternalShortArrayPrint(stdout);
- }
- void ExternalShortArrayPrint(FILE* out);
-#endif
+ // Dispatched behavior.
+ DECLARE_PRINTER(ExternalShortArray)
DECLARE_VERIFIER(ExternalShortArray)
private:
@@ -3937,12 +3961,8 @@ class ExternalUnsignedShortArray: public ExternalArray {
// Casting.
static inline ExternalUnsignedShortArray* cast(Object* obj);
-#ifdef OBJECT_PRINT
- inline void ExternalUnsignedShortArrayPrint() {
- ExternalUnsignedShortArrayPrint(stdout);
- }
- void ExternalUnsignedShortArrayPrint(FILE* out);
-#endif
+ // Dispatched behavior.
+ DECLARE_PRINTER(ExternalUnsignedShortArray)
DECLARE_VERIFIER(ExternalUnsignedShortArray)
private:
@@ -3964,12 +3984,8 @@ class ExternalIntArray: public ExternalArray {
// Casting.
static inline ExternalIntArray* cast(Object* obj);
-#ifdef OBJECT_PRINT
- inline void ExternalIntArrayPrint() {
- ExternalIntArrayPrint(stdout);
- }
- void ExternalIntArrayPrint(FILE* out);
-#endif
+ // Dispatched behavior.
+ DECLARE_PRINTER(ExternalIntArray)
DECLARE_VERIFIER(ExternalIntArray)
private:
@@ -3991,12 +4007,8 @@ class ExternalUnsignedIntArray: public ExternalArray {
// Casting.
static inline ExternalUnsignedIntArray* cast(Object* obj);
-#ifdef OBJECT_PRINT
- inline void ExternalUnsignedIntArrayPrint() {
- ExternalUnsignedIntArrayPrint(stdout);
- }
- void ExternalUnsignedIntArrayPrint(FILE* out);
-#endif
+ // Dispatched behavior.
+ DECLARE_PRINTER(ExternalUnsignedIntArray)
DECLARE_VERIFIER(ExternalUnsignedIntArray)
private:
@@ -4018,12 +4030,8 @@ class ExternalFloatArray: public ExternalArray {
// Casting.
static inline ExternalFloatArray* cast(Object* obj);
-#ifdef OBJECT_PRINT
- inline void ExternalFloatArrayPrint() {
- ExternalFloatArrayPrint(stdout);
- }
- void ExternalFloatArrayPrint(FILE* out);
-#endif
+ // Dispatched behavior.
+ DECLARE_PRINTER(ExternalFloatArray)
DECLARE_VERIFIER(ExternalFloatArray)
private:
@@ -4045,12 +4053,8 @@ class ExternalDoubleArray: public ExternalArray {
// Casting.
static inline ExternalDoubleArray* cast(Object* obj);
-#ifdef OBJECT_PRINT
- inline void ExternalDoubleArrayPrint() {
- ExternalDoubleArrayPrint(stdout);
- }
- void ExternalDoubleArrayPrint(FILE* out);
-#endif // OBJECT_PRINT
+ // Dispatched behavior.
+ DECLARE_PRINTER(ExternalDoubleArray)
DECLARE_VERIFIER(ExternalDoubleArray)
private:
@@ -4213,6 +4217,11 @@ class TypeFeedbackCells: public FixedArray {
// The object that indicates a megamorphic state.
static inline Handle<Object> MegamorphicSentinel(Isolate* isolate);
+ // The object that indicates a monomorphic state of Array with a given
+ // ElementsKind.
+ static inline Handle<Object> MonomorphicArraySentinel(Isolate* isolate,
+ ElementsKind elements_kind);
+
// A raw version of the uninitialized sentinel that's safe to read during
// garbage collection (e.g., for patching the cache).
static inline Object* RawUninitializedSentinel(Heap* heap);
@@ -4245,6 +4254,7 @@ class Code: public HeapObject {
V(FUNCTION) \
V(OPTIMIZED_FUNCTION) \
V(STUB) \
+ V(COMPILED_STUB) \
V(BUILTIN) \
V(LOAD_IC) \
V(KEYED_LOAD_IC) \
@@ -4273,6 +4283,8 @@ class Code: public HeapObject {
// Flags.
STATIC_ASSERT(LAST_CODE_KIND < 16);
+ static const char* Kind2String(Kind kind);
+
// Types of stubs.
enum StubType {
NORMAL,
@@ -4284,6 +4296,11 @@ class Code: public HeapObject {
NONEXISTENT
};
+ enum IcFragment {
+ IC_FRAGMENT,
+ HANDLER_FRAGMENT
+ };
+
enum {
NUMBER_OF_KINDS = LAST_IC_KIND + 1
};
@@ -4294,7 +4311,6 @@ class Code: public HeapObject {
#ifdef ENABLE_DISASSEMBLER
// Printing
- static const char* Kind2String(Kind kind);
static const char* ICState2String(InlineCacheState state);
static const char* StubType2String(StubType type);
static void PrintExtraICState(FILE* out, Kind kind, ExtraICState extra);
@@ -4318,9 +4334,18 @@ class Code: public HeapObject {
// [deoptimization_data]: Array containing data for deopt.
DECL_ACCESSORS(deoptimization_data, FixedArray)
- // [type_feedback_info]: Struct containing type feedback information.
- // Will contain either a TypeFeedbackInfo object, or undefined.
+ // [type_feedback_info]: Struct containing type feedback information for
+ // unoptimized code. Optimized code can temporarily store the head of
+ // the list of the dependent optimized functions during deoptimization.
+ // STUBs can use this slot to store arbitrary information as a Smi.
+ // Will contain a TypeFeedbackInfo object, a JSFunction object,
+ // undefined, or a Smi.
DECL_ACCESSORS(type_feedback_info, Object)
+ inline void InitializeTypeFeedbackInfoNoWriteBarrier(Object* value);
+ inline int stub_info();
+ inline void set_stub_info(int info);
+ inline Object* deoptimizing_functions();
+ inline void set_deoptimizing_functions(Object* value);
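// Illustrative sketch (not part of this patch): since the slot is
// overloaded, a reader dispatches on the stored value's type:
//
//   Object* raw = code->type_feedback_info();
//   if (raw->IsSmi()) {
//     int info = Smi::cast(raw)->value();  // STUB bookkeeping.
//   } else if (raw->IsJSFunction()) {
//     // Head of the deoptimizing-functions list (during deoptimization).
//   } else if (!raw->IsUndefined()) {
//     TypeFeedbackInfo* feedback = TypeFeedbackInfo::cast(raw);
//   }
//
// Only the set of possible types is taken from the comment above; the
// dispatch order is illustrative.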
// [gc_metadata]: Field used to hold GC-related metadata. The contents of this
// field do not have to be traced during garbage collection since
@@ -4332,6 +4357,11 @@ class Code: public HeapObject {
inline void set_ic_age(int count);
inline int ic_age();
+ // [prologue_offset]: Offset of the function prologue, used for aging
+ // FUNCTIONs and OPTIMIZED_FUNCTIONs.
+ inline int prologue_offset();
+ inline void set_prologue_offset(int offset);
+
// Unchecked accessors to be used during GC.
inline ByteArray* unchecked_relocation_info();
inline FixedArray* unchecked_deoptimization_data();
@@ -4351,6 +4381,7 @@ class Code: public HeapObject {
// Testers for IC stub kinds.
inline bool is_inline_cache_stub();
+ inline bool is_debug_break();
inline bool is_load_stub() { return kind() == LOAD_IC; }
inline bool is_keyed_load_stub() { return kind() == KEYED_LOAD_IC; }
inline bool is_store_stub() { return kind() == STORE_IC; }
@@ -4426,21 +4457,6 @@ class Code: public HeapObject {
inline byte unary_op_type();
inline void set_unary_op_type(byte value);
- // [type-recording binary op type]: For kind BINARY_OP_IC.
- inline byte binary_op_type();
- inline void set_binary_op_type(byte value);
- inline byte binary_op_result_type();
- inline void set_binary_op_result_type(byte value);
-
- // [compare state]: For kind COMPARE_IC, tells what state the stub is in.
- inline byte compare_state();
- inline void set_compare_state(byte value);
-
- // [compare_operation]: For kind COMPARE_IC tells what compare operation the
- // stub was generated for.
- inline byte compare_operation();
- inline void set_compare_operation(byte value);
-
// [to_boolean_foo]: For kind TO_BOOLEAN_IC tells what state the stub is in.
inline byte to_boolean_state();
inline void set_to_boolean_state(byte value);
@@ -4450,6 +4466,12 @@ class Code: public HeapObject {
inline bool has_function_cache();
inline void set_has_function_cache(bool flag);
+
+ // [marked_for_deoptimization]: For kind OPTIMIZED_FUNCTION tells whether
+ // the code is going to be deoptimized because of dead embedded maps.
+ inline bool marked_for_deoptimization();
+ inline void set_marked_for_deoptimization(bool flag);
+
bool allowed_in_shared_map_code_cache();
// Get the safepoint entry for the given pc.
@@ -4461,6 +4483,11 @@ class Code: public HeapObject {
// Find the first map in an IC stub.
Map* FindFirstMap();
+ void FindAllMaps(MapHandleList* maps);
+
+ // Find the first code in an IC stub.
+ Code* FindFirstCode();
+ void FindAllCode(CodeHandleList* code_list, int length);
class ExtraICStateStrictMode: public BitField<StrictModeFlag, 0, 1> {};
class ExtraICStateKeyedAccessGrowMode:
@@ -4495,10 +4522,10 @@ class Code: public HeapObject {
static inline Flags ComputeMonomorphicFlags(
Kind kind,
- StubType type,
ExtraICState extra_ic_state = kNoExtraICState,
- InlineCacheHolderFlag holder = OWN_MAP,
- int argc = -1);
+ StubType type = NORMAL,
+ int argc = -1,
+ InlineCacheHolderFlag holder = OWN_MAP);
static inline InlineCacheState ExtractICStateFromFlags(Flags flags);
static inline StubType ExtractTypeFromFlags(Flags flags);
@@ -4568,12 +4595,8 @@ class Code: public HeapObject {
template<typename StaticVisitor>
inline void CodeIterateBody(Heap* heap);
-#ifdef OBJECT_PRINT
- inline void CodePrint() {
- CodePrint(stdout);
- }
- void CodePrint(FILE* out);
-#endif
+
+ DECLARE_PRINTER(Code)
DECLARE_VERIFIER(Code)
void ClearInlineCaches();
@@ -4591,11 +4614,16 @@ class Code: public HeapObject {
// Code aging
static void MakeCodeAgeSequenceYoung(byte* sequence);
- void MakeYoung();
void MakeOlder(MarkingParity);
static bool IsYoungSequence(byte* sequence);
bool IsOld();
+ void PrintDeoptLocation(int bailout_id);
+
+#ifdef VERIFY_HEAP
+ void VerifyEmbeddedMapsDependency();
+#endif
+
// Max loop nesting marker used to postpone OSR. We don't take loop
// nesting that is deeper than 5 levels into account.
static const int kMaxLoopNestingMarker = 6;
@@ -4615,8 +4643,10 @@ class Code: public HeapObject {
static const int kKindSpecificFlags1Offset = kFlagsOffset + kIntSize;
static const int kKindSpecificFlags2Offset =
kKindSpecificFlags1Offset + kIntSize;
+ // Note: We might be able to squeeze this into the flags above.
+ static const int kPrologueOffset = kKindSpecificFlags2Offset + kIntSize;
- static const int kHeaderPaddingStart = kKindSpecificFlags2Offset + kIntSize;
+ static const int kHeaderPaddingStart = kPrologueOffset + kIntSize;
// Add padding to align the instruction start, which follows right after
// the Code object header.
@@ -4650,51 +4680,34 @@ class Code: public HeapObject {
static const int kUnaryOpTypeFirstBit =
kStackSlotsFirstBit + kStackSlotsBitCount;
static const int kUnaryOpTypeBitCount = 3;
- static const int kBinaryOpTypeFirstBit =
- kStackSlotsFirstBit + kStackSlotsBitCount;
- static const int kBinaryOpTypeBitCount = 3;
- static const int kBinaryOpResultTypeFirstBit =
- kBinaryOpTypeFirstBit + kBinaryOpTypeBitCount;
- static const int kBinaryOpResultTypeBitCount = 3;
- static const int kCompareStateFirstBit =
- kStackSlotsFirstBit + kStackSlotsBitCount;
- static const int kCompareStateBitCount = 3;
- static const int kCompareOperationFirstBit =
- kCompareStateFirstBit + kCompareStateBitCount;
- static const int kCompareOperationBitCount = 4;
static const int kToBooleanStateFirstBit =
kStackSlotsFirstBit + kStackSlotsBitCount;
static const int kToBooleanStateBitCount = 8;
static const int kHasFunctionCacheFirstBit =
kStackSlotsFirstBit + kStackSlotsBitCount;
static const int kHasFunctionCacheBitCount = 1;
+ static const int kMarkedForDeoptimizationFirstBit =
+ kStackSlotsFirstBit + kStackSlotsBitCount + 1;
+ static const int kMarkedForDeoptimizationBitCount = 1;
STATIC_ASSERT(kStackSlotsFirstBit + kStackSlotsBitCount <= 32);
STATIC_ASSERT(kUnaryOpTypeFirstBit + kUnaryOpTypeBitCount <= 32);
- STATIC_ASSERT(kBinaryOpTypeFirstBit + kBinaryOpTypeBitCount <= 32);
- STATIC_ASSERT(kBinaryOpResultTypeFirstBit +
- kBinaryOpResultTypeBitCount <= 32);
- STATIC_ASSERT(kCompareStateFirstBit + kCompareStateBitCount <= 32);
- STATIC_ASSERT(kCompareOperationFirstBit + kCompareOperationBitCount <= 32);
STATIC_ASSERT(kToBooleanStateFirstBit + kToBooleanStateBitCount <= 32);
STATIC_ASSERT(kHasFunctionCacheFirstBit + kHasFunctionCacheBitCount <= 32);
+ STATIC_ASSERT(kMarkedForDeoptimizationFirstBit +
+ kMarkedForDeoptimizationBitCount <= 32);
class StackSlotsField: public BitField<int,
kStackSlotsFirstBit, kStackSlotsBitCount> {}; // NOLINT
class UnaryOpTypeField: public BitField<int,
kUnaryOpTypeFirstBit, kUnaryOpTypeBitCount> {}; // NOLINT
- class BinaryOpTypeField: public BitField<int,
- kBinaryOpTypeFirstBit, kBinaryOpTypeBitCount> {}; // NOLINT
- class BinaryOpResultTypeField: public BitField<int,
- kBinaryOpResultTypeFirstBit, kBinaryOpResultTypeBitCount> {}; // NOLINT
- class CompareStateField: public BitField<int,
- kCompareStateFirstBit, kCompareStateBitCount> {}; // NOLINT
- class CompareOperationField: public BitField<int,
- kCompareOperationFirstBit, kCompareOperationBitCount> {}; // NOLINT
class ToBooleanStateField: public BitField<int,
kToBooleanStateFirstBit, kToBooleanStateBitCount> {}; // NOLINT
class HasFunctionCacheField: public BitField<bool,
kHasFunctionCacheFirstBit, kHasFunctionCacheBitCount> {}; // NOLINT
+ class MarkedForDeoptimizationField: public BitField<bool,
+ kMarkedForDeoptimizationFirstBit,
+ kMarkedForDeoptimizationBitCount> {}; // NOLINT
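// Illustrative arithmetic (not part of this patch): these fields overlay
// one 32-bit kind-specific-flags word, so only fields read by the same code
// kind may share bits. Assuming kStackSlotsFirstBit == 0 and
// kStackSlotsBitCount == 24 (values assumed for illustration):
//
//   UnaryOpTypeField              bits 24-26 (UNARY_OP_IC only)
//   HasFunctionCacheField         bit  24
//   MarkedForDeoptimizationField  bit  25    (the "+ 1" above)
//
// The "+ 1" in kMarkedForDeoptimizationFirstBit presumably keeps it clear
// of the function-cache bit so the two can coexist.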
// KindSpecificFlags2 layout (STUB and OPTIMIZED_FUNCTION)
static const int kStubMajorKeyFirstBit = 0;
@@ -4735,7 +4748,6 @@ class Code: public HeapObject {
static Code* GetCodeAgeStub(Age age, MarkingParity parity);
// Code aging -- platform-specific
- byte* FindPlatformCodeAgeSequence();
static void PatchPlatformCodeAge(byte* sequence, Age age,
MarkingParity parity);
@@ -4743,6 +4755,75 @@ class Code: public HeapObject {
};
+// This class describes the layout of the dependent code array of a map. The
+// array is partitioned into several groups of dependent code objects. Each
+// group contains code objects with the same dependency on the map. The array
+// has the following layout for n dependency groups:
+//
+// +----+----+-----+----+---------+----------+-----+---------+-----------+
+// | C1 | C2 | ... | Cn | group 1 | group 2 | ... | group n | undefined |
+// +----+----+-----+----+---------+----------+-----+---------+-----------+
+//
+// The first n elements are Smis, each of which specifies the number of codes
+// in the corresponding group. The subsequent elements contain grouped code
+// objects. The suffix of the array can be filled with the undefined value if
+// the number of codes is less than the length of the array. The order of the
+// code objects within a group is not preserved.
+//
+// All code indexes used in the class are counted starting from the first
+// code object of the first group. In other words, code index 0 corresponds
+// to array index n = kCodesStartIndex.
+
+class DependentCode: public FixedArray {
+ public:
+ enum DependencyGroup {
+ // Group of code objects that weakly embed this map and depend on being
+ // deoptimized when the map is garbage collected.
+ kWeaklyEmbeddedGroup,
+ // Group of code objects that omit run-time prototype checks for prototypes
+ // described by this map. The group is deoptimized whenever an object
+ // described by this map changes shape (and transitions to a new map),
+ // possibly invalidating the assumptions embedded in the code.
+ kPrototypeCheckGroup,
+ kGroupCount = kPrototypeCheckGroup + 1
+ };
+
+ // Array for holding the index of the first code object of each group.
+ // The last element stores the total number of code objects.
+ class GroupStartIndexes {
+ public:
+ explicit GroupStartIndexes(DependentCode* entries);
+ void Recompute(DependentCode* entries);
+ int at(int i) { return start_indexes_[i]; }
+ int number_of_entries() { return start_indexes_[kGroupCount]; }
+ private:
+ int start_indexes_[kGroupCount + 1];
+ };
+
+ bool Contains(DependencyGroup group, Code* code);
+ static Handle<DependentCode> Insert(Handle<DependentCode> entries,
+ DependencyGroup group,
+ Handle<Code> value);
+ void DeoptimizeDependentCodeGroup(DependentCode::DependencyGroup group);
+
+ // The following low-level accessors should only be used by this class
+ // and the mark compact collector.
+ inline int number_of_entries(DependencyGroup group);
+ inline void set_number_of_entries(DependencyGroup group, int value);
+ inline Code* code_at(int i);
+ inline void set_code_at(int i, Code* value);
+ inline Object** code_slot_at(int i);
+ inline void clear_code_at(int i);
+ static inline DependentCode* cast(Object* object);
+
+ private:
+ // Make room at the end of the given group by moving out the first
+ // code objects of the subsequent groups.
+ inline void ExtendGroup(DependencyGroup group);
+ static const int kCodesStartIndex = kGroupCount;
+};
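// Illustrative sketch (not part of this patch): the group start indexes
// follow from the leading count Smis alone; presumably what
// GroupStartIndexes::Recompute computes is, in effect:
//
//   int index = 0;  // Code index 0 == array index kCodesStartIndex.
//   for (int g = 0; g < kGroupCount; g++) {
//     start_indexes_[g] = index;
//     index += entries->number_of_entries(static_cast<DependencyGroup>(g));
//   }
//   start_indexes_[kGroupCount] = index;  // Total number of code objects.
//
// The loop body is inferred from the layout comment above, not taken from
// the actual implementation.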
+
+
// All heap objects have a Map that describes their structure.
// A Map contains information about:
// - Size information about the object
@@ -4866,6 +4947,10 @@ class Map: public HeapObject {
inline void set_elements_kind(ElementsKind elements_kind) {
ASSERT(elements_kind < kElementsKindCount);
ASSERT(kElementsKindCount <= (1 << kElementsKindBitCount));
+ ASSERT(!is_observed() ||
+ elements_kind == DICTIONARY_ELEMENTS ||
+ elements_kind == NON_STRICT_ARGUMENTS_ELEMENTS ||
+ IsExternalArrayElementsKind(elements_kind));
set_bit_field2((bit_field2() & ~kElementsKindMask) |
(elements_kind << kElementsKindShift));
ASSERT(this->elements_kind() == elements_kind);
@@ -4894,6 +4979,10 @@ class Map: public HeapObject {
return IsFastDoubleElementsKind(elements_kind());
}
+ inline bool has_fast_elements() {
+ return IsFastElementsKind(elements_kind());
+ }
+
inline bool has_non_strict_arguments_elements() {
return elements_kind() == NON_STRICT_ARGUMENTS_ELEMENTS;
}
@@ -4981,6 +5070,9 @@ class Map: public HeapObject {
// [stub cache]: contains stubs compiled for this map.
DECL_ACCESSORS(code_cache, Object)
+ // [dependent code]: list of optimized codes that have this map embedded.
+ DECL_ACCESSORS(dependent_code, DependentCode)
+
// [back pointer]: points back to the parent map from which a transition
// leads to this map. The field overlaps with prototype transitions and the
// back pointer will be moved into the prototype transitions array if
@@ -5072,7 +5164,7 @@ class Map: public HeapObject {
set_bit_field3(EnumLengthBits::update(bit_field3(), length));
}
-
+ inline bool CanTrackAllocationSite();
inline bool owns_descriptors();
inline void set_owns_descriptors(bool is_shared);
inline bool is_observed();
@@ -5190,17 +5282,29 @@ class Map: public HeapObject {
void ZapPrototypeTransitions();
void ZapTransitions();
- // Dispatched behavior.
-#ifdef OBJECT_PRINT
- inline void MapPrint() {
- MapPrint(stdout);
+ bool CanTransition() {
+ // Only JSObject and subtypes have map transitions and back pointers.
+ STATIC_ASSERT(LAST_TYPE == LAST_JS_OBJECT_TYPE);
+ return instance_type() >= FIRST_JS_OBJECT_TYPE;
}
- void MapPrint(FILE* out);
-#endif
+
+ // Fires when the layout of an object with a leaf map changes.
+ // This includes adding transitions to the leaf map or changing
+ // the descriptor array.
+ inline void NotifyLeafMapLayoutChange();
+
+ inline bool CanOmitPrototypeChecks();
+
+ inline void AddDependentCode(DependentCode::DependencyGroup group,
+ Handle<Code> code);
+
+ // Dispatched behavior.
+ DECLARE_PRINTER(Map)
DECLARE_VERIFIER(Map)
#ifdef VERIFY_HEAP
void SharedMapVerify();
+ void VerifyOmittedPrototypeChecks();
#endif
inline int visitor_id();
@@ -5242,9 +5346,9 @@ class Map: public HeapObject {
kConstructorOffset + kPointerSize;
static const int kDescriptorsOffset =
kTransitionsOrBackPointerOffset + kPointerSize;
- static const int kCodeCacheOffset =
- kDescriptorsOffset + kPointerSize;
- static const int kBitField3Offset = kCodeCacheOffset + kPointerSize;
+ static const int kCodeCacheOffset = kDescriptorsOffset + kPointerSize;
+ static const int kDependentCodeOffset = kCodeCacheOffset + kPointerSize;
+ static const int kBitField3Offset = kDependentCodeOffset + kPointerSize;
static const int kSize = kBitField3Offset + kPointerSize;
// Layout of pointer fields. Heap iteration code relies on them
@@ -5397,12 +5501,8 @@ class Script: public Struct {
// resource is accessible. Otherwise, always return true.
inline bool HasValidSource();
-#ifdef OBJECT_PRINT
- inline void ScriptPrint() {
- ScriptPrint(stdout);
- }
- void ScriptPrint(FILE* out);
-#endif
+ // Dispatched behavior.
+ DECLARE_PRINTER(Script)
DECLARE_VERIFIER(Script)
static const int kSourceOffset = HeapObject::kHeaderSize;
@@ -5487,6 +5587,7 @@ class SharedFunctionInfo: public HeapObject {
// [code]: Function code.
DECL_ACCESSORS(code, Code)
+ inline void ReplaceCode(Code* code);
// [optimized_code_map]: Map from native context to optimized code
// and a shared literals array or Smi 0 if none.
@@ -5502,7 +5603,7 @@ class SharedFunctionInfo: public HeapObject {
void InstallFromOptimizedCodeMap(JSFunction* function, int index);
// Clear optimized code map.
- void ClearOptimizedCodeMap();
+ inline void ClearOptimizedCodeMap();
// Add a new entry to the optimized code map.
static void AddToOptimizedCodeMap(Handle<SharedFunctionInfo> shared,
@@ -5880,12 +5981,7 @@ class SharedFunctionInfo: public HeapObject {
// Dispatched behavior.
// Set max_length to -1 for unlimited length.
void SourceCodePrint(StringStream* accumulator, int max_length);
-#ifdef OBJECT_PRINT
- inline void SharedFunctionInfoPrint() {
- SharedFunctionInfoPrint(stdout);
- }
- void SharedFunctionInfoPrint(FILE* out);
-#endif
+ DECLARE_PRINTER(SharedFunctionInfo)
DECLARE_VERIFIER(SharedFunctionInfo)
void ResetForNewContext(int new_ic_age);
@@ -6115,12 +6211,7 @@ class JSModule: public JSObject {
static inline JSModule* cast(Object* obj);
// Dispatched behavior.
-#ifdef OBJECT_PRINT
- inline void JSModulePrint() {
- JSModulePrint(stdout);
- }
- void JSModulePrint(FILE* out);
-#endif
+ DECLARE_PRINTER(JSModule)
DECLARE_VERIFIER(JSModule)
// Layout description.
@@ -6274,12 +6365,7 @@ class JSFunction: public JSObject {
void JSFunctionIterateBody(int object_size, ObjectVisitor* v);
// Dispatched behavior.
-#ifdef OBJECT_PRINT
- inline void JSFunctionPrint() {
- JSFunctionPrint(stdout);
- }
- void JSFunctionPrint(FILE* out);
-#endif
+ DECLARE_PRINTER(JSFunction)
DECLARE_VERIFIER(JSFunction)
// Returns the number of allocated literals.
@@ -6288,6 +6374,18 @@ class JSFunction: public JSObject {
// Retrieve the native context from a function's literal array.
static Context* NativeContextFromLiterals(FixedArray* literals);
+#ifdef DEBUG
+ bool FunctionsInFunctionListShareSameCode() {
+ Object* current = this;
+ while (!current->IsUndefined()) {
+ JSFunction* function = JSFunction::cast(current);
+ current = function->next_function_link();
+ if (function->code() != this->code()) return false;
+ }
+ return true;
+ }
+#endif
+
// Layout descriptors. The last property (from kNonWeakFieldsEndOffset to
// kSize) is weak and has special handling during garbage collection.
static const int kCodeEntryOffset = JSObject::kHeaderSize;
@@ -6333,12 +6431,7 @@ class JSGlobalProxy : public JSObject {
static inline JSGlobalProxy* cast(Object* obj);
// Dispatched behavior.
-#ifdef OBJECT_PRINT
- inline void JSGlobalProxyPrint() {
- JSGlobalProxyPrint(stdout);
- }
- void JSGlobalProxyPrint(FILE* out);
-#endif
+ DECLARE_PRINTER(JSGlobalProxy)
DECLARE_VERIFIER(JSGlobalProxy)
// Layout description.
@@ -6386,7 +6479,7 @@ class GlobalObject: public JSObject {
Handle<GlobalObject> global,
Handle<String> name);
// TODO(kmillikin): This function can be eliminated once the stub cache is
- // full handlified (and the static helper can be written directly).
+ // fully handlified (and the static helper can be written directly).
MUST_USE_RESULT MaybeObject* EnsurePropertyCell(String* name);
// Casting.
@@ -6411,12 +6504,7 @@ class JSGlobalObject: public GlobalObject {
static inline JSGlobalObject* cast(Object* obj);
// Dispatched behavior.
-#ifdef OBJECT_PRINT
- inline void JSGlobalObjectPrint() {
- JSGlobalObjectPrint(stdout);
- }
- void JSGlobalObjectPrint(FILE* out);
-#endif
+ DECLARE_PRINTER(JSGlobalObject)
DECLARE_VERIFIER(JSGlobalObject)
// Layout description.
@@ -6443,12 +6531,7 @@ class JSBuiltinsObject: public GlobalObject {
static inline JSBuiltinsObject* cast(Object* obj);
// Dispatched behavior.
-#ifdef OBJECT_PRINT
- inline void JSBuiltinsObjectPrint() {
- JSBuiltinsObjectPrint(stdout);
- }
- void JSBuiltinsObjectPrint(FILE* out);
-#endif
+ DECLARE_PRINTER(JSBuiltinsObject)
DECLARE_VERIFIER(JSBuiltinsObject)
// Layout description. The size of the builtins object includes
@@ -6484,12 +6567,7 @@ class JSValue: public JSObject {
static inline JSValue* cast(Object* obj);
// Dispatched behavior.
-#ifdef OBJECT_PRINT
- inline void JSValuePrint() {
- JSValuePrint(stdout);
- }
- void JSValuePrint(FILE* out);
-#endif
+ DECLARE_PRINTER(JSValue)
DECLARE_VERIFIER(JSValue)
// Layout description.
@@ -6538,12 +6616,7 @@ class JSDate: public JSObject {
// Dispatched behavior.
-#ifdef OBJECT_PRINT
- inline void JSDatePrint() {
- JSDatePrint(stdout);
- }
- void JSDatePrint(FILE* out);
-#endif
+ DECLARE_PRINTER(JSDate)
DECLARE_VERIFIER(JSDate)
// The order is important. It must be kept in sync with date macros
@@ -6635,12 +6708,7 @@ class JSMessageObject: public JSObject {
static inline JSMessageObject* cast(Object* obj);
// Dispatched behavior.
-#ifdef OBJECT_PRINT
- inline void JSMessageObjectPrint() {
- JSMessageObjectPrint(stdout);
- }
- void JSMessageObjectPrint(FILE* out);
-#endif
+ DECLARE_PRINTER(JSMessageObject)
DECLARE_VERIFIER(JSMessageObject)
// Layout description.
@@ -6880,12 +6948,8 @@ class CodeCache: public Struct {
static inline CodeCache* cast(Object* obj);
-#ifdef OBJECT_PRINT
- inline void CodeCachePrint() {
- CodeCachePrint(stdout);
- }
- void CodeCachePrint(FILE* out);
-#endif
+ // Dispatched behavior.
+ DECLARE_PRINTER(CodeCache)
DECLARE_VERIFIER(CodeCache)
static const int kDefaultCacheOffset = HeapObject::kHeaderSize;
@@ -6969,12 +7033,8 @@ class PolymorphicCodeCache: public Struct {
static inline PolymorphicCodeCache* cast(Object* obj);
-#ifdef OBJECT_PRINT
- inline void PolymorphicCodeCachePrint() {
- PolymorphicCodeCachePrint(stdout);
- }
- void PolymorphicCodeCachePrint(FILE* out);
-#endif
+ // Dispatched behavior.
+ DECLARE_PRINTER(PolymorphicCodeCache)
DECLARE_VERIFIER(PolymorphicCodeCache)
static const int kCacheOffset = HeapObject::kHeaderSize;
@@ -7022,12 +7082,8 @@ class TypeFeedbackInfo: public Struct {
static inline TypeFeedbackInfo* cast(Object* obj);
-#ifdef OBJECT_PRINT
- inline void TypeFeedbackInfoPrint() {
- TypeFeedbackInfoPrint(stdout);
- }
- void TypeFeedbackInfoPrint(FILE* out);
-#endif
+ // Dispatched behavior.
+ DECLARE_PRINTER(TypeFeedbackInfo)
DECLARE_VERIFIER(TypeFeedbackInfo)
static const int kStorage1Offset = HeapObject::kHeaderSize;
@@ -7053,6 +7109,38 @@ class TypeFeedbackInfo: public Struct {
};
+enum AllocationSiteMode {
+ DONT_TRACK_ALLOCATION_SITE,
+ TRACK_ALLOCATION_SITE,
+ LAST_ALLOCATION_SITE_MODE = TRACK_ALLOCATION_SITE
+};
+
+
+class AllocationSiteInfo: public Struct {
+ public:
+ DECL_ACCESSORS(payload, Object)
+
+ static inline AllocationSiteInfo* cast(Object* obj);
+
+ DECLARE_PRINTER(AllocationSiteInfo)
+ DECLARE_VERIFIER(AllocationSiteInfo)
+
+ // Returns NULL if no AllocationSiteInfo is available for the object.
+ static AllocationSiteInfo* FindForJSObject(JSObject* object);
+
+ static AllocationSiteMode GetMode(ElementsKind boilerplate_elements_kind);
+ static AllocationSiteMode GetMode(ElementsKind from, ElementsKind to);
+
+ static const int kPayloadOffset = HeapObject::kHeaderSize;
+ static const int kSize = kPayloadOffset + kPointerSize;
+ static const uint32_t kMaximumArrayBytesToPretransition = 8 * 1024;
+
+ bool GetElementsKindPayload(ElementsKind* kind);
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(AllocationSiteInfo);
+};
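// Illustrative sketch (not part of this patch): a plausible use of the
// GetMode overloads, assuming tracking only pays off for elements kinds
// that can still transition (enumerator names assumed):
//
//   AllocationSiteMode mode =
//       AllocationSiteInfo::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS);
//   if (mode == TRACK_ALLOCATION_SITE) {
//     // Record the transition so future allocations at this site start
//     // out with the more general elements kind.
//   }
//
// Both the enumerator names and the returned value are assumptions; the
// declarations above only fix the signatures.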
+
+
// Representation of a slow alias as part of a non-strict arguments objects.
// For fast aliases (if HasNonStrictArgumentsElements()):
// - the parameter map contains an index into the context
@@ -7068,12 +7156,8 @@ class AliasedArgumentsEntry: public Struct {
static inline AliasedArgumentsEntry* cast(Object* obj);
-#ifdef OBJECT_PRINT
- inline void AliasedArgumentsEntryPrint() {
- AliasedArgumentsEntryPrint(stdout);
- }
- void AliasedArgumentsEntryPrint(FILE* out);
-#endif
+ // Dispatched behavior.
+ DECLARE_PRINTER(AliasedArgumentsEntry)
DECLARE_VERIFIER(AliasedArgumentsEntry)
static const int kAliasedContextSlot = HeapObject::kHeaderSize;
@@ -7092,30 +7176,15 @@ class StringHasher {
public:
explicit inline StringHasher(int length, uint32_t seed);
- // Returns true if the hash of this string can be computed without
- // looking at the contents.
- inline bool has_trivial_hash();
-
- // Add a character to the hash and update the array index calculation.
- inline void AddCharacter(uint32_t c);
-
- // Adds a character to the hash but does not update the array index
- // calculation. This can only be called when it has been verified
- // that the input is not an array index.
- inline void AddCharacterNoIndex(uint32_t c);
-
- // Add a character above 0xffff as a surrogate pair. These can get into
- // the hasher through the routines that take a UTF-8 string and make a symbol.
- void AddSurrogatePair(uc32 c);
- void AddSurrogatePairNoIndex(uc32 c);
-
- // Returns the value to store in the hash field of a string with
- // the given length and contents.
- uint32_t GetHashField();
+ template <typename schar>
+ static inline uint32_t HashSequentialString(const schar* chars,
+ int length,
+ uint32_t seed);
- // Returns true if the characters seen so far make up a legal array
- // index.
- bool is_array_index() { return is_array_index_; }
+ // Reads all the data, even for long strings and computes the utf16 length.
+ static uint32_t ComputeUtf8Hash(Vector<const char> chars,
+ uint32_t seed,
+ int* utf16_length_out);
// Calculated hash value for a string consisting of 1 to
// String::kMaxArrayIndexSize digits with no leading zeros (except "0").
@@ -7127,51 +7196,36 @@ class StringHasher {
// use 27 instead.
static const int kZeroHash = 27;
- private:
- uint32_t array_index() {
- ASSERT(is_array_index());
- return array_index_;
- }
-
- inline uint32_t GetHash();
-
// Reusable parts of the hashing algorithm.
- INLINE(static uint32_t AddCharacterCore(uint32_t running_hash, uint32_t c));
+ INLINE(static uint32_t AddCharacterCore(uint32_t running_hash, uint16_t c));
INLINE(static uint32_t GetHashCore(uint32_t running_hash));
- int length_;
- uint32_t raw_running_hash_;
- uint32_t array_index_;
- bool is_array_index_;
- bool is_first_char_;
- friend class TwoCharHashTableKey;
-
- template <bool seq_ascii> friend class JsonParser;
-};
-
-
-class IncrementalAsciiStringHasher {
- public:
- explicit inline IncrementalAsciiStringHasher(uint32_t seed, char first_char);
- inline void AddCharacter(uc32 c);
- inline uint32_t GetHash();
+ protected:
+ // Returns the value to store in the hash field of a string with
+ // the given length and contents.
+ uint32_t GetHashField();
+ // Returns true if the hash of this string can be computed without
+ // looking at the contents.
+ inline bool has_trivial_hash();
+ // Adds a block of characters to the hash.
+ template<typename Char>
+ inline void AddCharacters(const Char* chars, int len);
private:
+ // Add a character to the hash.
+ inline void AddCharacter(uint16_t c);
+ // Update the array index calculation. Returns true if the string is
+ // still a valid array index.
+ inline bool UpdateIndex(uint16_t c);
+
int length_;
uint32_t raw_running_hash_;
uint32_t array_index_;
bool is_array_index_;
- char first_char_;
+ bool is_first_char_;
+ DISALLOW_COPY_AND_ASSIGN(StringHasher);
};
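// Illustrative sketch (not part of this patch): with the incremental
// hasher API gone, callers use the static entry points directly, e.g.:
//
//   uint32_t field = StringHasher::HashSequentialString<uint8_t>(
//       chars, length, isolate->heap()->HashSeed());
//
// HashSeed() is an assumed accessor for the per-heap hash seed; the
// template parameter selects one-byte (uint8_t) vs. two-byte (uc16)
// character scanning.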
-// Calculates string hash.
-template <typename schar>
-inline uint32_t HashSequentialString(const schar* chars,
- int length,
- uint32_t seed);
-
-
// The characteristics of a string are stored in its map. Retrieving these
// few bits of information is moderately expensive, involving two memory
// loads where the second is dependent on the first. To improve efficiency
@@ -7197,7 +7251,7 @@ class StringShape BASE_EMBEDDED {
inline bool IsExternalTwoByte();
inline bool IsSequentialAscii();
inline bool IsSequentialTwoByte();
- inline bool IsSymbol();
+ inline bool IsInternalized();
inline StringRepresentationTag representation_tag();
inline uint32_t encoding_tag();
inline uint32_t full_representation_tag();
@@ -7221,6 +7275,102 @@ class StringShape BASE_EMBEDDED {
};
+// The Name abstract class captures anything that can be used as a property
+// name, i.e., strings and symbols. All names store a hash value.
+class Name: public HeapObject {
+ public:
+ // Get and set the hash field of the name.
+ inline uint32_t hash_field();
+ inline void set_hash_field(uint32_t value);
+
+ // Tells whether the hash code has been computed.
+ inline bool HasHashCode();
+
+ // Returns a hash value used for the property table
+ inline uint32_t Hash();
+
+ // Casting.
+ static inline Name* cast(Object* obj);
+
+ // Layout description.
+ static const int kHashFieldOffset = HeapObject::kHeaderSize;
+ static const int kSize = kHashFieldOffset + kPointerSize;
+
+ // Mask constant for checking if a name has a computed hash code
+ // and if it is a string that is an array index. The least significant bit
+ // indicates whether a hash code has been computed. If the hash code has
+ // been computed the 2nd bit tells whether the string can be used as an
+ // array index.
+ static const int kHashNotComputedMask = 1;
+ static const int kIsNotArrayIndexMask = 1 << 1;
+ static const int kNofHashBitFields = 2;
+
+ // Shift constant retrieving hash code from hash field.
+ static const int kHashShift = kNofHashBitFields;
+
+ // Only these bits are relevant in the hash, since the top two are shifted
+ // out.
+ static const uint32_t kHashBitMask = 0xffffffffu >> kHashShift;
+
+ // Array index strings this short can keep their index in the hash field.
+ static const int kMaxCachedArrayIndexLength = 7;
+
+ // For strings which are array indexes the hash value has the string length
+ // mixed into the hash, mainly to avoid a hash value of zero which would be
+ // the case for the string '0'. 24 bits are used for the array index value.
+ static const int kArrayIndexValueBits = 24;
+ static const int kArrayIndexLengthBits =
+ kBitsPerInt - kArrayIndexValueBits - kNofHashBitFields;
+
+ STATIC_CHECK((kArrayIndexLengthBits > 0));
+
+ static const int kArrayIndexHashLengthShift =
+ kArrayIndexValueBits + kNofHashBitFields;
+
+ static const int kArrayIndexHashMask = (1 << kArrayIndexHashLengthShift) - 1;
+
+ static const int kArrayIndexValueMask =
+ ((1 << kArrayIndexValueBits) - 1) << kHashShift;
+
+ // Check that kMaxCachedArrayIndexLength + 1 is a power of two so we
+ // could use a mask to test if the length of string is less than or equal to
+ // kMaxCachedArrayIndexLength.
+ STATIC_CHECK(IS_POWER_OF_TWO(kMaxCachedArrayIndexLength + 1));
+
+ static const int kContainsCachedArrayIndexMask =
+ (~kMaxCachedArrayIndexLength << kArrayIndexHashLengthShift) |
+ kIsNotArrayIndexMask;
+
+ // Value of empty hash field indicating that the hash is not computed.
+ static const int kEmptyHashField =
+ kIsNotArrayIndexMask | kHashNotComputedMask;
+
+ protected:
+ static inline bool IsHashFieldComputed(uint32_t field);
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(Name);
+};
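// Illustrative bit layout (not part of this patch): with kNofHashBitFields
// == 2 and 32-bit ints, a computed hash field of an array-index string
// packs as (bit 0 least significant):
//
//   bit 0       hash-not-computed flag (clear once the hash is cached)
//   bit 1       is-not-array-index flag (clear for array indexes)
//   bits 2-25   array index value (kArrayIndexValueBits == 24)
//   bits 26-31  string length (kArrayIndexLengthBits == 6)
//
// For ordinary names, bits 2-31 instead hold the hash proper, so Hash()
// reduces to hash_field() >> kHashShift.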
+
+
+// ES6 symbols.
+class Symbol: public Name {
+ public:
+ // Casting.
+ static inline Symbol* cast(Object* obj);
+
+ // Dispatched behavior.
+ DECLARE_PRINTER(Symbol)
+ DECLARE_VERIFIER(Symbol)
+
+ // Layout description.
+ static const int kSize = Name::kSize;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(Symbol);
+};
+
+
// The String abstract class captures JavaScript string values:
//
// Ecma-262:
@@ -7229,8 +7379,10 @@ class StringShape BASE_EMBEDDED {
// ordered sequence of zero or more 16-bit unsigned integer values.
//
// All string values have a length field.
-class String: public HeapObject {
+class String: public Name {
public:
+ enum Encoding { ONE_BYTE_ENCODING, TWO_BYTE_ENCODING };
+
// Representation of the flat content of a String.
// A non-flat string doesn't have flat content.
// A flat string has content that's encoded as a sequence of either
@@ -7245,11 +7397,11 @@ class String: public HeapObject {
// Returns true if the structure contains two-byte content.
bool IsTwoByte() { return state_ == TWO_BYTE; }
- // Return the ASCII content of the string. Only use if IsAscii() returns
+ // Return the one-byte content of the string. Only use if IsAscii() returns
// true.
- Vector<const char> ToAsciiVector() {
+ Vector<const uint8_t> ToOneByteVector() {
ASSERT_EQ(ASCII, state_);
- return Vector<const char>::cast(buffer_);
+ return buffer_;
}
// Return the two-byte content of the string. Only use if IsTwoByte()
// returns true.
@@ -7262,15 +7414,15 @@ class String: public HeapObject {
enum State { NON_FLAT, ASCII, TWO_BYTE };
// Constructors only used by String::GetFlatContent().
- explicit FlatContent(Vector<const char> chars)
- : buffer_(Vector<const byte>::cast(chars)),
+ explicit FlatContent(Vector<const uint8_t> chars)
+ : buffer_(chars),
state_(ASCII) { }
explicit FlatContent(Vector<const uc16> chars)
: buffer_(Vector<const byte>::cast(chars)),
state_(TWO_BYTE) { }
FlatContent() : buffer_(), state_(NON_FLAT) { }
- Vector<const byte> buffer_;
+ Vector<const uint8_t> buffer_;
State state_;
friend class String;
@@ -7280,27 +7432,25 @@ class String: public HeapObject {
inline int length();
inline void set_length(int value);
- // Get and set the hash field of the string.
- inline uint32_t hash_field();
- inline void set_hash_field(uint32_t value);
-
// Returns whether this string has only ASCII chars, i.e. all of them can
// be ASCII encoded. This might be the case even if the string is
// two-byte. Such strings may appear when the embedder prefers
// two-byte external representations even for ASCII data.
- inline bool IsAsciiRepresentation();
+ inline bool IsOneByteRepresentation();
inline bool IsTwoByteRepresentation();
// Cons and slices have an encoding flag that may not represent the actual
// encoding of the underlying string. This is taken into account here.
// Requires: this->IsFlat()
- inline bool IsAsciiRepresentationUnderneath();
+ inline bool IsOneByteRepresentationUnderneath();
inline bool IsTwoByteRepresentationUnderneath();
// NOTE: this should be considered only a hint. False negatives are
// possible.
inline bool HasOnlyAsciiChars();
+ inline bool IsOneByteConvertible();
+
// Get and set individual two byte chars in the string.
inline void Set(int index, uint16_t value);
// Get individual two byte char in the string. Repeated calls
@@ -7351,8 +7501,8 @@ class String: public HeapObject {
// String equality operations.
inline bool Equals(String* other);
- bool IsEqualTo(Vector<const char> str);
- bool IsAsciiEqualTo(Vector<const char> str);
+ bool IsUtf8EqualTo(Vector<const char> str);
+ bool IsOneByteEqualTo(Vector<const uint8_t> str);
bool IsTwoByteEqualTo(Vector<const uc16> str);
// Return a UTF8 representation of the string. The string is null
@@ -7382,19 +7532,7 @@ class String: public HeapObject {
SmartArrayPointer<uc16> ToWideCString(
RobustnessFlag robustness_flag = FAST_STRING_TRAVERSAL);
- // Tells whether the hash code has been computed.
- inline bool HasHashCode();
-
- // Returns a hash value used for the property table
- inline uint32_t Hash();
-
- static uint32_t ComputeHashField(unibrow::CharacterStream* buffer,
- int length,
- uint32_t seed);
-
- static bool ComputeArrayIndex(unibrow::CharacterStream* buffer,
- uint32_t* index,
- int length);
+ bool ComputeArrayIndex(uint32_t* index);
// Externalization.
bool MakeExternal(v8::String::ExternalStringResource* resource);
@@ -7426,69 +7564,18 @@ class String: public HeapObject {
inline bool IsFlat();
// Layout description.
- static const int kLengthOffset = HeapObject::kHeaderSize;
- static const int kHashFieldOffset = kLengthOffset + kPointerSize;
- static const int kSize = kHashFieldOffset + kPointerSize;
+ static const int kLengthOffset = Name::kSize;
+ static const int kSize = kLengthOffset + kPointerSize;
// Maximum number of characters to consider when trying to convert a string
// value into an array index.
static const int kMaxArrayIndexSize = 10;
-
- // Max ASCII char code.
- static const int kMaxAsciiCharCode = unibrow::Utf8::kMaxOneByteChar;
- static const unsigned kMaxAsciiCharCodeU = unibrow::Utf8::kMaxOneByteChar;
- static const int kMaxUtf16CodeUnit = 0xffff;
-
- // Mask constant for checking if a string has a computed hash code
- // and if it is an array index. The least significant bit indicates
- // whether a hash code has been computed. If the hash code has been
- // computed the 2nd bit tells whether the string can be used as an
- // array index.
- static const int kHashNotComputedMask = 1;
- static const int kIsNotArrayIndexMask = 1 << 1;
- static const int kNofHashBitFields = 2;
-
- // Shift constant retrieving hash code from hash field.
- static const int kHashShift = kNofHashBitFields;
-
- // Only these bits are relevant in the hash, since the top two are shifted
- // out.
- static const uint32_t kHashBitMask = 0xffffffffu >> kHashShift;
-
- // Array index strings this short can keep their index in the hash
- // field.
- static const int kMaxCachedArrayIndexLength = 7;
-
- // For strings which are array indexes the hash value has the string length
- // mixed into the hash, mainly to avoid a hash value of zero which would be
- // the case for the string '0'. 24 bits are used for the array index value.
- static const int kArrayIndexValueBits = 24;
- static const int kArrayIndexLengthBits =
- kBitsPerInt - kArrayIndexValueBits - kNofHashBitFields;
-
- STATIC_CHECK((kArrayIndexLengthBits > 0));
STATIC_CHECK(kMaxArrayIndexSize < (1 << kArrayIndexLengthBits));
- static const int kArrayIndexHashLengthShift =
- kArrayIndexValueBits + kNofHashBitFields;
-
- static const int kArrayIndexHashMask = (1 << kArrayIndexHashLengthShift) - 1;
-
- static const int kArrayIndexValueMask =
- ((1 << kArrayIndexValueBits) - 1) << kHashShift;
-
- // Check that kMaxCachedArrayIndexLength + 1 is a power of two so we
- // could use a mask to test if the length of string is less than or equal to
- // kMaxCachedArrayIndexLength.
- STATIC_CHECK(IS_POWER_OF_TWO(kMaxCachedArrayIndexLength + 1));
-
- static const int kContainsCachedArrayIndexMask =
- (~kMaxCachedArrayIndexLength << kArrayIndexHashLengthShift) |
- kIsNotArrayIndexMask;
-
- // Value of empty hash field indicating that the hash is not computed.
- static const int kEmptyHashField =
- kIsNotArrayIndexMask | kHashNotComputedMask;
+ // Max char codes.
+ static const int32_t kMaxOneByteCharCode = unibrow::Latin1::kMaxChar;
+ static const uint32_t kMaxOneByteCharCodeU = unibrow::Latin1::kMaxChar;
+ static const int kMaxUtf16CodeUnit = 0xffff;
// Value of hash field containing computed hash equal to zero.
static const int kEmptyStringHash = kIsNotArrayIndexMask;
@@ -7507,18 +7594,6 @@ class String: public HeapObject {
const uc16* GetTwoByteData();
const uc16* GetTwoByteData(unsigned start);
- // Support for StringInputBuffer
- static const unibrow::byte* ReadBlock(String* input,
- unibrow::byte* util_buffer,
- unsigned capacity,
- unsigned* remaining,
- unsigned* offset);
- static const unibrow::byte* ReadBlock(String** input,
- unibrow::byte* util_buffer,
- unsigned capacity,
- unsigned* remaining,
- unsigned* offset);
-
// Helper function for flattening strings.
template <typename sinkchar>
static void WriteToFlat(String* source,
@@ -7533,7 +7608,7 @@ class String: public HeapObject {
const char* start = chars;
const char* limit = chars + length;
#ifdef V8_HOST_CAN_READ_UNALIGNED
- ASSERT(kMaxAsciiCharCode == 0x7F);
+ ASSERT(unibrow::Utf8::kMaxOneByteChar == 0x7F);
const uintptr_t non_ascii_mask = kUintptrAllBitsSet / 0xFF * 0x80;
while (chars + sizeof(uintptr_t) <= limit) {
if (*reinterpret_cast<const uintptr_t*>(chars) & non_ascii_mask) {
@@ -7543,7 +7618,7 @@ class String: public HeapObject {
}
#endif
while (chars < limit) {
- if (static_cast<uint8_t>(*chars) > kMaxAsciiCharCodeU) {
+ if (static_cast<uint8_t>(*chars) > unibrow::Utf8::kMaxOneByteChar) {
return static_cast<int>(chars - start);
}
++chars;
@@ -7555,55 +7630,41 @@ class String: public HeapObject {
return NonAsciiStart(chars, length) >= length;
}
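// Illustrative arithmetic (not part of this patch): in the word-at-a-time
// loop above, kUintptrAllBitsSet / 0xFF * 0x80 puts 0x80 into every byte of
// the word; on a 64-bit host:
//
//   0xFFFFFFFFFFFFFFFF / 0xFF == 0x0101010101010101
//   0x0101010101010101 * 0x80 == 0x8080808080808080
//
// ANDing a word of characters with this mask is non-zero exactly when some
// byte has its top bit set, i.e. some character is >= 0x80 and therefore
// non-ASCII, so runs of pure ASCII are skipped a whole word per iteration.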
- static inline int NonAsciiStart(const uc16* chars, int length) {
+ static inline bool IsAscii(const uint8_t* chars, int length) {
+ return
+ NonAsciiStart(reinterpret_cast<const char*>(chars), length) >= length;
+ }
+
+ static inline int NonOneByteStart(const uc16* chars, int length) {
const uc16* limit = chars + length;
const uc16* start = chars;
while (chars < limit) {
- if (*chars > kMaxAsciiCharCodeU) return static_cast<int>(chars - start);
+ if (*chars > kMaxOneByteCharCodeU) return static_cast<int>(chars - start);
++chars;
}
return static_cast<int>(chars - start);
}
- static inline bool IsAscii(const uc16* chars, int length) {
- return NonAsciiStart(chars, length) >= length;
+ static inline bool IsOneByte(const uc16* chars, int length) {
+ return NonOneByteStart(chars, length) >= length;
}
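
NonAsciiStart above checks a word at a time when the host allows unaligned reads: kUintptrAllBitsSet / 0xFF * 0x80 replicates the byte 0x80 into every byte lane (0x8080...80), so ANDing a whole word against the mask is non-zero exactly when some byte is 0x80 or above. A standalone sketch of the same scan, using memcpy for the loads so it is valid on any host rather than only under V8_HOST_CAN_READ_UNALIGNED:

    #include <cstdint>
    #include <cstring>

    // Returns the index of the first non-ASCII byte, or length if none.
    static int NonAsciiStartSketch(const char* chars, int length) {
      const char* start = chars;
      const char* limit = chars + length;
      // 0x8080...80: one high bit per byte lane.
      const uintptr_t non_ascii_mask = static_cast<uintptr_t>(-1) / 0xFF * 0x80;
      while (chars + sizeof(uintptr_t) <= limit) {
        uintptr_t word;
        std::memcpy(&word, chars, sizeof(word));   // alignment-safe load
        if (word & non_ascii_mask) break;          // locate the byte below
        chars += sizeof(uintptr_t);
      }
      while (chars < limit) {
        if (static_cast<uint8_t>(*chars) > 0x7F)
          return static_cast<int>(chars - start);
        ++chars;
      }
      return static_cast<int>(chars - start);
    }
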
- protected:
- class ReadBlockBuffer {
- public:
- ReadBlockBuffer(unibrow::byte* util_buffer_,
- unsigned cursor_,
- unsigned capacity_,
- unsigned remaining_) :
- util_buffer(util_buffer_),
- cursor(cursor_),
- capacity(capacity_),
- remaining(remaining_) {
- }
- unibrow::byte* util_buffer;
- unsigned cursor;
- unsigned capacity;
- unsigned remaining;
- };
-
- static inline const unibrow::byte* ReadBlock(String* input,
- ReadBlockBuffer* buffer,
- unsigned* offset,
- unsigned max_chars);
- static void ReadBlockIntoBuffer(String* input,
- ReadBlockBuffer* buffer,
- unsigned* offset_ptr,
- unsigned max_chars);
+ template<class Visitor, class ConsOp>
+ static inline void Visit(String* string,
+ unsigned offset,
+ Visitor& visitor,
+ ConsOp& cons_op,
+ int32_t type,
+ unsigned length);
private:
+ friend class Name;
+
// Try to flatten the top level ConsString that is hiding behind this
// string. This is a no-op unless the string is a ConsString. Flatten
// mutates the ConsString and might return a failure.
MUST_USE_RESULT MaybeObject* SlowTryFlatten(PretenureFlag pretenure);
- static inline bool IsHashFieldComputed(uint32_t field);
-
// Slow case of String::Equals. This implementation works on any strings
// but it is most efficient on strings that are almost flat.
bool SlowEquals(String* other);
@@ -7632,6 +7693,11 @@ class SeqString: public String {
static const int kSymbolIdOffset = String::kSize;
static const int kHeaderSize = kSymbolIdOffset + kPointerSize;
+ // Truncate the string in-place if possible and return the result.
+ // In case of new_length == 0, the empty string is returned without
+ // truncating the original string.
+ MUST_USE_RESULT String* Truncate(int new_length);
+
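
SeqString::Truncate, declared just above, shrinks a sequential string in place rather than allocating a shorter copy. A behavioral sketch on a plain buffer (the real heap version presumably also has to account for the bytes freed at the tail, a GC detail this sketch ignores):

    #include <cassert>

    // Sketch of the declared semantics: same backing store, shorter view;
    // new_length == 0 hands back a shared empty string and leaves the
    // original untouched, matching the comment above.
    struct BufString {
      const char* data;
      int length;
    };

    static const BufString kEmptyBufString = {"", 0};

    BufString Truncate(BufString s, int new_length) {
      assert(0 <= new_length && new_length <= s.length);
      if (new_length == 0) return kEmptyBufString;
      s.length = new_length;  // keep the data pointer, shorten the view
      return s;
    }
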
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(SeqString);
};
@@ -7639,26 +7705,26 @@ class SeqString: public String {
// The AsciiString class captures sequential ASCII string objects.
// Each character in the AsciiString is an ASCII character.
-class SeqAsciiString: public SeqString {
+class SeqOneByteString: public SeqString {
public:
static const bool kHasAsciiEncoding = true;
// Dispatched behavior.
- inline uint16_t SeqAsciiStringGet(int index);
- inline void SeqAsciiStringSet(int index, uint16_t value);
+ inline uint16_t SeqOneByteStringGet(int index);
+ inline void SeqOneByteStringSet(int index, uint16_t value);
// Get the address of the characters in this string.
inline Address GetCharsAddress();
- inline char* GetChars();
+ inline uint8_t* GetChars();
// Casting
- static inline SeqAsciiString* cast(Object* obj);
+ static inline SeqOneByteString* cast(Object* obj);
// Garbage collection support. This method is called by the
// garbage collector to compute the actual size of an AsciiString
// instance.
- inline int SeqAsciiStringSize(InstanceType instance_type);
+ inline int SeqOneByteStringSize(InstanceType instance_type);
// Computes the size for an AsciiString instance of a given length.
static int SizeFor(int length) {
@@ -7671,18 +7737,10 @@ class SeqAsciiString: public SeqString {
// Q.v. String::kMaxLength which is the maximal size of concatenated strings.
static const int kMaxLength = (kMaxSize - kHeaderSize);
- // Support for StringInputBuffer.
- inline void SeqAsciiStringReadBlockIntoBuffer(ReadBlockBuffer* buffer,
- unsigned* offset,
- unsigned chars);
- inline const unibrow::byte* SeqAsciiStringReadBlock(unsigned* remaining,
- unsigned* offset,
- unsigned chars);
-
- DECLARE_VERIFIER(SeqAsciiString)
+ DECLARE_VERIFIER(SeqOneByteString)
private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(SeqAsciiString);
+ DISALLOW_IMPLICIT_CONSTRUCTORS(SeqOneByteString);
};
@@ -7723,11 +7781,6 @@ class SeqTwoByteString: public SeqString {
// Q.v. String::kMaxLength which is the maximal size of concatenated strings.
static const int kMaxLength = (kMaxSize - kHeaderSize) / sizeof(uint16_t);
- // Support for StringInputBuffer.
- inline void SeqTwoByteStringReadBlockIntoBuffer(ReadBlockBuffer* buffer,
- unsigned* offset_ptr,
- unsigned chars);
-
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(SeqTwoByteString);
};
@@ -7770,14 +7823,6 @@ class ConsString: public String {
static const int kSecondOffset = kFirstOffset + kPointerSize;
static const int kSize = kSecondOffset + kPointerSize;
- // Support for StringInputBuffer.
- inline const unibrow::byte* ConsStringReadBlock(ReadBlockBuffer* buffer,
- unsigned* offset_ptr,
- unsigned chars);
- inline void ConsStringReadBlockIntoBuffer(ReadBlockBuffer* buffer,
- unsigned* offset_ptr,
- unsigned chars);
-
// Minimum length for a cons string.
static const int kMinLength = 13;
@@ -7822,13 +7867,6 @@ class SlicedString: public String {
static const int kOffsetOffset = kParentOffset + kPointerSize;
static const int kSize = kOffsetOffset + kPointerSize;
- // Support for StringInputBuffer
- inline const unibrow::byte* SlicedStringReadBlock(ReadBlockBuffer* buffer,
- unsigned* offset_ptr,
- unsigned chars);
- inline void SlicedStringReadBlockIntoBuffer(ReadBlockBuffer* buffer,
- unsigned* offset_ptr,
- unsigned chars);
// Minimum length for a sliced string.
static const int kMinLength = 13;
@@ -7863,6 +7901,9 @@ class ExternalString: public String {
static const int kResourceDataOffset = kResourceOffset + kPointerSize;
static const int kSize = kResourceDataOffset + kPointerSize;
+ static const int kMaxShortLength =
+ (kShortSize - SeqString::kHeaderSize) / kCharSize;
+
// Return whether external string is short (data pointer is not cached).
inline bool is_short();
@@ -7891,7 +7932,7 @@ class ExternalAsciiString: public ExternalString {
// which the pointer cache has to be refreshed.
inline void update_data_cache();
- inline const char* GetChars();
+ inline const uint8_t* GetChars();
// Dispatched behavior.
inline uint16_t ExternalAsciiStringGet(int index);
@@ -7905,14 +7946,6 @@ class ExternalAsciiString: public ExternalString {
template<typename StaticVisitor>
inline void ExternalAsciiStringIterateBody();
- // Support for StringInputBuffer.
- const unibrow::byte* ExternalAsciiStringReadBlock(unsigned* remaining,
- unsigned* offset,
- unsigned chars);
- inline void ExternalAsciiStringReadBlockIntoBuffer(ReadBlockBuffer* buffer,
- unsigned* offset,
- unsigned chars);
-
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalAsciiString);
};
@@ -7953,12 +7986,6 @@ class ExternalTwoByteString: public ExternalString {
template<typename StaticVisitor>
inline void ExternalTwoByteStringIterateBody();
-
- // Support for StringInputBuffer.
- void ExternalTwoByteStringReadBlockIntoBuffer(ReadBlockBuffer* buffer,
- unsigned* offset_ptr,
- unsigned chars);
-
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalTwoByteString);
};
@@ -8005,32 +8032,82 @@ class FlatStringReader : public Relocatable {
};
-// Note that StringInputBuffers are not valid across a GC! To fix this
-// it would have to store a String Handle instead of a String* and
-// AsciiStringReadBlock would have to be modified to use memcpy.
-//
-// StringInputBuffer is able to traverse any string regardless of how
-// deeply nested a sequence of ConsStrings it is made of. However,
-// performance will be better if deep strings are flattened before they
-// are traversed. Since flattening requires memory allocation this is
-// not always desirable, however (esp. in debugging situations).
-class StringInputBuffer: public unibrow::InputBuffer<String, String*, 1024> {
+// A ConsStringOp that returns null.
+// Useful when the operation to apply to a ConsString
+// requires an expensive data structure.
+class ConsStringNullOp {
public:
- virtual void Seek(unsigned pos);
- inline StringInputBuffer(): unibrow::InputBuffer<String, String*, 1024>() {}
- explicit inline StringInputBuffer(String* backing):
- unibrow::InputBuffer<String, String*, 1024>(backing) {}
+ inline ConsStringNullOp() {}
+ static inline String* Operate(String*, unsigned*, int32_t*, unsigned*);
+ private:
+ DISALLOW_COPY_AND_ASSIGN(ConsStringNullOp);
};
-class SafeStringInputBuffer
- : public unibrow::InputBuffer<String, String**, 256> {
+// This maintains an off-stack representation of the stack frames required
+// to traverse a ConsString, allowing an entirely iterative and restartable
+// traversal of the entire string.
+// Note: this class is not GC-safe.
+class ConsStringIteratorOp {
+ public:
+ inline ConsStringIteratorOp() {}
+ String* Operate(String* string,
+ unsigned* offset_out,
+ int32_t* type_out,
+ unsigned* length_out);
+ inline String* ContinueOperation(int32_t* type_out, unsigned* length_out);
+ inline void Reset();
+ inline bool HasMore();
+
+ private:
+ // TODO(dcarney): Templatize this out for different stack sizes.
+ static const unsigned kStackSize = 32;
+ // Use a mask instead of doing modulo operations for stack wrapping.
+ static const unsigned kDepthMask = kStackSize - 1;
+ STATIC_ASSERT(IS_POWER_OF_TWO(kStackSize));
+ static inline unsigned OffsetForDepth(unsigned depth);
+
+ inline void PushLeft(ConsString* string);
+ inline void PushRight(ConsString* string);
+ inline void AdjustMaximumDepth();
+ inline void Pop();
+ String* NextLeaf(bool* blew_stack, int32_t* type_out, unsigned* length_out);
+ String* Search(unsigned* offset_out,
+ int32_t* type_out,
+ unsigned* length_out);
+
+ unsigned depth_;
+ unsigned maximum_depth_;
+ // Stack must always contain only frames for which right traversal
+ // has not yet been performed.
+ ConsString* frames_[kStackSize];
+ unsigned consumed_;
+ ConsString* root_;
+ DISALLOW_COPY_AND_ASSIGN(ConsStringIteratorOp);
+};
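
ConsStringIteratorOp caps its explicit stack at kStackSize frames and, because kStackSize is a power of two, wraps slot indices with depth & kDepthMask instead of a modulo. When the logical depth outgrows the window, older frames are overwritten; the iterator notices via maximum_depth_ and NextLeaf's blew_stack and restarts from root_. A minimal standalone sketch of the masked-wrap idiom on its own:

    // With a power-of-two capacity, (depth & kDepthMask) equals
    // (depth % kStackSize) but compiles to a single AND.
    template <typename T>
    class RingStack {
     public:
      static const unsigned kStackSize = 32;
      static const unsigned kDepthMask = kStackSize - 1;
      // On overflow the oldest frames are silently overwritten, as in the
      // real iterator, which detects that case and restarts the traversal.
      void Push(const T& value) { frames_[depth_++ & kDepthMask] = value; }
      const T& Top() const { return frames_[(depth_ - 1) & kDepthMask]; }
      void Pop() { --depth_; }
      unsigned depth() const { return depth_; }
     private:
      T frames_[kStackSize];
      unsigned depth_ = 0;
    };
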
+
+
+// Note: this class is not GC-safe.
+class StringCharacterStream {
public:
- virtual void Seek(unsigned pos);
- inline SafeStringInputBuffer()
- : unibrow::InputBuffer<String, String**, 256>() {}
- explicit inline SafeStringInputBuffer(String** backing)
- : unibrow::InputBuffer<String, String**, 256>(backing) {}
+ inline StringCharacterStream(String* string,
+ ConsStringIteratorOp* op,
+ unsigned offset = 0);
+ inline uint16_t GetNext();
+ inline bool HasMore();
+ inline void Reset(String* string, unsigned offset = 0);
+ inline void VisitOneByteString(const uint8_t* chars, unsigned length);
+ inline void VisitTwoByteString(const uint16_t* chars, unsigned length);
+
+ private:
+ bool is_one_byte_;
+ union {
+ const uint8_t* buffer8_;
+ const uint16_t* buffer16_;
+ };
+ const uint8_t* end_;
+ ConsStringIteratorOp* op_;
+ DISALLOW_COPY_AND_ASSIGN(StringCharacterStream);
};
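
StringCharacterStream drives a ConsStringIteratorOp to yield the characters of an arbitrarily deep ConsString tree without recursion, replacing the StringInputBuffer machinery deleted above. A standalone model of the traversal order it produces (left-to-right leaves of a binary cons tree, using an explicit stack of deferred right children; the real iterator keeps that stack in the fixed masked window described above):

    #include <cstdio>
    #include <string>
    #include <vector>

    // A node is a leaf (first == nullptr) or has two non-null children.
    struct ConsNode {
      const ConsNode* first = nullptr;   // left child
      const ConsNode* second = nullptr;  // right child
      std::string leaf;                  // payload when this is a leaf
    };

    void VisitLeaves(const ConsNode* node) {
      std::vector<const ConsNode*> pending;  // deferred right children
      for (;;) {
        while (node->first != nullptr) {     // walk down the left spine
          pending.push_back(node->second);
          node = node->first;
        }
        std::printf("%s", node->leaf.c_str());
        if (pending.empty()) break;
        node = pending.back();               // resume the nearest right child
        pending.pop_back();
      }
      std::printf("\n");
    }

    int main() {
      ConsNode foo, bar, root;
      foo.leaf = "foo";
      bar.leaf = "bar";
      root.first = &foo;
      root.second = &bar;
      VisitLeaves(&root);  // prints: foobar
      return 0;
    }
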
@@ -8114,15 +8191,10 @@ class JSGlobalPropertyCell: public HeapObject {
return address() + kValueOffset;
}
+ // Dispatched behavior.
+ DECLARE_PRINTER(JSGlobalPropertyCell)
DECLARE_VERIFIER(JSGlobalPropertyCell)
-#ifdef OBJECT_PRINT
- inline void JSGlobalPropertyCellPrint() {
- JSGlobalPropertyCellPrint(stdout);
- }
- void JSGlobalPropertyCellPrint(FILE* out);
-#endif
-
// Layout description.
static const int kValueOffset = HeapObject::kHeaderSize;
static const int kSize = kValueOffset + kPointerSize;
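
This is the first of many hunks in this file that collapse a hand-written #ifdef OBJECT_PRINT pair (an inline Print() forwarding to a Print(FILE*) overload) into one DECLARE_PRINTER(Name) line. The macro definition is not part of this section; inferred from the code it replaces, it plausibly expands to something like:

    #include <cstdio>

    // Assumption: reconstructed from the deleted per-class boilerplate,
    // not quoted from the actual header.
    #ifdef OBJECT_PRINT
    #define DECLARE_PRINTER(Name)        \
      inline void Name##Print() {        \
        Name##Print(stdout);             \
      }                                  \
      void Name##Print(FILE* out);
    #else
    #define DECLARE_PRINTER(Name)
    #endif
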
@@ -8212,12 +8284,7 @@ class JSProxy: public JSReceiver {
Handle<Object> args[]);
// Dispatched behavior.
-#ifdef OBJECT_PRINT
- inline void JSProxyPrint() {
- JSProxyPrint(stdout);
- }
- void JSProxyPrint(FILE* out);
-#endif
+ DECLARE_PRINTER(JSProxy)
DECLARE_VERIFIER(JSProxy)
// Layout description. We add padding so that a proxy has the same
@@ -8253,12 +8320,7 @@ class JSFunctionProxy: public JSProxy {
static inline JSFunctionProxy* cast(Object* obj);
// Dispatched behavior.
-#ifdef OBJECT_PRINT
- inline void JSFunctionProxyPrint() {
- JSFunctionProxyPrint(stdout);
- }
- void JSFunctionProxyPrint(FILE* out);
-#endif
+ DECLARE_PRINTER(JSFunctionProxy)
DECLARE_VERIFIER(JSFunctionProxy)
// Layout description.
@@ -8288,12 +8350,8 @@ class JSSet: public JSObject {
// Casting.
static inline JSSet* cast(Object* obj);
-#ifdef OBJECT_PRINT
- inline void JSSetPrint() {
- JSSetPrint(stdout);
- }
- void JSSetPrint(FILE* out);
-#endif
+ // Dispatched behavior.
+ DECLARE_PRINTER(JSSet)
DECLARE_VERIFIER(JSSet)
static const int kTableOffset = JSObject::kHeaderSize;
@@ -8313,12 +8371,8 @@ class JSMap: public JSObject {
// Casting.
static inline JSMap* cast(Object* obj);
-#ifdef OBJECT_PRINT
- inline void JSMapPrint() {
- JSMapPrint(stdout);
- }
- void JSMapPrint(FILE* out);
-#endif
+ // Dispatched behavior.
+ DECLARE_PRINTER(JSMap)
DECLARE_VERIFIER(JSMap)
static const int kTableOffset = JSObject::kHeaderSize;
@@ -8341,12 +8395,8 @@ class JSWeakMap: public JSObject {
// Casting.
static inline JSWeakMap* cast(Object* obj);
-#ifdef OBJECT_PRINT
- inline void JSWeakMapPrint() {
- JSWeakMapPrint(stdout);
- }
- void JSWeakMapPrint(FILE* out);
-#endif
+ // Dispatched behavior.
+ DECLARE_PRINTER(JSWeakMap)
DECLARE_VERIFIER(JSWeakMap)
static const int kTableOffset = JSObject::kHeaderSize;
@@ -8376,12 +8426,8 @@ class Foreign: public HeapObject {
template<typename StaticVisitor>
inline void ForeignIterateBody();
-#ifdef OBJECT_PRINT
- inline void ForeignPrint() {
- ForeignPrint(stdout);
- }
- void ForeignPrint(FILE* out);
-#endif
+ // Dispatched behavior.
+ DECLARE_PRINTER(Foreign)
DECLARE_VERIFIER(Foreign)
// Layout description.
@@ -8416,10 +8462,11 @@ class JSArray: public JSObject {
// Initialize the array with the given capacity. The function may
// fail due to out-of-memory situations, but only if the requested
// capacity is non-zero.
- MUST_USE_RESULT MaybeObject* Initialize(int capacity);
+ MUST_USE_RESULT MaybeObject* Initialize(int capacity, int length = 0);
// Initializes the array to a certain length.
inline bool AllowsSetElementsLength();
+ // Can cause GC.
MUST_USE_RESULT MaybeObject* SetElementsLength(Object* length);
// Set the content of the array to the content of storage.
@@ -8433,12 +8480,7 @@ class JSArray: public JSObject {
inline void EnsureSize(int minimum_size_of_backing_fixed_array);
// Dispatched behavior.
-#ifdef OBJECT_PRINT
- inline void JSArrayPrint() {
- JSArrayPrint(stdout);
- }
- void JSArrayPrint(FILE* out);
-#endif
+ DECLARE_PRINTER(JSArray)
DECLARE_VERIFIER(JSArray)
// Number of element slots to pre-allocate for an empty array.
@@ -8477,20 +8519,8 @@ class JSRegExpResult: public JSArray {
};
-// An accessor must have a getter, but can have no setter.
-//
-// When setting a property, V8 searches accessors in prototypes.
-// If an accessor was found and it does not have a setter,
-// the request is ignored.
-//
-// If the accessor in the prototype has the READ_ONLY property attribute, then
-// a new value is added to the local object when the property is set.
-// This shadows the accessor in the prototype.
class AccessorInfo: public Struct {
public:
- DECL_ACCESSORS(getter, Object)
- DECL_ACCESSORS(setter, Object)
- DECL_ACCESSORS(data, Object)
DECL_ACCESSORS(name, Object)
DECL_ACCESSORS(flag, Smi)
DECL_ACCESSORS(expected_receiver_type, Object)
@@ -8512,18 +8542,11 @@ class AccessorInfo: public Struct {
static inline AccessorInfo* cast(Object* obj);
-#ifdef OBJECT_PRINT
- inline void AccessorInfoPrint() {
- AccessorInfoPrint(stdout);
- }
- void AccessorInfoPrint(FILE* out);
-#endif
+ // Dispatched behavior.
DECLARE_VERIFIER(AccessorInfo)
- static const int kGetterOffset = HeapObject::kHeaderSize;
- static const int kSetterOffset = kGetterOffset + kPointerSize;
- static const int kDataOffset = kSetterOffset + kPointerSize;
- static const int kNameOffset = kDataOffset + kPointerSize;
+
+ static const int kNameOffset = HeapObject::kHeaderSize;
static const int kFlagOffset = kNameOffset + kPointerSize;
static const int kExpectedReceiverTypeOffset = kFlagOffset + kPointerSize;
static const int kSize = kExpectedReceiverTypeOffset + kPointerSize;
@@ -8539,6 +8562,74 @@ class AccessorInfo: public Struct {
};
+class DeclaredAccessorDescriptor: public Struct {
+ public:
+ // TODO(dcarney): Fill out this class.
+ DECL_ACCESSORS(internal_field, Smi)
+
+ static inline DeclaredAccessorDescriptor* cast(Object* obj);
+
+ // Dispatched behavior.
+ DECLARE_PRINTER(DeclaredAccessorDescriptor)
+ DECLARE_VERIFIER(DeclaredAccessorDescriptor)
+
+ static const int kInternalFieldOffset = HeapObject::kHeaderSize;
+ static const int kSize = kInternalFieldOffset + kPointerSize;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(DeclaredAccessorDescriptor);
+};
+
+
+class DeclaredAccessorInfo: public AccessorInfo {
+ public:
+ DECL_ACCESSORS(descriptor, DeclaredAccessorDescriptor)
+
+ static inline DeclaredAccessorInfo* cast(Object* obj);
+
+ // Dispatched behavior.
+ DECLARE_PRINTER(DeclaredAccessorInfo)
+ DECLARE_VERIFIER(DeclaredAccessorInfo)
+
+ static const int kDescriptorOffset = AccessorInfo::kSize;
+ static const int kSize = kDescriptorOffset + kPointerSize;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(DeclaredAccessorInfo);
+};
+
+
+// An accessor must have a getter, but can have no setter.
+//
+// When setting a property, V8 searches accessors in prototypes.
+// If an accessor was found and it does not have a setter,
+// the request is ignored.
+//
+// If the accessor in the prototype has the READ_ONLY property attribute, then
+// a new value is added to the local object when the property is set.
+// This shadows the accessor in the prototype.
+class ExecutableAccessorInfo: public AccessorInfo {
+ public:
+ DECL_ACCESSORS(getter, Object)
+ DECL_ACCESSORS(setter, Object)
+ DECL_ACCESSORS(data, Object)
+
+ static inline ExecutableAccessorInfo* cast(Object* obj);
+
+ // Dispatched behavior.
+ DECLARE_PRINTER(ExecutableAccessorInfo)
+ DECLARE_VERIFIER(ExecutableAccessorInfo)
+
+ static const int kGetterOffset = AccessorInfo::kSize;
+ static const int kSetterOffset = kGetterOffset + kPointerSize;
+ static const int kDataOffset = kSetterOffset + kPointerSize;
+ static const int kSize = kDataOffset + kPointerSize;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(ExecutableAccessorInfo);
+};
+
+
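
The AccessorInfo split above keeps the shared name/flag/expected_receiver_type fields in the base class and moves getter/setter/data into the new ExecutableAccessorInfo, while DeclaredAccessorInfo adds a descriptor. Each subclass starts its layout at the parent's kSize, so the whole object layout is a chain of compile-time constants. A standalone sketch of the idiom (the pointer and header sizes are stand-ins for V8's kPointerSize and HeapObject::kHeaderSize):

    static const int kPointerSizeSketch = sizeof(void*);
    static const int kHeaderSizeSketch = kPointerSizeSketch;  // map word

    struct AccessorInfoLayout {
      static const int kNameOffset = kHeaderSizeSketch;
      static const int kFlagOffset = kNameOffset + kPointerSizeSketch;
      static const int kExpectedReceiverTypeOffset =
          kFlagOffset + kPointerSizeSketch;
      static const int kSize =
          kExpectedReceiverTypeOffset + kPointerSizeSketch;
    };

    // Subclass fields begin exactly where the parent's layout ends.
    struct ExecutableAccessorInfoLayout : AccessorInfoLayout {
      static const int kGetterOffset = AccessorInfoLayout::kSize;
      static const int kSetterOffset = kGetterOffset + kPointerSizeSketch;
      static const int kDataOffset = kSetterOffset + kPointerSizeSketch;
      static const int kSize = kDataOffset + kPointerSizeSketch;
    };
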
// Support for JavaScript accessors: A pair of a getter and a setter. Each
// accessor can either be
// * a pointer to a JavaScript function or proxy: a real accessor
@@ -8579,9 +8670,8 @@ class AccessorPair: public Struct {
return IsJSAccessor(getter()) || IsJSAccessor(setter());
}
-#ifdef OBJECT_PRINT
- void AccessorPairPrint(FILE* out = stdout);
-#endif
+ // Dispatched behavior.
+ DECLARE_PRINTER(AccessorPair)
DECLARE_VERIFIER(AccessorPair)
static const int kGetterOffset = HeapObject::kHeaderSize;
@@ -8610,12 +8700,8 @@ class AccessCheckInfo: public Struct {
static inline AccessCheckInfo* cast(Object* obj);
-#ifdef OBJECT_PRINT
- inline void AccessCheckInfoPrint() {
- AccessCheckInfoPrint(stdout);
- }
- void AccessCheckInfoPrint(FILE* out);
-#endif
+ // Dispatched behavior.
+ DECLARE_PRINTER(AccessCheckInfo)
DECLARE_VERIFIER(AccessCheckInfo)
static const int kNamedCallbackOffset = HeapObject::kHeaderSize;
@@ -8640,12 +8726,8 @@ class InterceptorInfo: public Struct {
static inline InterceptorInfo* cast(Object* obj);
-#ifdef OBJECT_PRINT
- inline void InterceptorInfoPrint() {
- InterceptorInfoPrint(stdout);
- }
- void InterceptorInfoPrint(FILE* out);
-#endif
+ // Dispatched behavior.
+ DECLARE_PRINTER(InterceptorInfo)
DECLARE_VERIFIER(InterceptorInfo)
static const int kGetterOffset = HeapObject::kHeaderSize;
@@ -8669,12 +8751,8 @@ class CallHandlerInfo: public Struct {
static inline CallHandlerInfo* cast(Object* obj);
-#ifdef OBJECT_PRINT
- inline void CallHandlerInfoPrint() {
- CallHandlerInfoPrint(stdout);
- }
- void CallHandlerInfoPrint(FILE* out);
-#endif
+ // Dispatched behavior.
+ DECLARE_PRINTER(CallHandlerInfo)
DECLARE_VERIFIER(CallHandlerInfo)
static const int kCallbackOffset = HeapObject::kHeaderSize;
@@ -8718,6 +8796,9 @@ class FunctionTemplateInfo: public TemplateInfo {
DECL_ACCESSORS(access_check_info, Object)
DECL_ACCESSORS(flag, Smi)
+ inline int length();
+ inline void set_length(int value);
+
// Following properties use flag bits.
DECL_BOOLEAN_ACCESSORS(hidden_prototype)
DECL_BOOLEAN_ACCESSORS(undetectable)
@@ -8728,12 +8809,8 @@ class FunctionTemplateInfo: public TemplateInfo {
static inline FunctionTemplateInfo* cast(Object* obj);
-#ifdef OBJECT_PRINT
- inline void FunctionTemplateInfoPrint() {
- FunctionTemplateInfoPrint(stdout);
- }
- void FunctionTemplateInfoPrint(FILE* out);
-#endif
+ // Dispatched behavior.
+ DECLARE_PRINTER(FunctionTemplateInfo)
DECLARE_VERIFIER(FunctionTemplateInfo)
static const int kSerialNumberOffset = TemplateInfo::kHeaderSize;
@@ -8755,7 +8832,8 @@ class FunctionTemplateInfo: public TemplateInfo {
static const int kAccessCheckInfoOffset =
kInstanceCallHandlerOffset + kPointerSize;
static const int kFlagOffset = kAccessCheckInfoOffset + kPointerSize;
- static const int kSize = kFlagOffset + kPointerSize;
+ static const int kLengthOffset = kFlagOffset + kPointerSize;
+ static const int kSize = kLengthOffset + kPointerSize;
private:
// Bit position in the flag, from least significant bit position.
@@ -8777,19 +8855,17 @@ class ObjectTemplateInfo: public TemplateInfo {
static inline ObjectTemplateInfo* cast(Object* obj);
-#ifdef OBJECT_PRINT
- inline void ObjectTemplateInfoPrint() {
- ObjectTemplateInfoPrint(stdout);
- }
- void ObjectTemplateInfoPrint(FILE* out);
-#endif
+ // Dispatched behavior.
+ DECLARE_PRINTER(ObjectTemplateInfo)
DECLARE_VERIFIER(ObjectTemplateInfo)
static const int kConstructorOffset = TemplateInfo::kHeaderSize;
static const int kInternalFieldCountOffset =
kConstructorOffset + kPointerSize;
- static const int kHasExternalResourceOffset = kInternalFieldCountOffset + kPointerSize;
- static const int kUseUserObjectComparisonOffset = kHasExternalResourceOffset + kPointerSize;
+ static const int kHasExternalResourceOffset =
+ kInternalFieldCountOffset + kPointerSize;
+ static const int kUseUserObjectComparisonOffset =
+ kHasExternalResourceOffset + kPointerSize;
static const int kSize = kUseUserObjectComparisonOffset + kPointerSize;
};
@@ -8801,12 +8877,8 @@ class SignatureInfo: public Struct {
static inline SignatureInfo* cast(Object* obj);
-#ifdef OBJECT_PRINT
- inline void SignatureInfoPrint() {
- SignatureInfoPrint(stdout);
- }
- void SignatureInfoPrint(FILE* out);
-#endif
+ // Dispatched behavior.
+ DECLARE_PRINTER(SignatureInfo)
DECLARE_VERIFIER(SignatureInfo)
static const int kReceiverOffset = Struct::kHeaderSize;
@@ -8824,12 +8896,8 @@ class TypeSwitchInfo: public Struct {
static inline TypeSwitchInfo* cast(Object* obj);
-#ifdef OBJECT_PRINT
- inline void TypeSwitchInfoPrint() {
- TypeSwitchInfoPrint(stdout);
- }
- void TypeSwitchInfoPrint(FILE* out);
-#endif
+ // Dispatched behavior.
+ DECLARE_PRINTER(TypeSwitchInfo)
DECLARE_VERIFIER(TypeSwitchInfo)
static const int kTypesOffset = Struct::kHeaderSize;
@@ -8874,12 +8942,8 @@ class DebugInfo: public Struct {
static inline DebugInfo* cast(Object* obj);
-#ifdef OBJECT_PRINT
- inline void DebugInfoPrint() {
- DebugInfoPrint(stdout);
- }
- void DebugInfoPrint(FILE* out);
-#endif
+ // Dispatched behavior.
+ DECLARE_PRINTER(DebugInfo)
DECLARE_VERIFIER(DebugInfo)
static const int kSharedFunctionInfoIndex = Struct::kHeaderSize;
@@ -8930,12 +8994,8 @@ class BreakPointInfo: public Struct {
static inline BreakPointInfo* cast(Object* obj);
-#ifdef OBJECT_PRINT
- inline void BreakPointInfoPrint() {
- BreakPointInfoPrint(stdout);
- }
- void BreakPointInfoPrint(FILE* out);
-#endif
+ // Dispatched behavior.
+ DECLARE_PRINTER(BreakPointInfo)
DECLARE_VERIFIER(BreakPointInfo)
static const int kCodePositionIndex = Struct::kHeaderSize;
@@ -8957,10 +9017,10 @@ class BreakPointInfo: public Struct {
#undef DECLARE_VERIFIER
#define VISITOR_SYNCHRONIZATION_TAGS_LIST(V) \
- V(kSymbolTable, "symbol_table", "(Symbols)") \
+ V(kStringTable, "string_table", "(Internalized strings)") \
V(kExternalStringsTable, "external_strings_table", "(External strings)") \
V(kStrongRootList, "strong_root_list", "(Strong roots)") \
- V(kSymbol, "symbol", "(Symbol)") \
+ V(kInternalizedString, "internalized_string", "(Internal string)") \
V(kBootstrapper, "bootstrapper", "(Bootstrapper)") \
V(kTop, "top", "(Isolate)") \
V(kRelocatable, "relocatable", "(Relocatable)") \
diff --git a/src/3rdparty/v8/src/optimizing-compiler-thread.cc b/src/3rdparty/v8/src/optimizing-compiler-thread.cc
index 83ff104..39b45b1 100644
--- a/src/3rdparty/v8/src/optimizing-compiler-thread.cc
+++ b/src/3rdparty/v8/src/optimizing-compiler-thread.cc
@@ -53,6 +53,8 @@ void OptimizingCompilerThread::Run() {
while (true) {
input_queue_semaphore_->Wait();
+ Logger::TimerEventScope timer(
+ isolate_, Logger::TimerEventScope::v8_recompile_parallel);
if (Acquire_Load(&stop_thread_)) {
stop_semaphore_->Signal();
if (FLAG_trace_parallel_recompilation) {
@@ -77,7 +79,13 @@ void OptimizingCompilerThread::Run() {
USE(status);
output_queue_.Enqueue(optimizing_compiler);
- isolate_->stack_guard()->RequestCodeReadyEvent();
+ if (!FLAG_manual_parallel_recompilation) {
+ isolate_->stack_guard()->RequestCodeReadyEvent();
+ } else {
+ // In manual mode, do not trigger a code ready event.
+ // Instead, wait for the optimized functions to be installed manually.
+ output_queue_semaphore_->Signal();
+ }
if (FLAG_trace_parallel_recompilation) {
time_spent_compiling_ += OS::Ticks() - compiling_start;
@@ -104,6 +112,9 @@ void OptimizingCompilerThread::InstallOptimizedFunctions() {
HandleScope handle_scope(isolate_);
int functions_installed = 0;
while (!output_queue_.IsEmpty()) {
+ if (FLAG_manual_parallel_recompilation) {
+ output_queue_semaphore_->Wait();
+ }
OptimizingCompiler* compiler = NULL;
output_queue_.Dequeue(&compiler);
Compiler::InstallOptimizedCode(compiler);
@@ -115,8 +126,22 @@ void OptimizingCompilerThread::InstallOptimizedFunctions() {
}
+Handle<SharedFunctionInfo>
+ OptimizingCompilerThread::InstallNextOptimizedFunction() {
+ ASSERT(FLAG_manual_parallel_recompilation);
+ output_queue_semaphore_->Wait();
+ OptimizingCompiler* compiler = NULL;
+ output_queue_.Dequeue(&compiler);
+ Handle<SharedFunctionInfo> shared = compiler->info()->shared_info();
+ Compiler::InstallOptimizedCode(compiler);
+ return shared;
+}
+
+
void OptimizingCompilerThread::QueueForOptimization(
OptimizingCompiler* optimizing_compiler) {
+ ASSERT(IsQueueAvailable());
+ Barrier_AtomicIncrement(&queue_length_, static_cast<Atomic32>(1));
input_queue_.Enqueue(optimizing_compiler);
input_queue_semaphore_->Signal();
}
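
In manual parallel-recompilation mode the compiler thread signals output_queue_semaphore_ once per finished function, and InstallNextOptimizedFunction waits on that semaphore before dequeuing, so the dequeue never races an empty output queue. A minimal model of the handshake using standard C++ primitives in place of V8's OS::CreateSemaphore (illustration only, not the V8 types):

    #include <condition_variable>
    #include <deque>
    #include <mutex>

    template <typename T>
    class SignaledQueue {
     public:
      // Producer side (compiler thread): enqueue, then signal.
      void Enqueue(T value) {
        std::lock_guard<std::mutex> lock(mutex_);
        queue_.push_back(value);
        cond_.notify_one();  // plays the role of semaphore->Signal()
      }
      // Consumer side (main thread): wait, then dequeue.
      T WaitAndDequeue() {
        std::unique_lock<std::mutex> lock(mutex_);
        cond_.wait(lock, [this] { return !queue_.empty(); });  // ->Wait()
        T value = queue_.front();
        queue_.pop_front();
        return value;
      }
     private:
      std::mutex mutex_;
      std::condition_variable cond_;
      std::deque<T> queue_;
    };
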
diff --git a/src/3rdparty/v8/src/optimizing-compiler-thread.h b/src/3rdparty/v8/src/optimizing-compiler-thread.h
index d562726..7aad78c 100644
--- a/src/3rdparty/v8/src/optimizing-compiler-thread.h
+++ b/src/3rdparty/v8/src/optimizing-compiler-thread.h
@@ -29,23 +29,28 @@
#define V8_OPTIMIZING_COMPILER_THREAD_H_
#include "atomicops.h"
-#include "platform.h"
#include "flags.h"
+#include "platform.h"
#include "unbound-queue.h"
namespace v8 {
namespace internal {
-class HGraphBuilder;
+class HOptimizedGraphBuilder;
class OptimizingCompiler;
+class SharedFunctionInfo;
class OptimizingCompilerThread : public Thread {
public:
explicit OptimizingCompilerThread(Isolate *isolate) :
Thread("OptimizingCompilerThread"),
+#ifdef DEBUG
+ thread_id_(0),
+#endif
isolate_(isolate),
stop_semaphore_(OS::CreateSemaphore(0)),
input_queue_semaphore_(OS::CreateSemaphore(0)),
+ output_queue_semaphore_(OS::CreateSemaphore(0)),
time_spent_compiling_(0),
time_spent_total_(0) {
NoBarrier_Store(&stop_thread_, static_cast<AtomicWord>(false));
@@ -57,6 +62,9 @@ class OptimizingCompilerThread : public Thread {
void QueueForOptimization(OptimizingCompiler* optimizing_compiler);
void InstallOptimizedFunctions();
+ // Wait for the next optimized function and install it.
+ Handle<SharedFunctionInfo> InstallNextOptimizedFunction();
+
inline bool IsQueueAvailable() {
// We don't need a barrier since we have a data dependency right
// after.
@@ -76,24 +84,26 @@ class OptimizingCompilerThread : public Thread {
#endif
~OptimizingCompilerThread() {
+ delete output_queue_semaphore_; // Only used for manual mode.
delete input_queue_semaphore_;
delete stop_semaphore_;
}
private:
+#ifdef DEBUG
+ int thread_id_;
+#endif
+
Isolate* isolate_;
Semaphore* stop_semaphore_;
Semaphore* input_queue_semaphore_;
+ Semaphore* output_queue_semaphore_;
UnboundQueue<OptimizingCompiler*> input_queue_;
UnboundQueue<OptimizingCompiler*> output_queue_;
volatile AtomicWord stop_thread_;
volatile Atomic32 queue_length_;
int64_t time_spent_compiling_;
int64_t time_spent_total_;
-
-#ifdef DEBUG
- int thread_id_;
-#endif
};
} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/parser.cc b/src/3rdparty/v8/src/parser.cc
index da4685f..b93cf43 100644
--- a/src/3rdparty/v8/src/parser.cc
+++ b/src/3rdparty/v8/src/parser.cc
@@ -52,7 +52,10 @@ namespace internal {
class PositionStack {
public:
explicit PositionStack(bool* ok) : top_(NULL), ok_(ok) {}
- ~PositionStack() { ASSERT(!*ok_ || is_empty()); }
+ ~PositionStack() {
+ ASSERT(!*ok_ || is_empty());
+ USE(ok_);
+ }
class Element {
public:
@@ -254,10 +257,10 @@ Handle<String> Parser::LookupSymbol(int symbol_id) {
if (static_cast<unsigned>(symbol_id)
>= static_cast<unsigned>(symbol_cache_.length())) {
if (scanner().is_literal_ascii()) {
- return isolate()->factory()->LookupAsciiSymbol(
- scanner().literal_ascii_string());
+ return isolate()->factory()->InternalizeOneByteString(
+ Vector<const uint8_t>::cast(scanner().literal_ascii_string()));
} else {
- return isolate()->factory()->LookupTwoByteSymbol(
+ return isolate()->factory()->InternalizeTwoByteString(
scanner().literal_utf16_string());
}
}
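
The Lookup*Symbol to Internalize*String renames in this hunk describe what the factory calls actually do: internalization is hash-consing, returning one canonical string object per distinct content, and symbol_cache_ maps the scanner's dense symbol ids onto those canonical objects so repeated lookups skip the hash table (see LookupCachedSymbol in the next hunk). A standalone model of the two layers (illustrative names, not V8 API):

    #include <memory>
    #include <string>
    #include <unordered_map>
    #include <vector>

    class InternPool {
     public:
      // One canonical object per distinct string content.
      const std::string* Internalize(const std::string& s) {
        auto it = table_.find(s);
        if (it == table_.end())
          it = table_.emplace(s, std::make_unique<std::string>(s)).first;
        return it->second.get();
      }
      // Dense id -> canonical object cache, filled lazily.
      const std::string* Lookup(int symbol_id, const std::string& literal) {
        if (symbol_id >= static_cast<int>(cache_.size()))
          cache_.resize(symbol_id + 1, nullptr);
        if (cache_[symbol_id] == nullptr)
          cache_[symbol_id] = Internalize(literal);
        return cache_[symbol_id];
      }
     private:
      std::unordered_map<std::string, std::unique_ptr<std::string>> table_;
      std::vector<const std::string*> cache_;
    };
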
@@ -275,10 +278,10 @@ Handle<String> Parser::LookupCachedSymbol(int symbol_id) {
Handle<String> result = symbol_cache_.at(symbol_id);
if (result.is_null()) {
if (scanner().is_literal_ascii()) {
- result = isolate()->factory()->LookupAsciiSymbol(
- scanner().literal_ascii_string());
+ result = isolate()->factory()->InternalizeOneByteString(
+ Vector<const uint8_t>::cast(scanner().literal_ascii_string()));
} else {
- result = isolate()->factory()->LookupTwoByteSymbol(
+ result = isolate()->factory()->InternalizeTwoByteString(
scanner().literal_utf16_string());
}
symbol_cache_.at(symbol_id) = result;
@@ -614,12 +617,7 @@ FunctionLiteral* Parser::DoParseProgram(CompilationInfo* info,
ASSERT(target_stack_ == NULL);
if (pre_data_ != NULL) pre_data_->Initialize();
- // Compute the parsing mode.
- Mode mode = (FLAG_lazy && allow_lazy_) ? PARSE_LAZILY : PARSE_EAGERLY;
- if (allow_natives_syntax_ || extension_ != NULL) mode = PARSE_EAGERLY;
- ParsingModeScope parsing_mode(this, mode);
-
- Handle<String> no_name = isolate()->factory()->empty_symbol();
+ Handle<String> no_name = isolate()->factory()->empty_string();
FunctionLiteral* result = NULL;
{ Scope* scope = NewScope(top_scope_, GLOBAL_SCOPE);
@@ -637,6 +635,13 @@ FunctionLiteral* Parser::DoParseProgram(CompilationInfo* info,
scope->set_start_position(0);
scope->set_end_position(source->length());
+ // Compute the parsing mode.
+ Mode mode = (FLAG_lazy && allow_lazy_) ? PARSE_LAZILY : PARSE_EAGERLY;
+ if (allow_natives_syntax_ || extension_ != NULL || scope->is_eval_scope()) {
+ mode = PARSE_EAGERLY;
+ }
+ ParsingModeScope parsing_mode(this, mode);
+
FunctionState function_state(this, scope, isolate()); // Enters 'scope'.
top_scope_->SetLanguageMode(info->language_mode());
if (info->is_qml_mode()) {
@@ -921,7 +926,7 @@ class ThisNamedPropertyAssignmentFinder {
if (literal != NULL &&
literal->handle()->IsString() &&
!String::cast(*(literal->handle()))->Equals(
- isolate_->heap()->Proto_symbol()) &&
+ isolate_->heap()->proto_string()) &&
!String::cast(*(literal->handle()))->AsArrayIndex(&dummy)) {
Handle<String> key = Handle<String>::cast(literal->handle());
@@ -1058,19 +1063,21 @@ void* Parser::ParseSourceElements(ZoneList<Statement*>* processor,
// Check "use strict" directive (ES5 14.1).
if (top_scope_->is_classic_mode() &&
- directive->Equals(isolate()->heap()->use_strict()) &&
+ directive->Equals(isolate()->heap()->use_strict_string()) &&
token_loc.end_pos - token_loc.beg_pos ==
- isolate()->heap()->use_strict()->length() + 2) {
+ isolate()->heap()->use_strict_string()->length() + 2) {
// TODO(mstarzinger): Global strict eval calls need their own scope
// as specified in ES5 10.4.2(3). The correct fix would be to always
// add this scope in DoParseProgram(), but that requires adaptations
// all over the code base, so we go with a quick-fix for now.
+ // In the same manner, we have to patch the parsing mode.
if (is_eval && !top_scope_->is_eval_scope()) {
ASSERT(top_scope_->is_global_scope());
Scope* scope = NewScope(top_scope_, EVAL_SCOPE);
scope->set_start_position(top_scope_->start_position());
scope->set_end_position(top_scope_->end_position());
top_scope_ = scope;
+ mode_ = PARSE_EAGERLY;
}
// TODO(ES6): Fix entering extended mode, once it is specified.
top_scope_->SetLanguageMode(FLAG_harmony_scoping
@@ -1143,7 +1150,7 @@ Statement* Parser::ParseModuleElement(ZoneStringList* labels,
if (estmt != NULL &&
estmt->expression()->AsVariableProxy() != NULL &&
estmt->expression()->AsVariableProxy()->name()->Equals(
- isolate()->heap()->module_symbol()) &&
+ isolate()->heap()->module_string()) &&
!scanner().literal_contains_escapes()) {
return ParseModuleDeclaration(NULL, ok);
}
@@ -1166,7 +1173,7 @@ Statement* Parser::ParseModuleDeclaration(ZoneStringList* names, bool* ok) {
#endif
Module* module = ParseModule(CHECK_OK);
- VariableProxy* proxy = NewUnresolved(name, LET, module->interface());
+ VariableProxy* proxy = NewUnresolved(name, MODULE, module->interface());
Declaration* declaration =
factory()->NewModuleDeclaration(proxy, module, top_scope_);
Declare(declaration, true, CHECK_OK);
@@ -1185,7 +1192,7 @@ Statement* Parser::ParseModuleDeclaration(ZoneStringList* names, bool* ok) {
if (module->body() == NULL)
return factory()->NewEmptyStatement();
else
- return module->body();
+ return factory()->NewModuleStatement(proxy, module->body());
}
@@ -1334,12 +1341,15 @@ Module* Parser::ParseModuleUrl(bool* ok) {
if (FLAG_print_interface_details) PrintF("# Url ");
#endif
- Module* result = factory()->NewModuleUrl(symbol);
- Interface* interface = result->interface();
+ // Create an empty literal as long as the feature isn't finished.
+ USE(symbol);
+ Scope* scope = NewScope(top_scope_, MODULE_SCOPE);
+ Block* body = factory()->NewBlock(NULL, 1, false);
+ body->set_scope(scope);
+ Interface* interface = scope->interface();
+ Module* result = factory()->NewModuleLiteral(body, interface);
interface->Freeze(ok);
ASSERT(*ok);
- // Create dummy scope to avoid errors as long as the feature isn't finished.
- Scope* scope = NewScope(top_scope_, MODULE_SCOPE);
interface->Unify(scope->interface(), zone(), ok);
ASSERT(*ok);
return result;
@@ -1428,7 +1438,7 @@ Statement* Parser::ParseExportDeclaration(bool* ok) {
case Token::IDENTIFIER: {
Handle<String> name = ParseIdentifier(CHECK_OK);
// Handle 'module' as a context-sensitive keyword.
- if (!name->IsEqualTo(CStrVector("module"))) {
+ if (!name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("module"))) {
names.Add(name, zone());
while (peek() == Token::COMMA) {
Consume(Token::COMMA);
@@ -1708,12 +1718,11 @@ void Parser::Declare(Declaration* declaration, bool resolve, bool* ok) {
*ok = false;
return;
}
- const char* type =
- (var->mode() == VAR) ? "var" : var->is_const_mode() ? "const" : "let";
Handle<String> type_string =
- isolate()->factory()->NewStringFromUtf8(CStrVector(type), TENURED);
+ isolate()->factory()->NewStringFromUtf8(CStrVector("Variable"),
+ TENURED);
Expression* expression =
- NewThrowTypeError(isolate()->factory()->redeclaration_symbol(),
+ NewThrowTypeError(isolate()->factory()->redeclaration_string(),
type_string, name);
declaration_scope->SetIllegalRedeclaration(expression);
}
@@ -1795,8 +1804,10 @@ void Parser::Declare(Declaration* declaration, bool resolve, bool* ok) {
if (isolate_->debug()->IsLoaded() && isolate_->debug()->InDebugger()) {
// Get the context before the debugger was entered.
SaveContext *save = isolate_->save_context();
- while (save != NULL && *save->context() == *isolate_->debug()->debug_context())
+ while (save != NULL &&
+ *save->context() == *isolate_->debug()->debug_context()) {
save = save->prev();
+ }
global = Handle<GlobalObject>(save->context()->global_object());
}
@@ -1994,8 +2005,8 @@ Block* Parser::ParseVariableStatement(VariableDeclarationContext var_context,
bool Parser::IsEvalOrArguments(Handle<String> string) {
- return string.is_identical_to(isolate()->factory()->eval_symbol()) ||
- string.is_identical_to(isolate()->factory()->arguments_symbol());
+ return string.is_identical_to(isolate()->factory()->eval_string()) ||
+ string.is_identical_to(isolate()->factory()->arguments_string());
}
@@ -2255,7 +2266,7 @@ Block* Parser::ParseVariableDeclarations(
// Note that the function does different things depending on
// the number of arguments (1 or 2).
initialize = factory()->NewCallRuntime(
- isolate()->factory()->InitializeConstGlobal_symbol(),
+ isolate()->factory()->InitializeConstGlobal_string(),
Runtime::FunctionForId(Runtime::kInitializeConstGlobal),
arguments);
} else {
@@ -2284,7 +2295,7 @@ Block* Parser::ParseVariableDeclarations(
// Note that the function does different things depending on
// the number of arguments (2 or 3).
initialize = factory()->NewCallRuntime(
- isolate()->factory()->InitializeVarGlobal_symbol(),
+ isolate()->factory()->InitializeVarGlobal_string(),
Runtime::FunctionForId(Runtime::kInitializeVarGlobal),
arguments);
}
@@ -2396,7 +2407,7 @@ Statement* Parser::ParseExpressionOrLabelledStatement(ZoneStringList* labels,
expr != NULL &&
expr->AsVariableProxy() != NULL &&
expr->AsVariableProxy()->name()->Equals(
- isolate()->heap()->native_symbol()) &&
+ isolate()->heap()->native_string()) &&
!scanner().literal_contains_escapes()) {
return ParseNativeDeclaration(ok);
}
@@ -2408,7 +2419,7 @@ Statement* Parser::ParseExpressionOrLabelledStatement(ZoneStringList* labels,
scanner().HasAnyLineTerminatorBeforeNext() ||
expr->AsVariableProxy() == NULL ||
!expr->AsVariableProxy()->name()->Equals(
- isolate()->heap()->module_symbol()) ||
+ isolate()->heap()->module_string()) ||
scanner().literal_contains_escapes()) {
ExpectSemicolon(CHECK_OK);
}
@@ -2533,7 +2544,7 @@ Statement* Parser::ParseReturnStatement(bool* ok) {
Scope* declaration_scope = top_scope_->DeclarationScope();
if (declaration_scope->is_global_scope() ||
declaration_scope->is_eval_scope()) {
- Handle<String> type = isolate()->factory()->illegal_return_symbol();
+ Handle<String> type = isolate()->factory()->illegal_return_string();
Expression* throw_error = NewThrowSyntaxError(type, Handle<Object>::null());
return factory()->NewExpressionStatement(throw_error);
}
@@ -2881,8 +2892,8 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
// implementing stack allocated block scoped variables.
Factory* heap_factory = isolate()->factory();
Handle<String> tempstr =
- heap_factory->NewConsString(heap_factory->dot_for_symbol(), name);
- Handle<String> tempname = heap_factory->LookupSymbol(tempstr);
+ heap_factory->NewConsString(heap_factory->dot_for_string(), name);
+ Handle<String> tempname = heap_factory->InternalizeString(tempstr);
Variable* temp = top_scope_->DeclarationScope()->NewTemporary(tempname);
VariableProxy* temp_proxy = factory()->NewVariableProxy(temp);
ForInStatement* loop = factory()->NewForInStatement(labels);
@@ -2926,7 +2937,7 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
// the error at runtime.
if (expression == NULL || !expression->IsValidLeftHandSide()) {
Handle<String> type =
- isolate()->factory()->invalid_lhs_in_for_in_symbol();
+ isolate()->factory()->invalid_lhs_in_for_in_string();
expression = NewThrowReferenceError(type);
}
ForInStatement* loop = factory()->NewForInStatement(labels);
@@ -3037,9 +3048,10 @@ Expression* Parser::ParseAssignmentExpression(bool accept_IN, bool* ok) {
// side expression. We could report this as a syntax error here but
// for compatibility with JSC we choose to report the error at
// runtime.
+ // TODO(ES5): Should change parsing for spec conformance.
if (expression == NULL || !expression->IsValidLeftHandSide()) {
Handle<String> type =
- isolate()->factory()->invalid_lhs_in_assignment_symbol();
+ isolate()->factory()->invalid_lhs_in_assignment_string();
expression = NewThrowReferenceError(type);
}
@@ -3240,7 +3252,8 @@ Expression* Parser::ParseUnaryExpression(bool* ok) {
if (op == Token::NOT) {
// Convert the literal to a boolean condition and negate it.
bool condition = literal->ToBoolean()->IsTrue();
- Handle<Object> result(isolate()->heap()->ToBoolean(!condition));
+ Handle<Object> result(isolate()->heap()->ToBoolean(!condition),
+ isolate());
return factory()->NewLiteral(result);
} else if (literal->IsNumber()) {
// Compute some expressions involving only number literals.
@@ -3279,7 +3292,7 @@ Expression* Parser::ParseUnaryExpression(bool* ok) {
// error at runtime.
if (expression == NULL || !expression->IsValidLeftHandSide()) {
Handle<String> type =
- isolate()->factory()->invalid_lhs_in_prefix_op_symbol();
+ isolate()->factory()->invalid_lhs_in_prefix_op_string();
expression = NewThrowReferenceError(type);
}
@@ -3314,7 +3327,7 @@ Expression* Parser::ParsePostfixExpression(bool* ok) {
// error at runtime.
if (expression == NULL || !expression->IsValidLeftHandSide()) {
Handle<String> type =
- isolate()->factory()->invalid_lhs_in_postfix_op_symbol();
+ isolate()->factory()->invalid_lhs_in_postfix_op_string();
expression = NewThrowReferenceError(type);
}
@@ -3389,7 +3402,7 @@ Expression* Parser::ParseLeftHandSideExpression(bool* ok) {
// they are actually direct calls to eval is determined at run time.
VariableProxy* callee = result->AsVariableProxy();
if (callee != NULL &&
- callee->IsVariable(isolate()->factory()->eval_symbol())) {
+ callee->IsVariable(isolate()->factory()->eval_string())) {
top_scope_->DeclarationScope()->RecordEvalCall();
}
result = factory()->NewCall(result, args, pos);
@@ -3496,7 +3509,7 @@ Expression* Parser::ParseMemberWithNewPrefixesExpression(PositionStack* stack,
fni_->PushLiteralName(index->AsLiteral()->AsPropertyName());
} else {
fni_->PushLiteralName(
- isolate()->factory()->anonymous_function_symbol());
+ isolate()->factory()->anonymous_function_string());
}
}
Expect(Token::RBRACK, CHECK_OK);
@@ -3748,17 +3761,16 @@ Expression* Parser::ParseArrayLiteral(bool* ok) {
int literal_index = current_function_state_->NextMaterializedLiteralIndex();
// Allocate a fixed array to hold all the object literals.
- Handle<FixedArray> object_literals =
- isolate()->factory()->NewFixedArray(values->length(), TENURED);
- Handle<FixedDoubleArray> double_literals;
- ElementsKind elements_kind = FAST_SMI_ELEMENTS;
- bool has_only_undefined_values = true;
- bool has_hole_values = false;
+ Handle<JSArray> array =
+ isolate()->factory()->NewJSArray(0, FAST_HOLEY_SMI_ELEMENTS);
+ isolate()->factory()->SetElementsCapacityAndLength(
+ array, values->length(), values->length());
// Fill in the literals.
Heap* heap = isolate()->heap();
bool is_simple = true;
int depth = 1;
+ bool is_holey = false;
for (int i = 0, n = values->length(); i < n; i++) {
MaterializedLiteral* m_literal = values->at(i)->AsMaterializedLiteral();
if (m_literal != NULL && m_literal->depth() + 1 > depth) {
@@ -3766,91 +3778,33 @@ Expression* Parser::ParseArrayLiteral(bool* ok) {
}
Handle<Object> boilerplate_value = GetBoilerplateValue(values->at(i));
if (boilerplate_value->IsTheHole()) {
- has_hole_values = true;
- object_literals->set_the_hole(i);
- if (elements_kind == FAST_DOUBLE_ELEMENTS) {
- double_literals->set_the_hole(i);
- }
+ is_holey = true;
} else if (boilerplate_value->IsUndefined()) {
is_simple = false;
- object_literals->set(i, Smi::FromInt(0));
- if (elements_kind == FAST_DOUBLE_ELEMENTS) {
- double_literals->set(i, 0);
- }
+ JSObject::SetOwnElement(
+ array, i, handle(Smi::FromInt(0), isolate()), kNonStrictMode);
} else {
- // Examine each literal element, and adjust the ElementsKind if the
- // literal element is not of a type that can be stored in the current
- // ElementsKind. Start with FAST_SMI_ONLY_ELEMENTS, and transition to
- // FAST_DOUBLE_ELEMENTS and FAST_ELEMENTS as necessary. Always remember
- // the tagged value, no matter what the ElementsKind is in case we
- // ultimately end up in FAST_ELEMENTS.
- has_only_undefined_values = false;
- object_literals->set(i, *boilerplate_value);
- if (elements_kind == FAST_SMI_ELEMENTS) {
- // Smi only elements. Notice if a transition to FAST_DOUBLE_ELEMENTS or
- // FAST_ELEMENTS is required.
- if (!boilerplate_value->IsSmi()) {
- if (boilerplate_value->IsNumber() && FLAG_smi_only_arrays) {
- // Allocate a double array on the FAST_DOUBLE_ELEMENTS transition to
- // avoid over-allocating in TENURED space.
- double_literals = isolate()->factory()->NewFixedDoubleArray(
- values->length(), TENURED);
- // Copy the contents of the FAST_SMI_ONLY_ELEMENT array to the
- // FAST_DOUBLE_ELEMENTS array so that they are in sync.
- for (int j = 0; j < i; ++j) {
- Object* smi_value = object_literals->get(j);
- if (smi_value->IsTheHole()) {
- double_literals->set_the_hole(j);
- } else {
- double_literals->set(j, Smi::cast(smi_value)->value());
- }
- }
- double_literals->set(i, boilerplate_value->Number());
- elements_kind = FAST_DOUBLE_ELEMENTS;
- } else {
- elements_kind = FAST_ELEMENTS;
- }
- }
- } else if (elements_kind == FAST_DOUBLE_ELEMENTS) {
- // Continue to store double values in to FAST_DOUBLE_ELEMENTS arrays
- // until the first value is seen that can't be stored as a double.
- if (boilerplate_value->IsNumber()) {
- double_literals->set(i, boilerplate_value->Number());
- } else {
- elements_kind = FAST_ELEMENTS;
- }
- }
+ JSObject::SetOwnElement(array, i, boilerplate_value, kNonStrictMode);
}
}
- // Very small array literals that don't have a concrete hint about their type
- // from a constant value should default to the slow case to avoid lots of
- // elements transitions on really small objects.
- if (has_only_undefined_values && values->length() <= 2) {
- elements_kind = FAST_ELEMENTS;
- }
+ Handle<FixedArrayBase> element_values(array->elements());
// Simple and shallow arrays can be lazily copied, we transform the
// elements array to a copy-on-write array.
if (is_simple && depth == 1 && values->length() > 0 &&
- elements_kind != FAST_DOUBLE_ELEMENTS) {
- object_literals->set_map(heap->fixed_cow_array_map());
+ array->HasFastSmiOrObjectElements()) {
+ element_values->set_map(heap->fixed_cow_array_map());
}
- Handle<FixedArrayBase> element_values = elements_kind == FAST_DOUBLE_ELEMENTS
- ? Handle<FixedArrayBase>(double_literals)
- : Handle<FixedArrayBase>(object_literals);
-
// Remember both the literal's constant values as well as the ElementsKind
// in a 2-element FixedArray.
- Handle<FixedArray> literals =
- isolate()->factory()->NewFixedArray(2, TENURED);
+ Handle<FixedArray> literals = isolate()->factory()->NewFixedArray(2, TENURED);
- if (has_hole_values || !FLAG_packed_arrays) {
- elements_kind = GetHoleyElementsKind(elements_kind);
- }
+ ElementsKind kind = array->GetElementsKind();
+ kind = is_holey ? GetHoleyElementsKind(kind) : GetPackedElementsKind(kind);
- literals->set(0, Smi::FromInt(elements_kind));
+ literals->set(0, Smi::FromInt(kind));
literals->set(1, *element_values);
return factory()->NewArrayLiteral(
@@ -4077,7 +4031,7 @@ ObjectLiteral::Property* Parser::ParseObjectLiteralGetSet(bool is_getter,
next == Token::STRING || is_keyword) {
Handle<String> name;
if (is_keyword) {
- name = isolate_->factory()->LookupAsciiSymbol(Token::String(next));
+ name = isolate_->factory()->InternalizeUtf8String(Token::String(next));
} else {
name = GetSymbol(CHECK_OK);
}
@@ -4398,7 +4352,7 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> function_name,
// We want a non-null handle as the function name.
if (should_infer_name) {
- function_name = isolate()->factory()->empty_symbol();
+ function_name = isolate()->factory()->empty_string();
}
int num_parameters = 0;
@@ -4704,7 +4658,7 @@ Expression* Parser::ParseV8Intrinsic(bool* ok) {
top_scope_->DeclarationScope()->ForceEagerCompilation();
}
- const Runtime::Function* function = Runtime::FunctionForSymbol(name);
+ const Runtime::Function* function = Runtime::FunctionForName(name);
// Check for built-in IS_VAR macro.
if (function != NULL &&
@@ -4799,7 +4753,7 @@ void Parser::ExpectContextualKeyword(const char* keyword, bool* ok) {
if (!*ok) return;
Handle<String> symbol = GetSymbol(ok);
if (!*ok) return;
- if (!symbol->IsEqualTo(CStrVector(keyword))) {
+ if (!symbol->IsUtf8EqualTo(CStrVector(keyword))) {
*ok = false;
ReportUnexpectedToken(scanner().current_token());
}
@@ -4991,7 +4945,7 @@ void Parser::RegisterTargetUse(Label* target, Target* stop) {
Expression* Parser::NewThrowReferenceError(Handle<String> type) {
- return NewThrowError(isolate()->factory()->MakeReferenceError_symbol(),
+ return NewThrowError(isolate()->factory()->MakeReferenceError_string(),
type, HandleVector<Object>(NULL, 0));
}
@@ -5001,7 +4955,7 @@ Expression* Parser::NewThrowSyntaxError(Handle<String> type,
int argc = first.is_null() ? 0 : 1;
Vector< Handle<Object> > arguments = HandleVector<Object>(&first, argc);
return NewThrowError(
- isolate()->factory()->MakeSyntaxError_symbol(), type, arguments);
+ isolate()->factory()->MakeSyntaxError_string(), type, arguments);
}
@@ -5013,7 +4967,7 @@ Expression* Parser::NewThrowTypeError(Handle<String> type,
Vector< Handle<Object> > arguments =
HandleVector<Object>(elements, ARRAY_SIZE(elements));
return NewThrowError(
- isolate()->factory()->MakeTypeError_symbol(), type, arguments);
+ isolate()->factory()->MakeTypeError_string(), type, arguments);
}
diff --git a/src/3rdparty/v8/src/parser.h b/src/3rdparty/v8/src/parser.h
index 93fd1b8..0f85f91 100644
--- a/src/3rdparty/v8/src/parser.h
+++ b/src/3rdparty/v8/src/parser.h
@@ -96,7 +96,6 @@ class FunctionEntry BASE_EMBEDDED {
private:
Vector<unsigned> backing_;
- bool owns_data_;
};
diff --git a/src/3rdparty/v8/src/platform-cygwin.cc b/src/3rdparty/v8/src/platform-cygwin.cc
index 24e256a..f7e7d5e 100644
--- a/src/3rdparty/v8/src/platform-cygwin.cc
+++ b/src/3rdparty/v8/src/platform-cygwin.cc
@@ -177,6 +177,11 @@ void OS::Sleep(int milliseconds) {
}
+int OS::NumberOfCores() {
+ return sysconf(_SC_NPROCESSORS_ONLN);
+}
+
+
void OS::Abort() {
// Redirect to std abort to signal abnormal program termination.
abort();
@@ -655,23 +660,12 @@ class SamplerThread : public Thread {
SamplerRegistry::State state;
while ((state = SamplerRegistry::GetState()) !=
SamplerRegistry::HAS_NO_SAMPLERS) {
- bool cpu_profiling_enabled =
- (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS);
- bool runtime_profiler_enabled = RuntimeProfiler::IsEnabled();
// When CPU profiling is enabled both JavaScript and C++ code is
// profiled. We must not suspend.
- if (!cpu_profiling_enabled) {
- if (rate_limiter_.SuspendIfNecessary()) continue;
- }
- if (cpu_profiling_enabled) {
- if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this)) {
- return;
- }
- }
- if (runtime_profiler_enabled) {
- if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile, NULL)) {
- return;
- }
+ if (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS) {
+ SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this);
+ } else {
+ if (RuntimeProfiler::WaitForSomeIsolateToEnterJS()) continue;
}
OS::Sleep(interval_);
}
@@ -685,11 +679,6 @@ class SamplerThread : public Thread {
sampler_thread->SampleContext(sampler);
}
- static void DoRuntimeProfile(Sampler* sampler, void* ignored) {
- if (!sampler->isolate()->IsInitialized()) return;
- sampler->isolate()->runtime_profiler()->NotifyTick();
- }
-
void SampleContext(Sampler* sampler) {
HANDLE profiled_thread = sampler->platform_data()->profiled_thread();
if (profiled_thread == NULL) return;
@@ -724,7 +713,6 @@ class SamplerThread : public Thread {
}
const int interval_;
- RuntimeProfilerRateLimiter rate_limiter_;
// Protects the process wide state below.
static Mutex* mutex_;
@@ -788,4 +776,21 @@ void Sampler::Stop() {
}
+bool Sampler::CanSampleOnProfilerEventsProcessorThread() {
+ return false;
+}
+
+
+void Sampler::DoSample() {
+}
+
+
+void Sampler::StartProfiling() {
+}
+
+
+void Sampler::StopProfiling() {
+}
+
+
} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/platform-freebsd.cc b/src/3rdparty/v8/src/platform-freebsd.cc
index 1da4605..1af928e 100644
--- a/src/3rdparty/v8/src/platform-freebsd.cc
+++ b/src/3rdparty/v8/src/platform-freebsd.cc
@@ -181,6 +181,11 @@ void OS::Sleep(int milliseconds) {
}
+int OS::NumberOfCores() {
+ return sysconf(_SC_NPROCESSORS_ONLN);
+}
+
+
void OS::Abort() {
// Redirect to std abort to signal abnormal program termination.
abort();
@@ -198,6 +203,31 @@ void OS::DebugBreak() {
}
+void OS::DumpBacktrace() {
+ void* trace[100];
+ int size = backtrace(trace, ARRAY_SIZE(trace));
+ char** symbols = backtrace_symbols(trace, size);
+ fprintf(stderr, "\n==== C stack trace ===============================\n\n");
+ if (size == 0) {
+ fprintf(stderr, "(empty)\n");
+ } else if (symbols == NULL) {
+ fprintf(stderr, "(no symbols)\n");
+ } else {
+ for (int i = 1; i < size; ++i) {
+ fprintf(stderr, "%2d: ", i);
+ char mangled[201];
+ if (sscanf(symbols[i], "%*[^(]%*[(]%200[^)+]", mangled) == 1) { // NOLINT
+ fprintf(stderr, "%s\n", mangled);
+ } else {
+ fprintf(stderr, "??\n");
+ }
+ }
+ }
+ fflush(stderr);
+ free(symbols);
+}
+
+
class PosixMemoryMappedFile : public OS::MemoryMappedFile {
public:
PosixMemoryMappedFile(FILE* file, void* memory, int size)
@@ -712,11 +742,6 @@ static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
class SignalSender : public Thread {
public:
- enum SleepInterval {
- HALF_INTERVAL,
- FULL_INTERVAL
- };
-
static const int kSignalSenderStackSize = 64 * KB;
explicit SignalSender(int interval)
@@ -767,38 +792,14 @@ class SignalSender : public Thread {
SamplerRegistry::State state;
while ((state = SamplerRegistry::GetState()) !=
SamplerRegistry::HAS_NO_SAMPLERS) {
- bool cpu_profiling_enabled =
- (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS);
- bool runtime_profiler_enabled = RuntimeProfiler::IsEnabled();
// When CPU profiling is enabled both JavaScript and C++ code is
// profiled. We must not suspend.
- if (!cpu_profiling_enabled) {
- if (rate_limiter_.SuspendIfNecessary()) continue;
- }
- if (cpu_profiling_enabled && runtime_profiler_enabled) {
- if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this)) {
- return;
- }
- Sleep(HALF_INTERVAL);
- if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile, NULL)) {
- return;
- }
- Sleep(HALF_INTERVAL);
+ if (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS) {
+ SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this);
} else {
- if (cpu_profiling_enabled) {
- if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile,
- this)) {
- return;
- }
- }
- if (runtime_profiler_enabled) {
- if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile,
- NULL)) {
- return;
- }
- }
- Sleep(FULL_INTERVAL);
+ if (RuntimeProfiler::WaitForSomeIsolateToEnterJS()) continue;
}
+ Sleep(); // TODO(svenpanne) Figure out if OS::Sleep(interval_) is enough.
}
}
@@ -808,21 +809,15 @@ class SignalSender : public Thread {
sender->SendProfilingSignal(sampler->platform_data()->vm_tid());
}
- static void DoRuntimeProfile(Sampler* sampler, void* ignored) {
- if (!sampler->isolate()->IsInitialized()) return;
- sampler->isolate()->runtime_profiler()->NotifyTick();
- }
-
void SendProfilingSignal(pthread_t tid) {
if (!signal_handler_installed_) return;
pthread_kill(tid, SIGPROF);
}
- void Sleep(SleepInterval full_or_half) {
+ void Sleep() {
// Convert ms to us and subtract 100 us to compensate for delays
// occurring during signal delivery.
useconds_t interval = interval_ * 1000 - 100;
- if (full_or_half == HALF_INTERVAL) interval /= 2;
int result = usleep(interval);
#ifdef DEBUG
if (result != 0 && errno != EINTR) {
@@ -837,7 +832,6 @@ class SignalSender : public Thread {
}
const int interval_;
- RuntimeProfilerRateLimiter rate_limiter_;
// Protects the process wide state below.
static Mutex* mutex_;
@@ -904,4 +898,21 @@ void Sampler::Stop() {
}
+bool Sampler::CanSampleOnProfilerEventsProcessorThread() {
+ return false;
+}
+
+
+void Sampler::DoSample() {
+}
+
+
+void Sampler::StartProfiling() {
+}
+
+
+void Sampler::StopProfiling() {
+}
+
+
} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/platform-linux.cc b/src/3rdparty/v8/src/platform-linux.cc
index e6c328f..f571b99 100644
--- a/src/3rdparty/v8/src/platform-linux.cc
+++ b/src/3rdparty/v8/src/platform-linux.cc
@@ -38,6 +38,11 @@
#include <sys/types.h>
#include <stdlib.h>
+#if defined(__GLIBC__)
+#include <execinfo.h>
+#include <cxxabi.h>
+#endif
+
// Ubuntu Dapper requires memory pages to be marked as
// executable. Otherwise, OS raises an exception when executing code
// in that page.
@@ -151,10 +156,17 @@ bool OS::ArmCpuHasFeature(CpuFeature feature) {
case SUDIV:
search_string = "idiva";
break;
+ case VFP32DREGS:
+ // This case is handled specially below.
+ break;
default:
UNREACHABLE();
}
+ if (feature == VFP32DREGS) {
+ return ArmCpuHasFeature(VFP3) && !CPUInfoContainsString("d16");
+ }
+
if (CPUInfoContainsString(search_string)) {
return true;
}
@@ -191,6 +203,7 @@ CpuImplementer OS::GetCpuImplementer() {
return cached_value;
}
+
bool OS::ArmUsingHardFloat() {
// GCC versions 4.6 and above define __ARM_PCS or __ARM_PCS_VFP to specify
// the Floating Point ABI used (PCS stands for Procedure Call Standard).
@@ -390,6 +403,11 @@ void OS::Sleep(int milliseconds) {
}
+int OS::NumberOfCores() {
+ return sysconf(_SC_NPROCESSORS_ONLN);
+}
+
+
void OS::Abort() {
// Redirect to std abort to signal abnormal program termination.
if (FLAG_break_on_abort) {
@@ -414,6 +432,37 @@ void OS::DebugBreak() {
}
+void OS::DumpBacktrace() {
+#if defined(__GLIBC__)
+ void* trace[100];
+ int size = backtrace(trace, ARRAY_SIZE(trace));
+ char** symbols = backtrace_symbols(trace, size);
+ fprintf(stderr, "\n==== C stack trace ===============================\n\n");
+ if (size == 0) {
+ fprintf(stderr, "(empty)\n");
+ } else if (symbols == NULL) {
+ fprintf(stderr, "(no symbols)\n");
+ } else {
+ for (int i = 1; i < size; ++i) {
+ fprintf(stderr, "%2d: ", i);
+ char mangled[201];
+ if (sscanf(symbols[i], "%*[^(]%*[(]%200[^)+]", mangled) == 1) { // NOLINT
+ int status;
+ size_t length;
+ char* demangled = abi::__cxa_demangle(mangled, NULL, &length, &status);
+ fprintf(stderr, "%s\n", demangled ? demangled : mangled);
+ free(demangled);
+ } else {
+ fprintf(stderr, "??\n");
+ }
+ }
+ }
+ fflush(stderr);
+ free(symbols);
+#endif
+}
+
+
class PosixMemoryMappedFile : public OS::MemoryMappedFile {
public:
PosixMemoryMappedFile(FILE* file, void* memory, int size)
@@ -498,19 +547,20 @@ void OS::LogSharedLibraryAddresses() {
// the beginning of the filename or the end of the line.
do {
c = getc(fp);
- } while ((c != EOF) && (c != '\n') && (c != '/'));
+ } while ((c != EOF) && (c != '\n') && (c != '/') && (c != '['));
if (c == EOF) break; // EOF: Was unexpected, just exit.
// Process the filename if found.
- if (c == '/') {
- ungetc(c, fp); // Push the '/' back into the stream to be read below.
+ if ((c == '/') || (c == '[')) {
+ // Push the '/' or '[' back into the stream to be read below.
+ ungetc(c, fp);
// Read to the end of the line. Exit if the read fails.
if (fgets(lib_name, kLibNameLen, fp) == NULL) break;
// Drop the newline character read by fgets. We do not need to check
// for a zero-length string because we know that we at least read the
- // '/' character.
+ // '/' or '[' character.
lib_name[strlen(lib_name) - 1] = '\0';
} else {
// No library name found, just record the raw address range.
@@ -1077,34 +1127,27 @@ static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
}
-class Sampler::PlatformData : public Malloced {
+class CpuProfilerSignalHandler {
public:
- PlatformData() : vm_tid_(GetThreadID()) {}
-
- int vm_tid() const { return vm_tid_; }
-
- private:
- const int vm_tid_;
-};
-
-
-class SignalSender : public Thread {
- public:
- enum SleepInterval {
- HALF_INTERVAL,
- FULL_INTERVAL
- };
-
- static const int kSignalSenderStackSize = 64 * KB;
-
- explicit SignalSender(int interval)
- : Thread(Thread::Options("SignalSender", kSignalSenderStackSize)),
- vm_tgid_(getpid()),
- interval_(interval) {}
-
static void SetUp() { if (!mutex_) mutex_ = OS::CreateMutex(); }
static void TearDown() { delete mutex_; }
+ static bool RegisterProfilingSampler() {
+ ScopedLock lock(mutex_);
+ if (!profiling_samplers_count_) InstallSignalHandler();
+ ++profiling_samplers_count_;
+ return signal_handler_installed_;
+ }
+
+ static void UnregisterProfilingSampler() {
+ ScopedLock lock(mutex_);
+ ASSERT(profiling_samplers_count_ > 0);
+ if (!profiling_samplers_count_) return;
+ if (profiling_samplers_count_ == 1) RestoreSignalHandler();
+ --profiling_samplers_count_;
+ }
+
+ private:
static void InstallSignalHandler() {
struct sigaction sa;
sa.sa_sigaction = ProfilerSignalHandler;
@@ -1121,6 +1164,61 @@ class SignalSender : public Thread {
}
}
+ // Protects the process wide state below.
+ static Mutex* mutex_;
+ static int profiling_samplers_count_;
+ static bool signal_handler_installed_;
+ static struct sigaction old_signal_handler_;
+};
+
+
+Mutex* CpuProfilerSignalHandler::mutex_ = NULL;
+int CpuProfilerSignalHandler::profiling_samplers_count_ = 0;
+bool CpuProfilerSignalHandler::signal_handler_installed_ = false;
+struct sigaction CpuProfilerSignalHandler::old_signal_handler_;
+
+
+class Sampler::PlatformData : public Malloced {
+ public:
+ PlatformData()
+ : vm_tgid_(getpid()),
+ vm_tid_(GetThreadID()),
+ signal_handler_installed_(false) {}
+
+ void set_signal_handler_installed(bool value) {
+ signal_handler_installed_ = value;
+ }
+
+ void SendProfilingSignal() {
+ if (!signal_handler_installed_) return;
+ // Glibc doesn't provide a wrapper for tgkill(2).
+#if defined(ANDROID)
+ syscall(__NR_tgkill, vm_tgid_, vm_tid_, SIGPROF);
+#else
+ int result = syscall(SYS_tgkill, vm_tgid_, vm_tid_, SIGPROF);
+ USE(result);
+ ASSERT(result == 0);
+#endif
+ }
+
+ private:
+ const int vm_tgid_;
+ const int vm_tid_;
+ bool signal_handler_installed_;
+};
+
+
+class SignalSender : public Thread {
+ public:
+ static const int kSignalSenderStackSize = 64 * KB;
+
+ explicit SignalSender(int interval)
+ : Thread(Thread::Options("SignalSender", kSignalSenderStackSize)),
+ interval_(interval) {}
+
+ static void SetUp() { if (!mutex_) mutex_ = OS::CreateMutex(); }
+ static void TearDown() { delete mutex_; }
+
static void AddActiveSampler(Sampler* sampler) {
ScopedLock lock(mutex_);
SamplerRegistry::AddActiveSampler(sampler);
@@ -1141,7 +1239,6 @@ class SignalSender : public Thread {
RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(instance_);
delete instance_;
instance_ = NULL;
- RestoreSignalHandler();
}
}
@@ -1150,72 +1247,26 @@ class SignalSender : public Thread {
SamplerRegistry::State state;
while ((state = SamplerRegistry::GetState()) !=
SamplerRegistry::HAS_NO_SAMPLERS) {
- bool cpu_profiling_enabled =
- (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS);
- bool runtime_profiler_enabled = RuntimeProfiler::IsEnabled();
- if (cpu_profiling_enabled && !signal_handler_installed_) {
- InstallSignalHandler();
- } else if (!cpu_profiling_enabled && signal_handler_installed_) {
- RestoreSignalHandler();
- }
// When CPU profiling is enabled both JavaScript and C++ code is
// profiled. We must not suspend.
- if (!cpu_profiling_enabled) {
- if (rate_limiter_.SuspendIfNecessary()) continue;
- }
- if (cpu_profiling_enabled && runtime_profiler_enabled) {
- if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this)) {
- return;
- }
- Sleep(HALF_INTERVAL);
- if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile, NULL)) {
- return;
- }
- Sleep(HALF_INTERVAL);
+ if (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS) {
+ SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this);
} else {
- if (cpu_profiling_enabled) {
- if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile,
- this)) {
- return;
- }
- }
- if (runtime_profiler_enabled) {
- if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile,
- NULL)) {
- return;
- }
- }
- Sleep(FULL_INTERVAL);
+ if (RuntimeProfiler::WaitForSomeIsolateToEnterJS()) continue;
}
+      Sleep();  // TODO(svenpanne) Figure out if OS::Sleep(interval_) is enough.
}
}
static void DoCpuProfile(Sampler* sampler, void* raw_sender) {
if (!sampler->IsProfiling()) return;
- SignalSender* sender = reinterpret_cast<SignalSender*>(raw_sender);
- sender->SendProfilingSignal(sampler->platform_data()->vm_tid());
- }
-
- static void DoRuntimeProfile(Sampler* sampler, void* ignored) {
- if (!sampler->isolate()->IsInitialized()) return;
- sampler->isolate()->runtime_profiler()->NotifyTick();
- }
-
- void SendProfilingSignal(int tid) {
- if (!signal_handler_installed_) return;
- // Glibc doesn't provide a wrapper for tgkill(2).
-#if defined(ANDROID)
- syscall(__NR_tgkill, vm_tgid_, tid, SIGPROF);
-#else
- syscall(SYS_tgkill, vm_tgid_, tid, SIGPROF);
-#endif
+ sampler->DoSample();
}
- void Sleep(SleepInterval full_or_half) {
+ void Sleep() {
    // Convert ms to us and subtract 100 us to compensate for delays
    // occurring during signal delivery.
useconds_t interval = interval_ * 1000 - 100;
- if (full_or_half == HALF_INTERVAL) interval /= 2;
#if defined(ANDROID)
usleep(interval);
#else
@@ -1233,14 +1284,11 @@ class SignalSender : public Thread {
#endif // ANDROID
}
- const int vm_tgid_;
const int interval_;
- RuntimeProfilerRateLimiter rate_limiter_;
// Protects the process wide state below.
static Mutex* mutex_;
static SignalSender* instance_;
- static bool signal_handler_installed_;
static struct sigaction old_signal_handler_;
private:
@@ -1251,7 +1299,6 @@ class SignalSender : public Thread {
Mutex* SignalSender::mutex_ = NULL;
SignalSender* SignalSender::instance_ = NULL;
struct sigaction SignalSender::old_signal_handler_;
-bool SignalSender::signal_handler_installed_ = false;
void OS::SetUp() {
@@ -1279,10 +1326,12 @@ void OS::SetUp() {
}
#endif
SignalSender::SetUp();
+ CpuProfilerSignalHandler::SetUp();
}
void OS::TearDown() {
+ CpuProfilerSignalHandler::TearDown();
SignalSender::TearDown();
delete limit_mutex;
}
@@ -1292,6 +1341,7 @@ Sampler::Sampler(Isolate* isolate, int interval)
: isolate_(isolate),
interval_(interval),
profiling_(false),
+ has_processing_thread_(false),
active_(false),
samples_taken_(0) {
data_ = new PlatformData;
@@ -1318,4 +1368,26 @@ void Sampler::Stop() {
}
+bool Sampler::CanSampleOnProfilerEventsProcessorThread() {
+ return true;
+}
+
+
+void Sampler::DoSample() {
+ platform_data()->SendProfilingSignal();
+}
+
+
+void Sampler::StartProfiling() {
+ platform_data()->set_signal_handler_installed(
+ CpuProfilerSignalHandler::RegisterProfilingSampler());
+}
+
+
+void Sampler::StopProfiling() {
+ CpuProfilerSignalHandler::UnregisterProfilingSampler();
+ platform_data()->set_signal_handler_installed(false);
+}
+
+
} } // namespace v8::internal
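
On Linux the profiling signal is now sent per sampler through PlatformData::SendProfilingSignal, which targets one specific thread with the tgkill system call; glibc ships no wrapper for it, hence the raw syscall(2). A hedged sketch of just that call (function and variable names here are illustrative):

#include <signal.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

// Deliver SIGPROF to one specific thread (tid) of one process (tgid).
// Returns 0 on success, -1 with errno set on failure.
static int SignalProfilerTick(pid_t tgid, pid_t tid) {
  return static_cast<int>(syscall(SYS_tgkill, tgid, tid, SIGPROF));
}

// Typical use: capture getpid() and syscall(SYS_gettid) on the sampled
// thread up front, then call SignalProfilerTick from the sampler thread.
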
diff --git a/src/3rdparty/v8/src/platform-macos.cc b/src/3rdparty/v8/src/platform-macos.cc
index 22d2bcf..7913981 100644
--- a/src/3rdparty/v8/src/platform-macos.cc
+++ b/src/3rdparty/v8/src/platform-macos.cc
@@ -171,6 +171,11 @@ void OS::Sleep(int milliseconds) {
}
+int OS::NumberOfCores() {
+ return sysconf(_SC_NPROCESSORS_ONLN);
+}
+
+
void OS::Abort() {
// Redirect to std abort to signal abnormal program termination
abort();
@@ -182,6 +187,11 @@ void OS::DebugBreak() {
}
+void OS::DumpBacktrace() {
+ // Currently unsupported.
+}
+
+
class PosixMemoryMappedFile : public OS::MemoryMappedFile {
public:
PosixMemoryMappedFile(FILE* file, void* memory, int size)
@@ -787,23 +797,12 @@ class SamplerThread : public Thread {
SamplerRegistry::State state;
while ((state = SamplerRegistry::GetState()) !=
SamplerRegistry::HAS_NO_SAMPLERS) {
- bool cpu_profiling_enabled =
- (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS);
- bool runtime_profiler_enabled = RuntimeProfiler::IsEnabled();
// When CPU profiling is enabled both JavaScript and C++ code is
// profiled. We must not suspend.
- if (!cpu_profiling_enabled) {
- if (rate_limiter_.SuspendIfNecessary()) continue;
- }
- if (cpu_profiling_enabled) {
- if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this)) {
- return;
- }
- }
- if (runtime_profiler_enabled) {
- if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile, NULL)) {
- return;
- }
+ if (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS) {
+ SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this);
+ } else {
+ if (RuntimeProfiler::WaitForSomeIsolateToEnterJS()) continue;
}
OS::Sleep(interval_);
}
@@ -817,11 +816,6 @@ class SamplerThread : public Thread {
sampler_thread->SampleContext(sampler);
}
- static void DoRuntimeProfile(Sampler* sampler, void* ignored) {
- if (!sampler->isolate()->IsInitialized()) return;
- sampler->isolate()->runtime_profiler()->NotifyTick();
- }
-
void SampleContext(Sampler* sampler) {
thread_act_t profiled_thread = sampler->platform_data()->profiled_thread();
TickSample sample_obj;
@@ -867,7 +861,6 @@ class SamplerThread : public Thread {
}
const int interval_;
- RuntimeProfilerRateLimiter rate_limiter_;
// Protects the process wide state below.
static Mutex* mutex_;
@@ -929,4 +922,21 @@ void Sampler::Stop() {
}
+bool Sampler::CanSampleOnProfilerEventsProcessorThread() {
+ return false;
+}
+
+
+void Sampler::DoSample() {
+}
+
+
+void Sampler::StartProfiling() {
+}
+
+
+void Sampler::StopProfiling() {
+}
+
+
} } // namespace v8::internal
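
Every POSIX port in this patch implements OS::NumberOfCores with the same sysconf query. A small sketch, assuming only POSIX (the function name is illustrative); note that sysconf can return -1 on failure:

#include <unistd.h>

static int OnlineProcessorCount() {
  // _SC_NPROCESSORS_ONLN reports processors currently online; treat
  // non-positive results as "unknown" and fall back to one core.
  long n = sysconf(_SC_NPROCESSORS_ONLN);
  return n > 0 ? static_cast<int>(n) : 1;
}
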
diff --git a/src/3rdparty/v8/src/platform-nullos.cc b/src/3rdparty/v8/src/platform-nullos.cc
index ccd2123..20d8801 100644
--- a/src/3rdparty/v8/src/platform-nullos.cc
+++ b/src/3rdparty/v8/src/platform-nullos.cc
@@ -266,6 +266,12 @@ void OS::Sleep(int milliseconds) {
}
+int OS::NumberOfCores() {
+ UNIMPLEMENTED();
+ return 0;
+}
+
+
void OS::Abort() {
// Minimalistic implementation for bootstrapping.
abort();
@@ -277,6 +283,11 @@ void OS::DebugBreak() {
}
+void OS::DumpBacktrace() {
+ // Currently unsupported.
+}
+
+
OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
UNIMPLEMENTED();
return NULL;
@@ -514,4 +525,25 @@ void ProfileSampler::Stop() {
}
+bool Sampler::CanSampleOnProfilerEventsProcessorThread() {
+ UNIMPLEMENTED();
+ return false;
+}
+
+
+void Sampler::DoSample() {
+ UNIMPLEMENTED();
+}
+
+
+void Sampler::StartProfiling() {
+ UNIMPLEMENTED();
+}
+
+
+void Sampler::StopProfiling() {
+ UNIMPLEMENTED();
+}
+
+
} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/platform-openbsd.cc b/src/3rdparty/v8/src/platform-openbsd.cc
index 292927b..ccccedc 100644
--- a/src/3rdparty/v8/src/platform-openbsd.cc
+++ b/src/3rdparty/v8/src/platform-openbsd.cc
@@ -204,6 +204,11 @@ void OS::Sleep(int milliseconds) {
}
+int OS::NumberOfCores() {
+ return sysconf(_SC_NPROCESSORS_ONLN);
+}
+
+
void OS::Abort() {
// Redirect to std abort to signal abnormal program termination.
abort();
@@ -215,6 +220,11 @@ void OS::DebugBreak() {
}
+void OS::DumpBacktrace() {
+ // Currently unsupported.
+}
+
+
class PosixMemoryMappedFile : public OS::MemoryMappedFile {
public:
PosixMemoryMappedFile(FILE* file, void* memory, int size)
@@ -784,11 +794,6 @@ class Sampler::PlatformData : public Malloced {
class SignalSender : public Thread {
public:
- enum SleepInterval {
- HALF_INTERVAL,
- FULL_INTERVAL
- };
-
static const int kSignalSenderStackSize = 64 * KB;
explicit SignalSender(int interval)
@@ -844,43 +849,16 @@ class SignalSender : public Thread {
SamplerRegistry::State state;
while ((state = SamplerRegistry::GetState()) !=
SamplerRegistry::HAS_NO_SAMPLERS) {
- bool cpu_profiling_enabled =
- (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS);
- bool runtime_profiler_enabled = RuntimeProfiler::IsEnabled();
- if (cpu_profiling_enabled && !signal_handler_installed_) {
- InstallSignalHandler();
- } else if (!cpu_profiling_enabled && signal_handler_installed_) {
- RestoreSignalHandler();
- }
// When CPU profiling is enabled both JavaScript and C++ code is
// profiled. We must not suspend.
- if (!cpu_profiling_enabled) {
- if (rate_limiter_.SuspendIfNecessary()) continue;
- }
- if (cpu_profiling_enabled && runtime_profiler_enabled) {
- if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this)) {
- return;
- }
- Sleep(HALF_INTERVAL);
- if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile, NULL)) {
- return;
- }
- Sleep(HALF_INTERVAL);
+ if (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS) {
+ if (!signal_handler_installed_) InstallSignalHandler();
+ SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this);
} else {
- if (cpu_profiling_enabled) {
- if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile,
- this)) {
- return;
- }
- }
- if (runtime_profiler_enabled) {
- if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile,
- NULL)) {
- return;
- }
- }
- Sleep(FULL_INTERVAL);
+ if (signal_handler_installed_) RestoreSignalHandler();
+ if (RuntimeProfiler::WaitForSomeIsolateToEnterJS()) continue;
}
+      Sleep();  // TODO(svenpanne) Figure out if OS::Sleep(interval_) is enough.
}
}
@@ -890,21 +868,15 @@ class SignalSender : public Thread {
sender->SendProfilingSignal(sampler->platform_data()->vm_tid());
}
- static void DoRuntimeProfile(Sampler* sampler, void* ignored) {
- if (!sampler->isolate()->IsInitialized()) return;
- sampler->isolate()->runtime_profiler()->NotifyTick();
- }
-
void SendProfilingSignal(pthread_t tid) {
if (!signal_handler_installed_) return;
pthread_kill(tid, SIGPROF);
}
- void Sleep(SleepInterval full_or_half) {
+ void Sleep() {
    // Convert ms to us and subtract 100 us to compensate for delays
    // occurring during signal delivery.
useconds_t interval = interval_ * 1000 - 100;
- if (full_or_half == HALF_INTERVAL) interval /= 2;
int result = usleep(interval);
#ifdef DEBUG
if (result != 0 && errno != EINTR) {
@@ -920,7 +892,6 @@ class SignalSender : public Thread {
const int vm_tgid_;
const int interval_;
- RuntimeProfilerRateLimiter rate_limiter_;
// Protects the process wide state below.
static Mutex* mutex_;
@@ -984,4 +955,21 @@ void Sampler::Stop() {
}
+bool Sampler::CanSampleOnProfilerEventsProcessorThread() {
+ return false;
+}
+
+
+void Sampler::DoSample() {
+}
+
+
+void Sampler::StartProfiling() {
+}
+
+
+void Sampler::StopProfiling() {
+}
+
+
} } // namespace v8::internal
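
The OpenBSD loop above (and the QNX and Solaris loops below) installs the SIGPROF handler only while CPU-profiling samplers exist and restores the previous disposition otherwise. A self-contained sketch of that sigaction install/restore pattern under plain POSIX (names are illustrative, and the handler body is deliberately empty):

#include <signal.h>
#include <string.h>

static struct sigaction old_action;

static void ProfilerTick(int signal, siginfo_t* info, void* context) {
  (void) signal; (void) info; (void) context;
  // Sampling work goes here; everything in this body must be
  // async-signal-safe.
}

static bool InstallProfilerHandler() {
  struct sigaction sa;
  memset(&sa, 0, sizeof(sa));
  sa.sa_sigaction = ProfilerTick;
  sigemptyset(&sa.sa_mask);
  sa.sa_flags = SA_RESTART | SA_SIGINFO;
  // Remember the old disposition so it can be put back later.
  return sigaction(SIGPROF, &sa, &old_action) == 0;
}

static void RestoreProfilerHandler() {
  sigaction(SIGPROF, &old_action, NULL);
}
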
diff --git a/src/3rdparty/v8/src/platform-posix.cc b/src/3rdparty/v8/src/platform-posix.cc
index 3bc8373..0016d59 100644
--- a/src/3rdparty/v8/src/platform-posix.cc
+++ b/src/3rdparty/v8/src/platform-posix.cc
@@ -142,11 +142,19 @@ UNARY_MATH_FUNCTION(sin, CreateTranscendentalFunction(TranscendentalCache::SIN))
UNARY_MATH_FUNCTION(cos, CreateTranscendentalFunction(TranscendentalCache::COS))
UNARY_MATH_FUNCTION(tan, CreateTranscendentalFunction(TranscendentalCache::TAN))
UNARY_MATH_FUNCTION(log, CreateTranscendentalFunction(TranscendentalCache::LOG))
+UNARY_MATH_FUNCTION(exp, CreateExpFunction())
UNARY_MATH_FUNCTION(sqrt, CreateSqrtFunction())
#undef MATH_FUNCTION
+void lazily_initialize_fast_exp() {
+ if (fast_exp_function == NULL) {
+ init_fast_exp_function();
+ }
+}
+
+
double OS::nan_value() {
// NAN from math.h is defined in C99 and not in POSIX.
return NAN;
@@ -332,6 +340,7 @@ void POSIXPostSetUp() {
init_fast_cos_function();
init_fast_tan_function();
init_fast_log_function();
+ // fast_exp is initialized lazily.
init_fast_sqrt_function();
}
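
lazily_initialize_fast_exp defers building the exp lookup data (about 16KB, per the platform.h comment later in this patch) until first use. Stripped down, it is a guarded one-time initialization of a function pointer; the guard is unsynchronized, so the first call must not race with concurrent ones. A sketch, with CreateExpFunction standing in for V8's code generator:

#include <math.h>

typedef double (*UnaryMathFunction)(double);

static double SlowExp(double x) { return exp(x); }

// Stand-in for V8's generator that emits fast exp code plus its tables.
static UnaryMathFunction CreateExpFunction() { return SlowExp; }

static UnaryMathFunction fast_exp_function = NULL;

void lazily_initialize_fast_exp() {
  // Unsynchronized check-then-init: callers must ensure the first call
  // cannot race with others.
  if (fast_exp_function == NULL) {
    fast_exp_function = CreateExpFunction();
  }
}

double fast_exp(double input) {
  lazily_initialize_fast_exp();
  return fast_exp_function(input);
}
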
diff --git a/src/3rdparty/v8/src/platform-qnx.cc b/src/3rdparty/v8/src/platform-qnx.cc
index e535756..83946f9 100644
--- a/src/3rdparty/v8/src/platform-qnx.cc
+++ b/src/3rdparty/v8/src/platform-qnx.cc
@@ -134,6 +134,10 @@ bool OS::ArmCpuHasFeature(CpuFeature feature) {
return (SYSPAGE_ENTRY(cpuinfo)->flags & ARM_CPU_FLAG_V7) != 0;
case SUDIV:
return CPUInfoContainsString("idiva");
+ case VFP32DREGS:
+      // We could even return true here; as far as we know, all shipping
+      // devices have 32 double-precision registers.
+ return !CPUInfoContainsString("d16");
default:
UNREACHABLE();
}
@@ -292,6 +296,11 @@ void OS::Sleep(int milliseconds) {
}
+int OS::NumberOfCores() {
+ return sysconf(_SC_NPROCESSORS_ONLN);
+}
+
+
void OS::Abort() {
// Redirect to std abort to signal abnormal program termination.
abort();
@@ -386,7 +395,7 @@ void OS::LogSharedLibraryAddresses() {
return;
}
   /* Get the number of map entries. */
if (devctl(proc_fd, DCMD_PROC_MAPINFO, NULL, 0, &num) != EOK) {
close(proc_fd);
return;
@@ -398,7 +407,7 @@ void OS::LogSharedLibraryAddresses() {
return;
}
   /* Fill the map entries. */
if (devctl(proc_fd, DCMD_PROC_PAGEDATA, mapinfos, num * sizeof(procfs_mapinfo), &num) != EOK) {
free(mapinfos);
close(proc_fd);
@@ -412,10 +421,10 @@ void OS::LogSharedLibraryAddresses() {
if (mapinfo->flags & MAP_ELF) {
map.info.vaddr = mapinfo->vaddr;
if (devctl(proc_fd, DCMD_PROC_MAPDEBUG, &map, sizeof(map), 0) != EOK)
- continue;
+ continue;
- LOG(isolate, SharedLibraryEvent(map.info.path, mapinfo->vaddr, mapinfo->vaddr + mapinfo->size));
- }
+ LOG(isolate, SharedLibraryEvent(map.info.path, mapinfo->vaddr, mapinfo->vaddr + mapinfo->size));
+ }
}
free(mapinfos);
close(proc_fd);
@@ -875,11 +884,6 @@ class Sampler::PlatformData : public Malloced {
class SignalSender : public Thread {
public:
- enum SleepInterval {
- HALF_INTERVAL,
- FULL_INTERVAL
- };
-
static const int kSignalSenderStackSize = 32 * KB;
explicit SignalSender(int interval)
@@ -935,43 +939,16 @@ class SignalSender : public Thread {
SamplerRegistry::State state;
while ((state = SamplerRegistry::GetState()) !=
SamplerRegistry::HAS_NO_SAMPLERS) {
- bool cpu_profiling_enabled =
- (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS);
- bool runtime_profiler_enabled = RuntimeProfiler::IsEnabled();
- if (cpu_profiling_enabled && !signal_handler_installed_) {
- InstallSignalHandler();
- } else if (!cpu_profiling_enabled && signal_handler_installed_) {
- RestoreSignalHandler();
- }
// When CPU profiling is enabled both JavaScript and C++ code is
// profiled. We must not suspend.
- if (!cpu_profiling_enabled) {
- if (rate_limiter_.SuspendIfNecessary()) continue;
- }
- if (cpu_profiling_enabled && runtime_profiler_enabled) {
- if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this)) {
- return;
- }
- Sleep(HALF_INTERVAL);
- if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile, NULL)) {
- return;
- }
- Sleep(HALF_INTERVAL);
+ if (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS) {
+ if (!signal_handler_installed_) InstallSignalHandler();
+ SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this);
} else {
- if (cpu_profiling_enabled) {
- if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile,
- this)) {
- return;
- }
- }
- if (runtime_profiler_enabled) {
- if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile,
- NULL)) {
- return;
- }
- }
- Sleep(FULL_INTERVAL);
+ if (signal_handler_installed_) RestoreSignalHandler();
+ if (RuntimeProfiler::WaitForSomeIsolateToEnterJS()) continue;
}
+      Sleep();  // TODO(svenpanne) Figure out if OS::Sleep(interval_) is enough.
}
}
@@ -981,21 +958,15 @@ class SignalSender : public Thread {
sender->SendProfilingSignal(sampler->platform_data()->vm_tid());
}
- static void DoRuntimeProfile(Sampler* sampler, void* ignored) {
- if (!sampler->isolate()->IsInitialized()) return;
- sampler->isolate()->runtime_profiler()->NotifyTick();
- }
-
void SendProfilingSignal(int tid) {
if (!signal_handler_installed_) return;
pthread_kill(tid, SIGPROF);
}
- void Sleep(SleepInterval full_or_half) {
+ void Sleep() {
      // Convert ms to us and subtract 100 us to compensate for delays
      // occurring during signal delivery.
useconds_t interval = interval_ * 1000 - 100;
- if (full_or_half == HALF_INTERVAL) interval /= 2;
int result = usleep(interval);
#ifdef DEBUG
if (result != 0 && errno != EINTR) {
@@ -1011,7 +982,6 @@ class SignalSender : public Thread {
const int vm_tgid_;
const int interval_;
- RuntimeProfilerRateLimiter rate_limiter_;
// Protects the process wide state below.
static Mutex* mutex_;
@@ -1028,6 +998,9 @@ SignalSender* SignalSender::instance_ = NULL;
struct sigaction SignalSender::old_signal_handler_;
bool SignalSender::signal_handler_installed_ = false;
+void OS::DumpBacktrace() {
+ // Currently unsupported.
+}
void OS::SetUp() {
// Seed the random number generator. We preserve microsecond resolution.
@@ -1093,4 +1066,21 @@ void Sampler::Stop() {
}
+bool Sampler::CanSampleOnProfilerEventsProcessorThread() {
+ return false;
+}
+
+
+void Sampler::DoSample() {
+}
+
+
+void Sampler::StartProfiling() {
+}
+
+
+void Sampler::StopProfiling() {
+}
+
+
} } // namespace v8::internal
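
The Linux and QNX hunks encode the same VFP32DREGS rule: all 32 double-precision registers are present when the core supports VFP3 and the CPU information does not advertise the 16-register "d16" variant. A sketch of that check over a raw feature string ("d16" matches the patch; the "vfpv3" token and helper names are assumptions about the cpuinfo format, not taken from this patch):

#include <string.h>

static bool CpuInfoContains(const char* cpuinfo, const char* feature) {
  return strstr(cpuinfo, feature) != NULL;
}

static bool HasVfp32DRegs(const char* cpuinfo) {
  // "vfpv3" advertises VFP3; "d16" marks the 16-register variant
  // (e.g. "vfpv3d16"), which lacks the upper 16 D-registers.
  return CpuInfoContains(cpuinfo, "vfpv3") && !CpuInfoContains(cpuinfo, "d16");
}
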
diff --git a/src/3rdparty/v8/src/platform-solaris.cc b/src/3rdparty/v8/src/platform-solaris.cc
index 5652741..88d197f 100644
--- a/src/3rdparty/v8/src/platform-solaris.cc
+++ b/src/3rdparty/v8/src/platform-solaris.cc
@@ -191,6 +191,11 @@ void OS::Sleep(int milliseconds) {
}
+int OS::NumberOfCores() {
+ return sysconf(_SC_NPROCESSORS_ONLN);
+}
+
+
void OS::Abort() {
// Redirect to std abort to signal abnormal program termination.
abort();
@@ -202,6 +207,11 @@ void OS::DebugBreak() {
}
+void OS::DumpBacktrace() {
+ // Currently unsupported.
+}
+
+
class PosixMemoryMappedFile : public OS::MemoryMappedFile {
public:
PosixMemoryMappedFile(FILE* file, void* memory, int size)
@@ -701,11 +711,6 @@ class Sampler::PlatformData : public Malloced {
class SignalSender : public Thread {
public:
- enum SleepInterval {
- HALF_INTERVAL,
- FULL_INTERVAL
- };
-
static const int kSignalSenderStackSize = 64 * KB;
explicit SignalSender(int interval)
@@ -760,44 +765,16 @@ class SignalSender : public Thread {
SamplerRegistry::State state;
while ((state = SamplerRegistry::GetState()) !=
SamplerRegistry::HAS_NO_SAMPLERS) {
- bool cpu_profiling_enabled =
- (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS);
- bool runtime_profiler_enabled = RuntimeProfiler::IsEnabled();
- if (cpu_profiling_enabled && !signal_handler_installed_) {
- InstallSignalHandler();
- } else if (!cpu_profiling_enabled && signal_handler_installed_) {
- RestoreSignalHandler();
- }
-
// When CPU profiling is enabled both JavaScript and C++ code is
// profiled. We must not suspend.
- if (!cpu_profiling_enabled) {
- if (rate_limiter_.SuspendIfNecessary()) continue;
- }
- if (cpu_profiling_enabled && runtime_profiler_enabled) {
- if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this)) {
- return;
- }
- Sleep(HALF_INTERVAL);
- if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile, NULL)) {
- return;
- }
- Sleep(HALF_INTERVAL);
+ if (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS) {
+ if (!signal_handler_installed_) InstallSignalHandler();
+ SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this);
} else {
- if (cpu_profiling_enabled) {
- if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile,
- this)) {
- return;
- }
- }
- if (runtime_profiler_enabled) {
- if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile,
- NULL)) {
- return;
- }
- }
- Sleep(FULL_INTERVAL);
+ if (signal_handler_installed_) RestoreSignalHandler();
+ if (RuntimeProfiler::WaitForSomeIsolateToEnterJS()) continue;
}
+        Sleep();  // TODO(svenpanne) Figure out if OS::Sleep(interval_) is enough.
}
}
@@ -807,21 +784,15 @@ class SignalSender : public Thread {
sender->SendProfilingSignal(sampler->platform_data()->vm_tid());
}
- static void DoRuntimeProfile(Sampler* sampler, void* ignored) {
- if (!sampler->isolate()->IsInitialized()) return;
- sampler->isolate()->runtime_profiler()->NotifyTick();
- }
-
void SendProfilingSignal(pthread_t tid) {
if (!signal_handler_installed_) return;
pthread_kill(tid, SIGPROF);
}
- void Sleep(SleepInterval full_or_half) {
+ void Sleep() {
      // Convert ms to us and subtract 100 us to compensate for delays
      // occurring during signal delivery.
useconds_t interval = interval_ * 1000 - 100;
- if (full_or_half == HALF_INTERVAL) interval /= 2;
int result = usleep(interval);
#ifdef DEBUG
if (result != 0 && errno != EINTR) {
@@ -836,7 +807,6 @@ class SignalSender : public Thread {
}
const int interval_;
- RuntimeProfilerRateLimiter rate_limiter_;
// Protects the process wide state below.
static Mutex* mutex_;
@@ -902,4 +872,22 @@ void Sampler::Stop() {
SetActive(false);
}
+
+bool Sampler::CanSampleOnProfilerEventsProcessorThread() {
+ return false;
+}
+
+
+void Sampler::DoSample() {
+}
+
+
+void Sampler::StartProfiling() {
+}
+
+
+void Sampler::StopProfiling() {
+}
+
+
} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/platform-win32.cc b/src/3rdparty/v8/src/platform-win32.cc
index ae9ab2a..2383fad 100644
--- a/src/3rdparty/v8/src/platform-win32.cc
+++ b/src/3rdparty/v8/src/platform-win32.cc
@@ -227,11 +227,19 @@ UNARY_MATH_FUNCTION(sin, CreateTranscendentalFunction(TranscendentalCache::SIN))
UNARY_MATH_FUNCTION(cos, CreateTranscendentalFunction(TranscendentalCache::COS))
UNARY_MATH_FUNCTION(tan, CreateTranscendentalFunction(TranscendentalCache::TAN))
UNARY_MATH_FUNCTION(log, CreateTranscendentalFunction(TranscendentalCache::LOG))
+UNARY_MATH_FUNCTION(exp, CreateExpFunction())
UNARY_MATH_FUNCTION(sqrt, CreateSqrtFunction())
#undef UNARY_MATH_FUNCTION
+void lazily_initialize_fast_exp() {
+ if (fast_exp_function == NULL) {
+ init_fast_exp_function();
+ }
+}
+
+
void MathSetup() {
#ifdef _WIN64
init_modulo_function();
@@ -240,6 +248,7 @@ void MathSetup() {
init_fast_cos_function();
init_fast_tan_function();
init_fast_log_function();
+ // fast_exp is initialized lazily.
init_fast_sqrt_function();
}
@@ -1025,6 +1034,13 @@ void OS::Sleep(int milliseconds) {
}
+int OS::NumberOfCores() {
+ SYSTEM_INFO info;
+ GetSystemInfo(&info);
+ return info.dwNumberOfProcessors;
+}
+
+
void OS::Abort() {
if (IsDebuggerPresent() || FLAG_break_on_abort) {
DebugBreak();
@@ -1048,6 +1064,11 @@ void OS::DebugBreak() {
}
+void OS::DumpBacktrace() {
+ // Currently unsupported.
+}
+
+
class Win32MemoryMappedFile : public OS::MemoryMappedFile {
public:
Win32MemoryMappedFile(HANDLE file,
@@ -1527,8 +1548,9 @@ int OS::ActivationFrameAlignment() {
// With gcc 4.4 the tree vectorization optimizer can generate code
// that requires 16 byte alignment such as movdqa on x86.
return 16;
-#endif
+#else
return 8; // Floating-point math runs faster with 8-byte alignment.
+#endif
}
@@ -2106,23 +2128,12 @@ class SamplerThread : public Thread {
SamplerRegistry::State state;
while ((state = SamplerRegistry::GetState()) !=
SamplerRegistry::HAS_NO_SAMPLERS) {
- bool cpu_profiling_enabled =
- (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS);
- bool runtime_profiler_enabled = RuntimeProfiler::IsEnabled();
// When CPU profiling is enabled both JavaScript and C++ code is
// profiled. We must not suspend.
- if (!cpu_profiling_enabled) {
- if (rate_limiter_.SuspendIfNecessary()) continue;
- }
- if (cpu_profiling_enabled) {
- if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this)) {
- return;
- }
- }
- if (runtime_profiler_enabled) {
- if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile, NULL)) {
- return;
- }
+ if (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS) {
+ SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this);
+ } else {
+ if (RuntimeProfiler::WaitForSomeIsolateToEnterJS()) continue;
}
OS::Sleep(interval_);
}
@@ -2136,11 +2147,6 @@ class SamplerThread : public Thread {
sampler_thread->SampleContext(sampler);
}
- static void DoRuntimeProfile(Sampler* sampler, void* ignored) {
- if (!sampler->isolate()->IsInitialized()) return;
- sampler->isolate()->runtime_profiler()->NotifyTick();
- }
-
void SampleContext(Sampler* sampler) {
HANDLE profiled_thread = sampler->platform_data()->profiled_thread();
if (profiled_thread == NULL) return;
@@ -2182,7 +2188,6 @@ class SamplerThread : public Thread {
}
const int interval_;
- RuntimeProfilerRateLimiter rate_limiter_;
// Protects the process wide state below.
static Mutex* mutex_;
@@ -2246,4 +2251,21 @@ void Sampler::Stop() {
}
+bool Sampler::CanSampleOnProfilerEventsProcessorThread() {
+ return false;
+}
+
+
+void Sampler::DoSample() {
+}
+
+
+void Sampler::StartProfiling() {
+}
+
+
+void Sampler::StopProfiling() {
+}
+
+
} } // namespace v8::internal
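
The Win32 port queries the system directly rather than via sysconf. A sketch; dwNumberOfProcessors counts logical processors in the caller's processor group, so it may undercount on machines with more than 64 logical CPUs:

#include <windows.h>

static int NumberOfLogicalProcessors() {
  SYSTEM_INFO info;
  GetSystemInfo(&info);
  return static_cast<int>(info.dwNumberOfProcessors);
}
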
diff --git a/src/3rdparty/v8/src/platform.h b/src/3rdparty/v8/src/platform.h
index f3ab08d..bf1a1dc 100644
--- a/src/3rdparty/v8/src/platform.h
+++ b/src/3rdparty/v8/src/platform.h
@@ -119,12 +119,16 @@ class Mutex;
double ceiling(double x);
double modulo(double x, double y);
-// Custom implementation of sin, cos, tan and log.
+// Custom implementation of math functions.
double fast_sin(double input);
double fast_cos(double input);
double fast_tan(double input);
double fast_log(double input);
+double fast_exp(double input);
double fast_sqrt(double input);
+// The custom exp implementation needs 16KB of lookup data; initialize it
+// on demand.
+void lazily_initialize_fast_exp();
// Forward declarations.
class Socket;
@@ -235,12 +239,17 @@ class OS {
// Sleep for a number of milliseconds.
static void Sleep(const int milliseconds);
+ static int NumberOfCores();
+
// Abort the current process.
static void Abort();
// Debug break.
static void DebugBreak();
+  // Dump the current C++ stack trace (only functional on Linux).
+ static void DumpBacktrace();
+
// Walk the stack.
static const int kStackWalkError = -1;
static const int kStackWalkMaxNameLen = 256;
@@ -757,10 +766,17 @@ class Sampler {
void Start();
void Stop();
- // Is the sampler used for profiling?
- bool IsProfiling() const { return NoBarrier_Load(&profiling_) > 0; }
- void IncreaseProfilingDepth() { NoBarrier_AtomicIncrement(&profiling_, 1); }
- void DecreaseProfilingDepth() { NoBarrier_AtomicIncrement(&profiling_, -1); }
+  // Whether the sampling thread should use this Sampler for CPU profiling.
+ bool IsProfiling() const {
+ return NoBarrier_Load(&profiling_) > 0 &&
+ !NoBarrier_Load(&has_processing_thread_);
+ }
+ void IncreaseProfilingDepth() {
+ if (NoBarrier_AtomicIncrement(&profiling_, 1) == 1) StartProfiling();
+ }
+ void DecreaseProfilingDepth() {
+ if (!NoBarrier_AtomicIncrement(&profiling_, -1)) StopProfiling();
+ }
// Whether the sampler is running (that is, consumes resources).
bool IsActive() const { return NoBarrier_Load(&active_); }
@@ -776,6 +792,14 @@ class Sampler {
PlatformData* platform_data() { return data_; }
+  // If true, the next sample must be initiated on the profiler event
+  // processor thread right after the latest sample is processed.
+ static bool CanSampleOnProfilerEventsProcessorThread();
+ void DoSample();
+ void SetHasProcessingThread(bool value) {
+ NoBarrier_Store(&has_processing_thread_, value);
+ }
+
protected:
virtual void DoSampleStack(TickSample* sample) = 0;
@@ -783,9 +807,15 @@ class Sampler {
void SetActive(bool value) { NoBarrier_Store(&active_, value); }
void IncSamplesTaken() { if (++samples_taken_ < 0) samples_taken_ = 0; }
+ // Perform platform-specific initialization before DoSample() may be invoked.
+ void StartProfiling();
+ // Perform platform-specific cleanup after profiling.
+ void StopProfiling();
+
Isolate* isolate_;
const int interval_;
Atomic32 profiling_;
+ Atomic32 has_processing_thread_;
Atomic32 active_;
PlatformData* data_; // Platform specific data.
int samples_taken_; // Counts stack samples taken.
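
The reworked profiling-depth accessors above turn profiling_ into an edge-triggered refcount: StartProfiling runs only on the 0 to 1 transition and StopProfiling only on the 1 to 0 transition, so per-platform state such as the signal handler registration is set up exactly once no matter how many clients profile concurrently. A self-contained sketch with std::atomic standing in for V8's NoBarrier_AtomicIncrement:

#include <atomic>

class ProfilingDepth {
 public:
  void Increase() {
    // fetch_add returns the previous value; previous == 0 means we are
    // the first profiler, so perform the one-time setup.
    if (depth_.fetch_add(1) == 0) StartProfiling();
  }
  void Decrease() {
    // previous == 1 means we were the last profiler; tear down.
    if (depth_.fetch_sub(1) == 1) StopProfiling();
  }

 private:
  void StartProfiling() { /* e.g. register the SIGPROF handler */ }
  void StopProfiling() { /* e.g. restore the previous handler */ }
  std::atomic<int> depth_{0};
};
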
diff --git a/src/3rdparty/v8/src/preparse-data.cc b/src/3rdparty/v8/src/preparse-data.cc
index 98c343e..d0425b4 100644
--- a/src/3rdparty/v8/src/preparse-data.cc
+++ b/src/3rdparty/v8/src/preparse-data.cc
@@ -113,7 +113,7 @@ CompleteParserRecorder::CompleteParserRecorder()
literal_chars_(0),
symbol_store_(0),
symbol_keys_(0),
- symbol_table_(vector_compare),
+ string_table_(vector_compare),
symbol_id_(0) {
}
@@ -123,7 +123,7 @@ void CompleteParserRecorder::LogSymbol(int start,
bool is_ascii,
Vector<const byte> literal_bytes) {
Key key = { is_ascii, literal_bytes };
- HashMap::Entry* entry = symbol_table_.Lookup(&key, hash, true);
+ HashMap::Entry* entry = string_table_.Lookup(&key, hash, true);
int id = static_cast<int>(reinterpret_cast<intptr_t>(entry->value));
if (id == 0) {
// Copy literal contents for later comparison.
diff --git a/src/3rdparty/v8/src/preparse-data.h b/src/3rdparty/v8/src/preparse-data.h
index f347430..3a1e99d 100644
--- a/src/3rdparty/v8/src/preparse-data.h
+++ b/src/3rdparty/v8/src/preparse-data.h
@@ -221,7 +221,7 @@ class CompleteParserRecorder: public FunctionLoggingParserRecorder {
Collector<byte> literal_chars_;
Collector<byte> symbol_store_;
Collector<Key> symbol_keys_;
- HashMap symbol_table_;
+ HashMap string_table_;
int symbol_id_;
};
diff --git a/src/3rdparty/v8/src/preparser.h b/src/3rdparty/v8/src/preparser.h
index 13261f7..ad52d74 100644
--- a/src/3rdparty/v8/src/preparser.h
+++ b/src/3rdparty/v8/src/preparser.h
@@ -150,11 +150,11 @@ class PreParser {
// Parses a single function literal, from the opening parentheses before
// parameters to the closing brace after the body.
- // Returns a FunctionEntry describing the body of the funciton in enough
+ // Returns a FunctionEntry describing the body of the function in enough
// detail that it can be lazily compiled.
// The scanner is expected to have matched the "function" keyword and
// parameters, and have consumed the initial '{'.
- // At return, unless an error occured, the scanner is positioned before the
+ // At return, unless an error occurred, the scanner is positioned before the
// the final '}'.
PreParseResult PreParseLazyFunction(i::LanguageMode mode,
i::ParserRecorder* log);
diff --git a/src/3rdparty/v8/src/prettyprinter.cc b/src/3rdparty/v8/src/prettyprinter.cc
index 16eb85a..c339583 100644
--- a/src/3rdparty/v8/src/prettyprinter.cc
+++ b/src/3rdparty/v8/src/prettyprinter.cc
@@ -42,6 +42,7 @@ PrettyPrinter::PrettyPrinter() {
output_ = NULL;
size_ = 0;
pos_ = 0;
+ InitializeAstVisitor();
}
@@ -122,6 +123,14 @@ void PrettyPrinter::VisitModuleUrl(ModuleUrl* node) {
}
+void PrettyPrinter::VisitModuleStatement(ModuleStatement* node) {
+ Print("module ");
+ PrintLiteral(node->proxy()->name(), false);
+ Print(" ");
+ Visit(node->body());
+}
+
+
void PrettyPrinter::VisitExpressionStatement(ExpressionStatement* node) {
Visit(node->expression());
Print(";");
@@ -353,7 +362,7 @@ void PrettyPrinter::VisitThrow(Throw* node) {
void PrettyPrinter::VisitProperty(Property* node) {
Expression* key = node->key();
Literal* literal = key->AsLiteral();
- if (literal != NULL && literal->handle()->IsSymbol()) {
+ if (literal != NULL && literal->handle()->IsInternalizedString()) {
Print("(");
Visit(node->obj());
Print(").");
@@ -825,6 +834,13 @@ void AstPrinter::VisitModuleUrl(ModuleUrl* node) {
}
+void AstPrinter::VisitModuleStatement(ModuleStatement* node) {
+ IndentedScope indent(this, "MODULE");
+ PrintLiteralIndented("NAME", node->proxy()->name(), true);
+ PrintStatements(node->body()->statements());
+}
+
+
void AstPrinter::VisitExpressionStatement(ExpressionStatement* node) {
Visit(node->expression());
}
@@ -1055,7 +1071,7 @@ void AstPrinter::VisitProperty(Property* node) {
IndentedScope indent(this, "PROPERTY", node);
Visit(node->obj());
Literal* literal = node->key()->AsLiteral();
- if (literal != NULL && literal->handle()->IsSymbol()) {
+ if (literal != NULL && literal->handle()->IsInternalizedString()) {
PrintLiteralIndented("NAME", literal->handle(), false);
} else {
PrintIndentedVisit("KEY", node->key());
diff --git a/src/3rdparty/v8/src/prettyprinter.h b/src/3rdparty/v8/src/prettyprinter.h
index 9ac7257..41175ab 100644
--- a/src/3rdparty/v8/src/prettyprinter.h
+++ b/src/3rdparty/v8/src/prettyprinter.h
@@ -74,6 +74,8 @@ class PrettyPrinter: public AstVisitor {
void PrintDeclarations(ZoneList<Declaration*>* declarations);
void PrintFunctionLiteral(FunctionLiteral* function);
void PrintCaseClause(CaseClause* clause);
+
+ DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
};
diff --git a/src/3rdparty/v8/src/profile-generator-inl.h b/src/3rdparty/v8/src/profile-generator-inl.h
index 02e146f..cbdb6dd 100644
--- a/src/3rdparty/v8/src/profile-generator-inl.h
+++ b/src/3rdparty/v8/src/profile-generator-inl.h
@@ -84,7 +84,7 @@ CodeEntry* ProfileGenerator::EntryForVMState(StateTag tag) {
return gc_entry_;
case JS:
case COMPILER:
- case PARALLEL_COMPILER_PROLOGUE:
+ case PARALLEL_COMPILER:
// DOM events handlers are reported as OTHER / EXTERNAL entries.
// To avoid confusing people, let's put all these entries into
// one bucket.
@@ -95,55 +95,6 @@ CodeEntry* ProfileGenerator::EntryForVMState(StateTag tag) {
}
}
-
-HeapEntry* HeapGraphEdge::from() const {
- return &snapshot()->entries()[from_index_];
-}
-
-
-HeapSnapshot* HeapGraphEdge::snapshot() const {
- return to_entry_->snapshot();
-}
-
-
-int HeapEntry::index() const {
- return static_cast<int>(this - &snapshot_->entries().first());
-}
-
-
-int HeapEntry::set_children_index(int index) {
- children_index_ = index;
- int next_index = index + children_count_;
- children_count_ = 0;
- return next_index;
-}
-
-
-HeapGraphEdge** HeapEntry::children_arr() {
- ASSERT(children_index_ >= 0);
- return &snapshot_->children()[children_index_];
-}
-
-
-SnapshotObjectId HeapObjectsMap::GetNthGcSubrootId(int delta) {
- return kGcRootsFirstSubrootId + delta * kObjectIdStep;
-}
-
-
-HeapObject* V8HeapExplorer::GetNthGcSubrootObject(int delta) {
- return reinterpret_cast<HeapObject*>(
- reinterpret_cast<char*>(kFirstGcSubrootObject) +
- delta * HeapObjectsMap::kObjectIdStep);
-}
-
-
-int V8HeapExplorer::GetGcSubrootOrder(HeapObject* subroot) {
- return static_cast<int>(
- (reinterpret_cast<char*>(subroot) -
- reinterpret_cast<char*>(kFirstGcSubrootObject)) /
- HeapObjectsMap::kObjectIdStep);
-}
-
} } // namespace v8::internal
#endif // V8_PROFILE_GENERATOR_INL_H_
diff --git a/src/3rdparty/v8/src/profile-generator.cc b/src/3rdparty/v8/src/profile-generator.cc
index 9839edf..e5b5194 100644
--- a/src/3rdparty/v8/src/profile-generator.cc
+++ b/src/3rdparty/v8/src/profile-generator.cc
@@ -30,7 +30,6 @@
#include "profile-generator-inl.h"
#include "global-handles.h"
-#include "heap-profiler.h"
#include "scopeinfo.h"
#include "unicode.h"
#include "zone-inl.h"
@@ -66,7 +65,9 @@ int TokenEnumerator::GetTokenId(Object* token) {
Handle<Object> handle = isolate->global_handles()->Create(token);
// handle.location() points to a memory cell holding a pointer
// to a token object in the V8's heap.
- isolate->global_handles()->MakeWeak(handle.location(), this,
+ isolate->global_handles()->MakeWeak(handle.location(),
+ this,
+ NULL,
TokenRemovedCallback);
token_locations_.Add(handle.location());
token_removed_.Add(false);
@@ -74,11 +75,12 @@ int TokenEnumerator::GetTokenId(Object* token) {
}
-void TokenEnumerator::TokenRemovedCallback(v8::Persistent<v8::Value> handle,
+void TokenEnumerator::TokenRemovedCallback(v8::Isolate* isolate,
+ v8::Persistent<v8::Value> handle,
void* parameter) {
reinterpret_cast<TokenEnumerator*>(parameter)->TokenRemoved(
Utils::OpenHandle(*handle).location());
- handle.Dispose();
+ handle.Dispose(isolate);
}
@@ -112,7 +114,7 @@ const char* StringsStorage::GetCopy(const char* src) {
OS::StrNCpy(dst, src, len);
dst[len] = '\0';
uint32_t hash =
- HashSequentialString(dst.start(), len, HEAP->HashSeed());
+ StringHasher::HashSequentialString(dst.start(), len, HEAP->HashSeed());
return AddOrDisposeString(dst.start(), hash);
}
@@ -145,7 +147,7 @@ const char* StringsStorage::GetVFormatted(const char* format, va_list args) {
DeleteArray(str.start());
return format;
}
- uint32_t hash = HashSequentialString(
+ uint32_t hash = StringHasher::HashSequentialString(
str.start(), len, HEAP->HashSeed());
return AddOrDisposeString(str.start(), hash);
}
@@ -156,8 +158,8 @@ const char* StringsStorage::GetName(String* name) {
int length = Min(kMaxNameSize, name->length());
SmartArrayPointer<char> data =
name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL, 0, length);
- uint32_t hash =
- HashSequentialString(*data, length, name->GetHeap()->HashSeed());
+ uint32_t hash = StringHasher::HashSequentialString(
+ *data, length, name->GetHeap()->HashSeed());
return AddOrDisposeString(data.Detach(), hash);
}
return "";
@@ -940,2647 +942,4 @@ void ProfileGenerator::RecordTickSample(const TickSample& sample) {
}
-HeapGraphEdge::HeapGraphEdge(Type type, const char* name, int from, int to)
- : type_(type),
- from_index_(from),
- to_index_(to),
- name_(name) {
- ASSERT(type == kContextVariable
- || type == kProperty
- || type == kInternal
- || type == kShortcut);
-}
-
-
-HeapGraphEdge::HeapGraphEdge(Type type, int index, int from, int to)
- : type_(type),
- from_index_(from),
- to_index_(to),
- index_(index) {
- ASSERT(type == kElement || type == kHidden || type == kWeak);
-}
-
-
-void HeapGraphEdge::ReplaceToIndexWithEntry(HeapSnapshot* snapshot) {
- to_entry_ = &snapshot->entries()[to_index_];
-}
-
-
-const int HeapEntry::kNoEntry = -1;
-
-HeapEntry::HeapEntry(HeapSnapshot* snapshot,
- Type type,
- const char* name,
- SnapshotObjectId id,
- int self_size)
- : type_(type),
- children_count_(0),
- children_index_(-1),
- self_size_(self_size),
- id_(id),
- snapshot_(snapshot),
- name_(name) { }
-
-
-void HeapEntry::SetNamedReference(HeapGraphEdge::Type type,
- const char* name,
- HeapEntry* entry) {
- HeapGraphEdge edge(type, name, this->index(), entry->index());
- snapshot_->edges().Add(edge);
- ++children_count_;
-}
-
-
-void HeapEntry::SetIndexedReference(HeapGraphEdge::Type type,
- int index,
- HeapEntry* entry) {
- HeapGraphEdge edge(type, index, this->index(), entry->index());
- snapshot_->edges().Add(edge);
- ++children_count_;
-}
-
-
-Handle<HeapObject> HeapEntry::GetHeapObject() {
- return snapshot_->collection()->FindHeapObjectById(id());
-}
-
-
-void HeapEntry::Print(
- const char* prefix, const char* edge_name, int max_depth, int indent) {
- STATIC_CHECK(sizeof(unsigned) == sizeof(id()));
- OS::Print("%6d @%6u %*c %s%s: ",
- self_size(), id(), indent, ' ', prefix, edge_name);
- if (type() != kString) {
- OS::Print("%s %.40s\n", TypeAsString(), name_);
- } else {
- OS::Print("\"");
- const char* c = name_;
- while (*c && (c - name_) <= 40) {
- if (*c != '\n')
- OS::Print("%c", *c);
- else
- OS::Print("\\n");
- ++c;
- }
- OS::Print("\"\n");
- }
- if (--max_depth == 0) return;
- Vector<HeapGraphEdge*> ch = children();
- for (int i = 0; i < ch.length(); ++i) {
- HeapGraphEdge& edge = *ch[i];
- const char* edge_prefix = "";
- EmbeddedVector<char, 64> index;
- const char* edge_name = index.start();
- switch (edge.type()) {
- case HeapGraphEdge::kContextVariable:
- edge_prefix = "#";
- edge_name = edge.name();
- break;
- case HeapGraphEdge::kElement:
- OS::SNPrintF(index, "%d", edge.index());
- break;
- case HeapGraphEdge::kInternal:
- edge_prefix = "$";
- edge_name = edge.name();
- break;
- case HeapGraphEdge::kProperty:
- edge_name = edge.name();
- break;
- case HeapGraphEdge::kHidden:
- edge_prefix = "$";
- OS::SNPrintF(index, "%d", edge.index());
- break;
- case HeapGraphEdge::kShortcut:
- edge_prefix = "^";
- edge_name = edge.name();
- break;
- case HeapGraphEdge::kWeak:
- edge_prefix = "w";
- OS::SNPrintF(index, "%d", edge.index());
- break;
- default:
- OS::SNPrintF(index, "!!! unknown edge type: %d ", edge.type());
- }
- edge.to()->Print(edge_prefix, edge_name, max_depth, indent + 2);
- }
-}
-
-
-const char* HeapEntry::TypeAsString() {
- switch (type()) {
- case kHidden: return "/hidden/";
- case kObject: return "/object/";
- case kClosure: return "/closure/";
- case kString: return "/string/";
- case kCode: return "/code/";
- case kArray: return "/array/";
- case kRegExp: return "/regexp/";
- case kHeapNumber: return "/number/";
- case kNative: return "/native/";
- case kSynthetic: return "/synthetic/";
- default: return "???";
- }
-}
-
-
-// It is very important to keep objects that form a heap snapshot
-// as small as possible.
-namespace { // Avoid littering the global namespace.
-
-template <size_t ptr_size> struct SnapshotSizeConstants;
-
-template <> struct SnapshotSizeConstants<4> {
- static const int kExpectedHeapGraphEdgeSize = 12;
- static const int kExpectedHeapEntrySize = 24;
- static const int kExpectedHeapSnapshotsCollectionSize = 96;
- static const int kExpectedHeapSnapshotSize = 136;
- static const size_t kMaxSerializableSnapshotRawSize = 256 * MB;
-};
-
-template <> struct SnapshotSizeConstants<8> {
- static const int kExpectedHeapGraphEdgeSize = 24;
- static const int kExpectedHeapEntrySize = 32;
- static const int kExpectedHeapSnapshotsCollectionSize = 144;
- static const int kExpectedHeapSnapshotSize = 168;
- static const uint64_t kMaxSerializableSnapshotRawSize =
- static_cast<uint64_t>(6000) * MB;
-};
-
-} // namespace
-
-HeapSnapshot::HeapSnapshot(HeapSnapshotsCollection* collection,
- HeapSnapshot::Type type,
- const char* title,
- unsigned uid)
- : collection_(collection),
- type_(type),
- title_(title),
- uid_(uid),
- root_index_(HeapEntry::kNoEntry),
- gc_roots_index_(HeapEntry::kNoEntry),
- natives_root_index_(HeapEntry::kNoEntry),
- max_snapshot_js_object_id_(0) {
- STATIC_CHECK(
- sizeof(HeapGraphEdge) ==
- SnapshotSizeConstants<kPointerSize>::kExpectedHeapGraphEdgeSize);
- STATIC_CHECK(
- sizeof(HeapEntry) ==
- SnapshotSizeConstants<kPointerSize>::kExpectedHeapEntrySize);
- for (int i = 0; i < VisitorSynchronization::kNumberOfSyncTags; ++i) {
- gc_subroot_indexes_[i] = HeapEntry::kNoEntry;
- }
-}
-
-
-void HeapSnapshot::Delete() {
- collection_->RemoveSnapshot(this);
- delete this;
-}
-
-
-void HeapSnapshot::RememberLastJSObjectId() {
- max_snapshot_js_object_id_ = collection_->last_assigned_id();
-}
-
-
-HeapEntry* HeapSnapshot::AddRootEntry() {
- ASSERT(root_index_ == HeapEntry::kNoEntry);
- ASSERT(entries_.is_empty()); // Root entry must be the first one.
- HeapEntry* entry = AddEntry(HeapEntry::kObject,
- "",
- HeapObjectsMap::kInternalRootObjectId,
- 0);
- root_index_ = entry->index();
- ASSERT(root_index_ == 0);
- return entry;
-}
-
-
-HeapEntry* HeapSnapshot::AddGcRootsEntry() {
- ASSERT(gc_roots_index_ == HeapEntry::kNoEntry);
- HeapEntry* entry = AddEntry(HeapEntry::kObject,
- "(GC roots)",
- HeapObjectsMap::kGcRootsObjectId,
- 0);
- gc_roots_index_ = entry->index();
- return entry;
-}
-
-
-HeapEntry* HeapSnapshot::AddGcSubrootEntry(int tag) {
- ASSERT(gc_subroot_indexes_[tag] == HeapEntry::kNoEntry);
- ASSERT(0 <= tag && tag < VisitorSynchronization::kNumberOfSyncTags);
- HeapEntry* entry = AddEntry(
- HeapEntry::kObject,
- VisitorSynchronization::kTagNames[tag],
- HeapObjectsMap::GetNthGcSubrootId(tag),
- 0);
- gc_subroot_indexes_[tag] = entry->index();
- return entry;
-}
-
-
-HeapEntry* HeapSnapshot::AddEntry(HeapEntry::Type type,
- const char* name,
- SnapshotObjectId id,
- int size) {
- HeapEntry entry(this, type, name, id, size);
- entries_.Add(entry);
- return &entries_.last();
-}
-
-
-void HeapSnapshot::FillChildren() {
- ASSERT(children().is_empty());
- children().Allocate(edges().length());
- int children_index = 0;
- for (int i = 0; i < entries().length(); ++i) {
- HeapEntry* entry = &entries()[i];
- children_index = entry->set_children_index(children_index);
- }
- ASSERT(edges().length() == children_index);
- for (int i = 0; i < edges().length(); ++i) {
- HeapGraphEdge* edge = &edges()[i];
- edge->ReplaceToIndexWithEntry(this);
- edge->from()->add_child(edge);
- }
-}
-
-
-class FindEntryById {
- public:
- explicit FindEntryById(SnapshotObjectId id) : id_(id) { }
- int operator()(HeapEntry* const* entry) {
- if ((*entry)->id() == id_) return 0;
- return (*entry)->id() < id_ ? -1 : 1;
- }
- private:
- SnapshotObjectId id_;
-};
-
-
-HeapEntry* HeapSnapshot::GetEntryById(SnapshotObjectId id) {
- List<HeapEntry*>* entries_by_id = GetSortedEntriesList();
- // Perform a binary search by id.
- int index = SortedListBSearch(*entries_by_id, FindEntryById(id));
- if (index == -1)
- return NULL;
- return entries_by_id->at(index);
-}
-
-
-template<class T>
-static int SortByIds(const T* entry1_ptr,
- const T* entry2_ptr) {
- if ((*entry1_ptr)->id() == (*entry2_ptr)->id()) return 0;
- return (*entry1_ptr)->id() < (*entry2_ptr)->id() ? -1 : 1;
-}
-
-
-List<HeapEntry*>* HeapSnapshot::GetSortedEntriesList() {
- if (sorted_entries_.is_empty()) {
- sorted_entries_.Allocate(entries_.length());
- for (int i = 0; i < entries_.length(); ++i) {
- sorted_entries_[i] = &entries_[i];
- }
- sorted_entries_.Sort(SortByIds);
- }
- return &sorted_entries_;
-}
-
-
-void HeapSnapshot::Print(int max_depth) {
- root()->Print("", "", max_depth, 0);
-}
-
-
-template<typename T, class P>
-static size_t GetMemoryUsedByList(const List<T, P>& list) {
- return list.length() * sizeof(T) + sizeof(list);
-}
-
-
-size_t HeapSnapshot::RawSnapshotSize() const {
- STATIC_CHECK(SnapshotSizeConstants<kPointerSize>::kExpectedHeapSnapshotSize ==
- sizeof(HeapSnapshot)); // NOLINT
- return
- sizeof(*this) +
- GetMemoryUsedByList(entries_) +
- GetMemoryUsedByList(edges_) +
- GetMemoryUsedByList(children_) +
- GetMemoryUsedByList(sorted_entries_);
-}
-
-
-// We split IDs on evens for embedder objects (see
-// HeapObjectsMap::GenerateId) and odds for native objects.
-const SnapshotObjectId HeapObjectsMap::kInternalRootObjectId = 1;
-const SnapshotObjectId HeapObjectsMap::kGcRootsObjectId =
- HeapObjectsMap::kInternalRootObjectId + HeapObjectsMap::kObjectIdStep;
-const SnapshotObjectId HeapObjectsMap::kGcRootsFirstSubrootId =
- HeapObjectsMap::kGcRootsObjectId + HeapObjectsMap::kObjectIdStep;
-const SnapshotObjectId HeapObjectsMap::kFirstAvailableObjectId =
- HeapObjectsMap::kGcRootsFirstSubrootId +
- VisitorSynchronization::kNumberOfSyncTags * HeapObjectsMap::kObjectIdStep;
-
-HeapObjectsMap::HeapObjectsMap()
- : next_id_(kFirstAvailableObjectId),
- entries_map_(AddressesMatch) {
- // This dummy element solves a problem with entries_map_.
- // When we do lookup in HashMap we see no difference between two cases:
- // it has an entry with NULL as the value or it has created
- // a new entry on the fly with NULL as the default value.
- // With such dummy element we have a guaranty that all entries_map_ entries
- // will have the value field grater than 0.
- // This fact is using in MoveObject method.
- entries_.Add(EntryInfo(0, NULL, 0));
-}
-
-
-void HeapObjectsMap::SnapshotGenerationFinished() {
- RemoveDeadEntries();
-}
-
-
-void HeapObjectsMap::MoveObject(Address from, Address to) {
- ASSERT(to != NULL);
- ASSERT(from != NULL);
- if (from == to) return;
- void* from_value = entries_map_.Remove(from, AddressHash(from));
- if (from_value == NULL) return;
- int from_entry_info_index =
- static_cast<int>(reinterpret_cast<intptr_t>(from_value));
- entries_.at(from_entry_info_index).addr = to;
- HashMap::Entry* to_entry = entries_map_.Lookup(to, AddressHash(to), true);
- if (to_entry->value != NULL) {
- int to_entry_info_index =
- static_cast<int>(reinterpret_cast<intptr_t>(to_entry->value));
- // Without this operation we will have two EntryInfo's with the same
- // value in addr field. It is bad because later at RemoveDeadEntries
- // one of this entry will be removed with the corresponding entries_map_
- // entry.
- entries_.at(to_entry_info_index).addr = NULL;
- }
- to_entry->value = reinterpret_cast<void*>(from_entry_info_index);
-}
-
-
-SnapshotObjectId HeapObjectsMap::FindEntry(Address addr) {
- HashMap::Entry* entry = entries_map_.Lookup(addr, AddressHash(addr), false);
- if (entry == NULL) return 0;
- int entry_index = static_cast<int>(reinterpret_cast<intptr_t>(entry->value));
- EntryInfo& entry_info = entries_.at(entry_index);
- ASSERT(static_cast<uint32_t>(entries_.length()) > entries_map_.occupancy());
- return entry_info.id;
-}
-
-
-SnapshotObjectId HeapObjectsMap::FindOrAddEntry(Address addr,
- unsigned int size) {
- ASSERT(static_cast<uint32_t>(entries_.length()) > entries_map_.occupancy());
- HashMap::Entry* entry = entries_map_.Lookup(addr, AddressHash(addr), true);
- if (entry->value != NULL) {
- int entry_index =
- static_cast<int>(reinterpret_cast<intptr_t>(entry->value));
- EntryInfo& entry_info = entries_.at(entry_index);
- entry_info.accessed = true;
- entry_info.size = size;
- return entry_info.id;
- }
- entry->value = reinterpret_cast<void*>(entries_.length());
- SnapshotObjectId id = next_id_;
- next_id_ += kObjectIdStep;
- entries_.Add(EntryInfo(id, addr, size));
- ASSERT(static_cast<uint32_t>(entries_.length()) > entries_map_.occupancy());
- return id;
-}
-
-
-void HeapObjectsMap::StopHeapObjectsTracking() {
- time_intervals_.Clear();
-}
-
-void HeapObjectsMap::UpdateHeapObjectsMap() {
- HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask,
- "HeapSnapshotsCollection::UpdateHeapObjectsMap");
- HeapIterator iterator;
- for (HeapObject* obj = iterator.next();
- obj != NULL;
- obj = iterator.next()) {
- FindOrAddEntry(obj->address(), obj->Size());
- }
- RemoveDeadEntries();
-}
-
-
-SnapshotObjectId HeapObjectsMap::PushHeapObjectsStats(OutputStream* stream) {
- UpdateHeapObjectsMap();
- time_intervals_.Add(TimeInterval(next_id_));
- int prefered_chunk_size = stream->GetChunkSize();
- List<v8::HeapStatsUpdate> stats_buffer;
- ASSERT(!entries_.is_empty());
- EntryInfo* entry_info = &entries_.first();
- EntryInfo* end_entry_info = &entries_.last() + 1;
- for (int time_interval_index = 0;
- time_interval_index < time_intervals_.length();
- ++time_interval_index) {
- TimeInterval& time_interval = time_intervals_[time_interval_index];
- SnapshotObjectId time_interval_id = time_interval.id;
- uint32_t entries_size = 0;
- EntryInfo* start_entry_info = entry_info;
- while (entry_info < end_entry_info && entry_info->id < time_interval_id) {
- entries_size += entry_info->size;
- ++entry_info;
- }
- uint32_t entries_count =
- static_cast<uint32_t>(entry_info - start_entry_info);
- if (time_interval.count != entries_count ||
- time_interval.size != entries_size) {
- stats_buffer.Add(v8::HeapStatsUpdate(
- time_interval_index,
- time_interval.count = entries_count,
- time_interval.size = entries_size));
- if (stats_buffer.length() >= preferred_chunk_size) {
- OutputStream::WriteResult result = stream->WriteHeapStatsChunk(
- &stats_buffer.first(), stats_buffer.length());
- if (result == OutputStream::kAbort) return last_assigned_id();
- stats_buffer.Clear();
- }
- }
- }
- ASSERT(entry_info == end_entry_info);
- if (!stats_buffer.is_empty()) {
- OutputStream::WriteResult result = stream->WriteHeapStatsChunk(
- &stats_buffer.first(), stats_buffer.length());
- if (result == OutputStream::kAbort) return last_assigned_id();
- }
- stream->EndOfStream();
- return last_assigned_id();
-}
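-
-// A minimal consumer sketch for the stats stream above (the class is
-// hypothetical; only the v8::OutputStream hooks used in this file are
-// assumed). Each v8::HeapStatsUpdate carries (time interval index, objects
-// count, objects size), and only intervals whose numbers changed are sent.
-//
-//   class LoggingStream : public v8::OutputStream {
-//    public:
-//     virtual int GetChunkSize() { return 64; }  // preferred buffer length
-//     virtual void EndOfStream() { printf("stats done\n"); }
-//     virtual WriteResult WriteAsciiChunk(char* data, int size) {
-//       return kContinue;  // unused by the stats stream
-//     }
-//     virtual WriteResult WriteHeapStatsChunk(v8::HeapStatsUpdate* data,
-//                                             int count) {
-//       for (int i = 0; i < count; ++i)
-//         printf("[%u] count=%u size=%u\n",
-//                data[i].index, data[i].count, data[i].size);
-//       return kContinue;  // kAbort would stop PushHeapObjectsStats
-//     }
-//   };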
-
-
-void HeapObjectsMap::RemoveDeadEntries() {
- ASSERT(entries_.length() > 0 &&
- entries_.at(0).id == 0 &&
- entries_.at(0).addr == NULL);
- int first_free_entry = 1;
- for (int i = 1; i < entries_.length(); ++i) {
- EntryInfo& entry_info = entries_.at(i);
- if (entry_info.accessed) {
- if (first_free_entry != i) {
- entries_.at(first_free_entry) = entry_info;
- }
- entries_.at(first_free_entry).accessed = false;
- HashMap::Entry* entry = entries_map_.Lookup(
- entry_info.addr, AddressHash(entry_info.addr), false);
- ASSERT(entry);
- entry->value = reinterpret_cast<void*>(first_free_entry);
- ++first_free_entry;
- } else {
- if (entry_info.addr) {
- entries_map_.Remove(entry_info.addr, AddressHash(entry_info.addr));
- }
- }
- }
- entries_.Rewind(first_free_entry);
- ASSERT(static_cast<uint32_t>(entries_.length()) - 1 ==
- entries_map_.occupancy());
-}
-
-
-SnapshotObjectId HeapObjectsMap::GenerateId(v8::RetainedObjectInfo* info) {
- SnapshotObjectId id = static_cast<SnapshotObjectId>(info->GetHash());
- const char* label = info->GetLabel();
- id ^= HashSequentialString(label,
- static_cast<int>(strlen(label)),
- HEAP->HashSeed());
- intptr_t element_count = info->GetElementCount();
- if (element_count != -1)
- id ^= ComputeIntegerHash(static_cast<uint32_t>(element_count),
- v8::internal::kZeroHashSeed);
- return id << 1;
-}
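-
-// Illustrative note: the id is a pure function of the embedder-provided
-// hash, label and element count, so the same native object receives the same
-// id in every snapshot. The trailing "<< 1" reserves the low bit, which is
-// presumably what keeps these ids disjoint from the next_id_-based ids above
-// (an assumption here; the id base constants are defined elsewhere).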
-
-
-size_t HeapObjectsMap::GetUsedMemorySize() const {
- return
- sizeof(*this) +
- sizeof(HashMap::Entry) * entries_map_.capacity() +
- GetMemoryUsedByList(entries_) +
- GetMemoryUsedByList(time_intervals_);
-}
-
-
-HeapSnapshotsCollection::HeapSnapshotsCollection()
- : is_tracking_objects_(false),
- snapshots_uids_(HeapSnapshotsMatch),
- token_enumerator_(new TokenEnumerator()) {
-}
-
-
-static void DeleteHeapSnapshot(HeapSnapshot** snapshot_ptr) {
- delete *snapshot_ptr;
-}
-
-
-HeapSnapshotsCollection::~HeapSnapshotsCollection() {
- delete token_enumerator_;
- snapshots_.Iterate(DeleteHeapSnapshot);
-}
-
-
-HeapSnapshot* HeapSnapshotsCollection::NewSnapshot(HeapSnapshot::Type type,
- const char* name,
- unsigned uid) {
- is_tracking_objects_ = true; // Start watching for heap objects moves.
- return new HeapSnapshot(this, type, name, uid);
-}
-
-
-void HeapSnapshotsCollection::SnapshotGenerationFinished(
- HeapSnapshot* snapshot) {
- ids_.SnapshotGenerationFinished();
- if (snapshot != NULL) {
- snapshots_.Add(snapshot);
- HashMap::Entry* entry =
- snapshots_uids_.Lookup(reinterpret_cast<void*>(snapshot->uid()),
- static_cast<uint32_t>(snapshot->uid()),
- true);
- ASSERT(entry->value == NULL);
- entry->value = snapshot;
- }
-}
-
-
-HeapSnapshot* HeapSnapshotsCollection::GetSnapshot(unsigned uid) {
- HashMap::Entry* entry = snapshots_uids_.Lookup(reinterpret_cast<void*>(uid),
- static_cast<uint32_t>(uid),
- false);
- return entry != NULL ? reinterpret_cast<HeapSnapshot*>(entry->value) : NULL;
-}
-
-
-void HeapSnapshotsCollection::RemoveSnapshot(HeapSnapshot* snapshot) {
- snapshots_.RemoveElement(snapshot);
- unsigned uid = snapshot->uid();
- snapshots_uids_.Remove(reinterpret_cast<void*>(uid),
- static_cast<uint32_t>(uid));
-}
-
-
-Handle<HeapObject> HeapSnapshotsCollection::FindHeapObjectById(
- SnapshotObjectId id) {
- // First perform a full GC in order to avoid dead objects.
- HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask,
- "HeapSnapshotsCollection::FindHeapObjectById");
- AssertNoAllocation no_allocation;
- HeapObject* object = NULL;
- HeapIterator iterator(HeapIterator::kFilterUnreachable);
- // Make sure that object with the given id is still reachable.
- for (HeapObject* obj = iterator.next();
- obj != NULL;
- obj = iterator.next()) {
- if (ids_.FindEntry(obj->address()) == id) {
- ASSERT(object == NULL);
- object = obj;
- // Can't break -- kFilterUnreachable requires full heap traversal.
- }
- }
- return object != NULL ? Handle<HeapObject>(object) : Handle<HeapObject>();
-}
-
-
-size_t HeapSnapshotsCollection::GetUsedMemorySize() const {
- STATIC_CHECK(SnapshotSizeConstants<kPointerSize>::
- kExpectedHeapSnapshotsCollectionSize ==
- sizeof(HeapSnapshotsCollection)); // NOLINT
- size_t size = sizeof(*this);
- size += names_.GetUsedMemorySize();
- size += ids_.GetUsedMemorySize();
- size += sizeof(HashMap::Entry) * snapshots_uids_.capacity();
- size += GetMemoryUsedByList(snapshots_);
- for (int i = 0; i < snapshots_.length(); ++i) {
- size += snapshots_[i]->RawSnapshotSize();
- }
- return size;
-}
-
-
-HeapEntriesMap::HeapEntriesMap()
- : entries_(HeapThingsMatch) {
-}
-
-
-int HeapEntriesMap::Map(HeapThing thing) {
- HashMap::Entry* cache_entry = entries_.Lookup(thing, Hash(thing), false);
- if (cache_entry == NULL) return HeapEntry::kNoEntry;
- return static_cast<int>(reinterpret_cast<intptr_t>(cache_entry->value));
-}
-
-
-void HeapEntriesMap::Pair(HeapThing thing, int entry) {
- HashMap::Entry* cache_entry = entries_.Lookup(thing, Hash(thing), true);
- ASSERT(cache_entry->value == NULL);
- cache_entry->value = reinterpret_cast<void*>(static_cast<intptr_t>(entry));
-}
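-
-// Round-trip sketch: Map() is the lookup side of Pair(), with
-// HeapEntry::kNoEntry standing in for "not paired yet"; pairing is
-// write-once (see the ASSERT above).
-//
-//   HeapEntriesMap map;
-//   ASSERT(map.Map(thing) == HeapEntry::kNoEntry);
-//   map.Pair(thing, 7);
-//   ASSERT(map.Map(thing) == 7);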
-
-
-HeapObjectsSet::HeapObjectsSet()
- : entries_(HeapEntriesMap::HeapThingsMatch) {
-}
-
-
-void HeapObjectsSet::Clear() {
- entries_.Clear();
-}
-
-
-bool HeapObjectsSet::Contains(Object* obj) {
- if (!obj->IsHeapObject()) return false;
- HeapObject* object = HeapObject::cast(obj);
- return entries_.Lookup(object, HeapEntriesMap::Hash(object), false) != NULL;
-}
-
-
-void HeapObjectsSet::Insert(Object* obj) {
- if (!obj->IsHeapObject()) return;
- HeapObject* object = HeapObject::cast(obj);
- entries_.Lookup(object, HeapEntriesMap::Hash(object), true);
-}
-
-
-const char* HeapObjectsSet::GetTag(Object* obj) {
- HeapObject* object = HeapObject::cast(obj);
- HashMap::Entry* cache_entry =
- entries_.Lookup(object, HeapEntriesMap::Hash(object), false);
- return cache_entry != NULL
- ? reinterpret_cast<const char*>(cache_entry->value)
- : NULL;
-}
-
-
-void HeapObjectsSet::SetTag(Object* obj, const char* tag) {
- if (!obj->IsHeapObject()) return;
- HeapObject* object = HeapObject::cast(obj);
- HashMap::Entry* cache_entry =
- entries_.Lookup(object, HeapEntriesMap::Hash(object), true);
- cache_entry->value = const_cast<char*>(tag);
-}
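-
-// Usage sketch: besides plain membership the set doubles as a string-tag
-// map, which is how objects_tags_ and strong_gc_subroot_names_ use it below.
-//
-//   HeapObjectsSet set;
-//   set.Insert(obj);              // membership only, tag stays NULL
-//   set.SetTag(obj, "(my tag)");  // inserts if needed, then attaches a label
-//   if (set.Contains(obj)) printf("%s\n", set.GetTag(obj));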
-
-
-HeapObject* const V8HeapExplorer::kInternalRootObject =
- reinterpret_cast<HeapObject*>(
- static_cast<intptr_t>(HeapObjectsMap::kInternalRootObjectId));
-HeapObject* const V8HeapExplorer::kGcRootsObject =
- reinterpret_cast<HeapObject*>(
- static_cast<intptr_t>(HeapObjectsMap::kGcRootsObjectId));
-HeapObject* const V8HeapExplorer::kFirstGcSubrootObject =
- reinterpret_cast<HeapObject*>(
- static_cast<intptr_t>(HeapObjectsMap::kGcRootsFirstSubrootId));
-HeapObject* const V8HeapExplorer::kLastGcSubrootObject =
- reinterpret_cast<HeapObject*>(
- static_cast<intptr_t>(HeapObjectsMap::kFirstAvailableObjectId));
-
-
-V8HeapExplorer::V8HeapExplorer(
- HeapSnapshot* snapshot,
- SnapshottingProgressReportingInterface* progress)
- : heap_(Isolate::Current()->heap()),
- snapshot_(snapshot),
- collection_(snapshot_->collection()),
- progress_(progress),
- filler_(NULL) {
-}
-
-
-V8HeapExplorer::~V8HeapExplorer() {
-}
-
-
-HeapEntry* V8HeapExplorer::AllocateEntry(HeapThing ptr) {
- return AddEntry(reinterpret_cast<HeapObject*>(ptr));
-}
-
-
-HeapEntry* V8HeapExplorer::AddEntry(HeapObject* object) {
- if (object == kInternalRootObject) {
- snapshot_->AddRootEntry();
- return snapshot_->root();
- } else if (object == kGcRootsObject) {
- HeapEntry* entry = snapshot_->AddGcRootsEntry();
- return entry;
- } else if (object >= kFirstGcSubrootObject && object < kLastGcSubrootObject) {
- HeapEntry* entry = snapshot_->AddGcSubrootEntry(GetGcSubrootOrder(object));
- return entry;
- } else if (object->IsJSFunction()) {
- JSFunction* func = JSFunction::cast(object);
- SharedFunctionInfo* shared = func->shared();
- const char* name = shared->bound() ? "native_bind" :
- collection_->names()->GetName(String::cast(shared->name()));
- return AddEntry(object, HeapEntry::kClosure, name);
- } else if (object->IsJSRegExp()) {
- JSRegExp* re = JSRegExp::cast(object);
- return AddEntry(object,
- HeapEntry::kRegExp,
- collection_->names()->GetName(re->Pattern()));
- } else if (object->IsJSObject()) {
- const char* name = collection_->names()->GetName(
- GetConstructorName(JSObject::cast(object)));
- if (object->IsJSGlobalObject()) {
- const char* tag = objects_tags_.GetTag(object);
- if (tag != NULL) {
- name = collection_->names()->GetFormatted("%s / %s", name, tag);
- }
- }
- return AddEntry(object, HeapEntry::kObject, name);
- } else if (object->IsString()) {
- return AddEntry(object,
- HeapEntry::kString,
- collection_->names()->GetName(String::cast(object)));
- } else if (object->IsCode()) {
- return AddEntry(object, HeapEntry::kCode, "");
- } else if (object->IsSharedFunctionInfo()) {
- String* name = String::cast(SharedFunctionInfo::cast(object)->name());
- return AddEntry(object,
- HeapEntry::kCode,
- collection_->names()->GetName(name));
- } else if (object->IsScript()) {
- Object* name = Script::cast(object)->name();
- return AddEntry(object,
- HeapEntry::kCode,
- name->IsString()
- ? collection_->names()->GetName(String::cast(name))
- : "");
- } else if (object->IsNativeContext()) {
- return AddEntry(object, HeapEntry::kHidden, "system / NativeContext");
- } else if (object->IsContext()) {
- return AddEntry(object, HeapEntry::kHidden, "system / Context");
- } else if (object->IsFixedArray() ||
- object->IsFixedDoubleArray() ||
- object->IsByteArray() ||
- object->IsExternalArray()) {
- return AddEntry(object, HeapEntry::kArray, "");
- } else if (object->IsHeapNumber()) {
- return AddEntry(object, HeapEntry::kHeapNumber, "number");
- }
- return AddEntry(object, HeapEntry::kHidden, GetSystemEntryName(object));
-}
-
-
-HeapEntry* V8HeapExplorer::AddEntry(HeapObject* object,
- HeapEntry::Type type,
- const char* name) {
- int object_size = object->Size();
- SnapshotObjectId object_id =
- collection_->GetObjectId(object->address(), object_size);
- return snapshot_->AddEntry(type, name, object_id, object_size);
-}
-
-
-class GcSubrootsEnumerator : public ObjectVisitor {
- public:
- GcSubrootsEnumerator(
- SnapshotFillerInterface* filler, V8HeapExplorer* explorer)
- : filler_(filler),
- explorer_(explorer),
- previous_object_count_(0),
- object_count_(0) {
- }
- void VisitPointers(Object** start, Object** end) {
- object_count_ += end - start;
- }
- void Synchronize(VisitorSynchronization::SyncTag tag) {
- // Skip empty subroots.
- if (previous_object_count_ != object_count_) {
- previous_object_count_ = object_count_;
- filler_->AddEntry(V8HeapExplorer::GetNthGcSubrootObject(tag), explorer_);
- }
- }
- private:
- SnapshotFillerInterface* filler_;
- V8HeapExplorer* explorer_;
- intptr_t previous_object_count_;
- intptr_t object_count_;
-};
-
-
-void V8HeapExplorer::AddRootEntries(SnapshotFillerInterface* filler) {
- filler->AddEntry(kInternalRootObject, this);
- filler->AddEntry(kGcRootsObject, this);
- GcSubrootsEnumerator enumerator(filler, this);
- heap_->IterateRoots(&enumerator, VISIT_ALL);
-}
-
-
-const char* V8HeapExplorer::GetSystemEntryName(HeapObject* object) {
- switch (object->map()->instance_type()) {
- case MAP_TYPE: return "system / Map";
- case JS_GLOBAL_PROPERTY_CELL_TYPE: return "system / JSGlobalPropertyCell";
- case FOREIGN_TYPE: return "system / Foreign";
- case ODDBALL_TYPE: return "system / Oddball";
-#define MAKE_STRUCT_CASE(NAME, Name, name) \
- case NAME##_TYPE: return "system / "#Name;
- STRUCT_LIST(MAKE_STRUCT_CASE)
-#undef MAKE_STRUCT_CASE
- default: return "system";
- }
-}
-
-
-int V8HeapExplorer::EstimateObjectsCount(HeapIterator* iterator) {
- int objects_count = 0;
- for (HeapObject* obj = iterator->next();
- obj != NULL;
- obj = iterator->next()) {
- objects_count++;
- }
- return objects_count;
-}
-
-
-class IndexedReferencesExtractor : public ObjectVisitor {
- public:
- IndexedReferencesExtractor(V8HeapExplorer* generator,
- HeapObject* parent_obj,
- int parent)
- : generator_(generator),
- parent_obj_(parent_obj),
- parent_(parent),
- next_index_(1) {
- }
- void VisitPointers(Object** start, Object** end) {
- for (Object** p = start; p < end; p++) {
- if (CheckVisitedAndUnmark(p)) continue;
- generator_->SetHiddenReference(parent_obj_, parent_, next_index_++, *p);
- }
- }
- static void MarkVisitedField(HeapObject* obj, int offset) {
- if (offset < 0) return;
- Address field = obj->address() + offset;
- ASSERT(!Memory::Object_at(field)->IsFailure());
- ASSERT(Memory::Object_at(field)->IsHeapObject());
- *field |= kFailureTag;
- }
-
- private:
- bool CheckVisitedAndUnmark(Object** field) {
- if ((*field)->IsFailure()) {
- intptr_t untagged = reinterpret_cast<intptr_t>(*field) & ~kFailureTagMask;
- *field = reinterpret_cast<Object*>(untagged | kHeapObjectTag);
- ASSERT((*field)->IsHeapObject());
- return true;
- }
- return false;
- }
- V8HeapExplorer* generator_;
- HeapObject* parent_obj_;
- int parent_;
- int next_index_;
-};
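-
-// How the marking above works (a sketch; it relies on the tagging scheme the
-// ASSERTs in MarkVisitedField imply): fields already reported through the
-// Set*Reference helpers get kFailureTag OR-ed into the stored word, so the
-// generic indexed pass sees a Failure and skips them instead of emitting a
-// duplicate edge; CheckVisitedAndUnmark then restores the heap object tag.
-//
-//   marked   = raw | kFailureTag;                         // MarkVisitedField
-//   restored = (marked & ~kFailureTagMask) | kHeapObjectTag;  // unmark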
-
-
-void V8HeapExplorer::ExtractReferences(HeapObject* obj) {
- HeapEntry* heap_entry = GetEntry(obj);
- if (heap_entry == NULL) return; // No interest in this object.
- int entry = heap_entry->index();
-
- bool extract_indexed_refs = true;
- if (obj->IsJSGlobalProxy()) {
- ExtractJSGlobalProxyReferences(JSGlobalProxy::cast(obj));
-
-
- ExtractJSObjectReferences(entry, JSObject::cast(obj));
- } else if (obj->IsString()) {
- ExtractStringReferences(entry, String::cast(obj));
- extract_indexed_refs = false;
- } else if (obj->IsContext()) {
- ExtractContextReferences(entry, Context::cast(obj));
- } else if (obj->IsMap()) {
- ExtractMapReferences(entry, Map::cast(obj));
- } else if (obj->IsSharedFunctionInfo()) {
- ExtractSharedFunctionInfoReferences(entry, SharedFunctionInfo::cast(obj));
- } else if (obj->IsScript()) {
- ExtractScriptReferences(entry, Script::cast(obj));
- } else if (obj->IsCodeCache()) {
- ExtractCodeCacheReferences(entry, CodeCache::cast(obj));
- } else if (obj->IsCode()) {
- ExtractCodeReferences(entry, Code::cast(obj));
- } else if (obj->IsJSGlobalPropertyCell()) {
- ExtractJSGlobalPropertyCellReferences(
- entry, JSGlobalPropertyCell::cast(obj));
- extract_indexed_refs = false;
- }
- if (extract_indexed_refs) {
- SetInternalReference(obj, entry, "map", obj->map(), HeapObject::kMapOffset);
- IndexedReferencesExtractor refs_extractor(this, obj, entry);
- obj->Iterate(&refs_extractor);
- }
-}
-
-
-void V8HeapExplorer::ExtractJSGlobalProxyReferences(JSGlobalProxy* proxy) {
- // We need to reference JS global objects from the snapshot's root.
- // We use JSGlobalProxy because this is what the embedder (e.g. a browser)
- // uses for the global object.
- Object* object = proxy->map()->prototype();
- bool is_debug_object = false;
-#ifdef ENABLE_DEBUGGER_SUPPORT
- is_debug_object = object->IsGlobalObject() &&
- Isolate::Current()->debug()->IsDebugGlobal(GlobalObject::cast(object));
-#endif
- if (!is_debug_object) {
- SetUserGlobalReference(object);
- }
-}
-
-
-void V8HeapExplorer::ExtractJSObjectReferences(
- int entry, JSObject* js_obj) {
- HeapObject* obj = js_obj;
- ExtractClosureReferences(js_obj, entry);
- ExtractPropertyReferences(js_obj, entry);
- ExtractElementReferences(js_obj, entry);
- ExtractInternalReferences(js_obj, entry);
- SetPropertyReference(
- obj, entry, heap_->Proto_symbol(), js_obj->GetPrototype());
- if (obj->IsJSFunction()) {
- JSFunction* js_fun = JSFunction::cast(js_obj);
- Object* proto_or_map = js_fun->prototype_or_initial_map();
- if (!proto_or_map->IsTheHole()) {
- if (!proto_or_map->IsMap()) {
- SetPropertyReference(
- obj, entry,
- heap_->prototype_symbol(), proto_or_map,
- NULL,
- JSFunction::kPrototypeOrInitialMapOffset);
- } else {
- SetPropertyReference(
- obj, entry,
- heap_->prototype_symbol(), js_fun->prototype());
- }
- }
- SharedFunctionInfo* shared_info = js_fun->shared();
- // JSFunction has either bindings or literals and never both.
- bool bound = shared_info->bound();
- TagObject(js_fun->literals_or_bindings(),
- bound ? "(function bindings)" : "(function literals)");
- SetInternalReference(js_fun, entry,
- bound ? "bindings" : "literals",
- js_fun->literals_or_bindings(),
- JSFunction::kLiteralsOffset);
- TagObject(shared_info, "(shared function info)");
- SetInternalReference(js_fun, entry,
- "shared", shared_info,
- JSFunction::kSharedFunctionInfoOffset);
- TagObject(js_fun->unchecked_context(), "(context)");
- SetInternalReference(js_fun, entry,
- "context", js_fun->unchecked_context(),
- JSFunction::kContextOffset);
- for (int i = JSFunction::kNonWeakFieldsEndOffset;
- i < JSFunction::kSize;
- i += kPointerSize) {
- SetWeakReference(js_fun, entry, i, *HeapObject::RawField(js_fun, i), i);
- }
- } else if (obj->IsGlobalObject()) {
- GlobalObject* global_obj = GlobalObject::cast(obj);
- SetInternalReference(global_obj, entry,
- "builtins", global_obj->builtins(),
- GlobalObject::kBuiltinsOffset);
- SetInternalReference(global_obj, entry,
- "native_context", global_obj->native_context(),
- GlobalObject::kNativeContextOffset);
- SetInternalReference(global_obj, entry,
- "global_receiver", global_obj->global_receiver(),
- GlobalObject::kGlobalReceiverOffset);
- }
- TagObject(js_obj->properties(), "(object properties)");
- SetInternalReference(obj, entry,
- "properties", js_obj->properties(),
- JSObject::kPropertiesOffset);
- TagObject(js_obj->elements(), "(object elements)");
- SetInternalReference(obj, entry,
- "elements", js_obj->elements(),
- JSObject::kElementsOffset);
-}
-
-
-void V8HeapExplorer::ExtractStringReferences(int entry, String* string) {
- if (string->IsConsString()) {
- ConsString* cs = ConsString::cast(string);
- SetInternalReference(cs, entry, "first", cs->first());
- SetInternalReference(cs, entry, "second", cs->second());
- } else if (string->IsSlicedString()) {
- SlicedString* ss = SlicedString::cast(string);
- SetInternalReference(ss, entry, "parent", ss->parent());
- }
-}
-
-
-void V8HeapExplorer::ExtractContextReferences(int entry, Context* context) {
-#define EXTRACT_CONTEXT_FIELD(index, type, name) \
- SetInternalReference(context, entry, #name, context->get(Context::index), \
- FixedArray::OffsetOfElementAt(Context::index));
- EXTRACT_CONTEXT_FIELD(CLOSURE_INDEX, JSFunction, closure);
- EXTRACT_CONTEXT_FIELD(PREVIOUS_INDEX, Context, previous);
- EXTRACT_CONTEXT_FIELD(EXTENSION_INDEX, Object, extension);
- EXTRACT_CONTEXT_FIELD(GLOBAL_OBJECT_INDEX, GlobalObject, global);
- if (context->IsNativeContext()) {
- TagObject(context->jsfunction_result_caches(),
- "(context func. result caches)");
- TagObject(context->normalized_map_cache(), "(context norm. map cache)");
- TagObject(context->runtime_context(), "(runtime context)");
- TagObject(context->data(), "(context data)");
- NATIVE_CONTEXT_FIELDS(EXTRACT_CONTEXT_FIELD);
-#undef EXTRACT_CONTEXT_FIELD
- for (int i = Context::FIRST_WEAK_SLOT;
- i < Context::NATIVE_CONTEXT_SLOTS;
- ++i) {
- SetWeakReference(context, entry, i, context->get(i),
- FixedArray::OffsetOfElementAt(i));
- }
- }
-}
-
-
-void V8HeapExplorer::ExtractMapReferences(int entry, Map* map) {
- SetInternalReference(map, entry,
- "prototype", map->prototype(), Map::kPrototypeOffset);
- SetInternalReference(map, entry,
- "constructor", map->constructor(),
- Map::kConstructorOffset);
- if (map->HasTransitionArray()) {
- TransitionArray* transitions = map->transitions();
-
- Object* back_pointer = transitions->back_pointer_storage();
- TagObject(transitions->back_pointer_storage(), "(back pointer)");
- SetInternalReference(transitions, entry,
- "backpointer", back_pointer,
- TransitionArray::kBackPointerStorageOffset);
- IndexedReferencesExtractor transitions_refs(this, transitions, entry);
- transitions->Iterate(&transitions_refs);
-
- TagObject(transitions, "(transition array)");
- SetInternalReference(map, entry,
- "transitions", transitions,
- Map::kTransitionsOrBackPointerOffset);
- } else {
- Object* back_pointer = map->GetBackPointer();
- TagObject(back_pointer, "(back pointer)");
- SetInternalReference(map, entry,
- "backpointer", back_pointer,
- Map::kTransitionsOrBackPointerOffset);
- }
- DescriptorArray* descriptors = map->instance_descriptors();
- TagObject(descriptors, "(map descriptors)");
- SetInternalReference(map, entry,
- "descriptors", descriptors,
- Map::kDescriptorsOffset);
-
- SetInternalReference(map, entry,
- "code_cache", map->code_cache(),
- Map::kCodeCacheOffset);
-}
-
-
-void V8HeapExplorer::ExtractSharedFunctionInfoReferences(
- int entry, SharedFunctionInfo* shared) {
- HeapObject* obj = shared;
- SetInternalReference(obj, entry,
- "name", shared->name(),
- SharedFunctionInfo::kNameOffset);
- TagObject(shared->code(), "(code)");
- SetInternalReference(obj, entry,
- "code", shared->code(),
- SharedFunctionInfo::kCodeOffset);
- TagObject(shared->scope_info(), "(function scope info)");
- SetInternalReference(obj, entry,
- "scope_info", shared->scope_info(),
- SharedFunctionInfo::kScopeInfoOffset);
- SetInternalReference(obj, entry,
- "instance_class_name", shared->instance_class_name(),
- SharedFunctionInfo::kInstanceClassNameOffset);
- SetInternalReference(obj, entry,
- "script", shared->script(),
- SharedFunctionInfo::kScriptOffset);
- TagObject(shared->construct_stub(), "(code)");
- SetInternalReference(obj, entry,
- "construct_stub", shared->construct_stub(),
- SharedFunctionInfo::kConstructStubOffset);
- SetInternalReference(obj, entry,
- "function_data", shared->function_data(),
- SharedFunctionInfo::kFunctionDataOffset);
- SetInternalReference(obj, entry,
- "debug_info", shared->debug_info(),
- SharedFunctionInfo::kDebugInfoOffset);
- SetInternalReference(obj, entry,
- "inferred_name", shared->inferred_name(),
- SharedFunctionInfo::kInferredNameOffset);
- SetInternalReference(obj, entry,
- "this_property_assignments",
- shared->this_property_assignments(),
- SharedFunctionInfo::kThisPropertyAssignmentsOffset);
- SetWeakReference(obj, entry,
- 1, shared->initial_map(),
- SharedFunctionInfo::kInitialMapOffset);
-}
-
-
-void V8HeapExplorer::ExtractScriptReferences(int entry, Script* script) {
- HeapObject* obj = script;
- SetInternalReference(obj, entry,
- "source", script->source(),
- Script::kSourceOffset);
- SetInternalReference(obj, entry,
- "name", script->name(),
- Script::kNameOffset);
- SetInternalReference(obj, entry,
- "data", script->data(),
- Script::kDataOffset);
- SetInternalReference(obj, entry,
- "context_data", script->context_data(),
- Script::kContextOffset);
- TagObject(script->line_ends(), "(script line ends)");
- SetInternalReference(obj, entry,
- "line_ends", script->line_ends(),
- Script::kLineEndsOffset);
-}
-
-
-void V8HeapExplorer::ExtractCodeCacheReferences(
- int entry, CodeCache* code_cache) {
- TagObject(code_cache->default_cache(), "(default code cache)");
- SetInternalReference(code_cache, entry,
- "default_cache", code_cache->default_cache(),
- CodeCache::kDefaultCacheOffset);
- TagObject(code_cache->normal_type_cache(), "(code type cache)");
- SetInternalReference(code_cache, entry,
- "type_cache", code_cache->normal_type_cache(),
- CodeCache::kNormalTypeCacheOffset);
-}
-
-
-void V8HeapExplorer::ExtractCodeReferences(int entry, Code* code) {
- TagObject(code->relocation_info(), "(code relocation info)");
- SetInternalReference(code, entry,
- "relocation_info", code->relocation_info(),
- Code::kRelocationInfoOffset);
- SetInternalReference(code, entry,
- "handler_table", code->handler_table(),
- Code::kHandlerTableOffset);
- TagObject(code->deoptimization_data(), "(code deopt data)");
- SetInternalReference(code, entry,
- "deoptimization_data", code->deoptimization_data(),
- Code::kDeoptimizationDataOffset);
- SetInternalReference(code, entry,
- "type_feedback_info", code->type_feedback_info(),
- Code::kTypeFeedbackInfoOffset);
- SetInternalReference(code, entry,
- "gc_metadata", code->gc_metadata(),
- Code::kGCMetadataOffset);
-}
-
-
-void V8HeapExplorer::ExtractJSGlobalPropertyCellReferences(
- int entry, JSGlobalPropertyCell* cell) {
- SetInternalReference(cell, entry, "value", cell->value());
-}
-
-
-void V8HeapExplorer::ExtractClosureReferences(JSObject* js_obj, int entry) {
- if (!js_obj->IsJSFunction()) return;
-
- JSFunction* func = JSFunction::cast(js_obj);
- if (func->shared()->bound()) {
- FixedArray* bindings = func->function_bindings();
- SetNativeBindReference(js_obj, entry, "bound_this",
- bindings->get(JSFunction::kBoundThisIndex));
- SetNativeBindReference(js_obj, entry, "bound_function",
- bindings->get(JSFunction::kBoundFunctionIndex));
- for (int i = JSFunction::kBoundArgumentsStartIndex;
- i < bindings->length(); i++) {
- const char* reference_name = collection_->names()->GetFormatted(
- "bound_argument_%d",
- i - JSFunction::kBoundArgumentsStartIndex);
- SetNativeBindReference(js_obj, entry, reference_name,
- bindings->get(i));
- }
- } else {
- Context* context = func->context()->declaration_context();
- ScopeInfo* scope_info = context->closure()->shared()->scope_info();
- // Add context allocated locals.
- int context_locals = scope_info->ContextLocalCount();
- for (int i = 0; i < context_locals; ++i) {
- String* local_name = scope_info->ContextLocalName(i);
- int idx = Context::MIN_CONTEXT_SLOTS + i;
- SetClosureReference(js_obj, entry, local_name, context->get(idx));
- }
-
- // Add function variable.
- if (scope_info->HasFunctionName()) {
- String* name = scope_info->FunctionName();
- VariableMode mode;
- int idx = scope_info->FunctionContextSlotIndex(name, &mode);
- if (idx >= 0) {
- SetClosureReference(js_obj, entry, name, context->get(idx));
- }
- }
- }
-}
-
-
-void V8HeapExplorer::ExtractPropertyReferences(JSObject* js_obj, int entry) {
- if (js_obj->HasFastProperties()) {
- DescriptorArray* descs = js_obj->map()->instance_descriptors();
- int real_size = js_obj->map()->NumberOfOwnDescriptors();
- for (int i = 0; i < descs->number_of_descriptors(); i++) {
- if (descs->GetDetails(i).descriptor_index() > real_size) continue;
- switch (descs->GetType(i)) {
- case FIELD: {
- int index = descs->GetFieldIndex(i);
-
- String* k = descs->GetKey(i);
- if (index < js_obj->map()->inobject_properties()) {
- Object* value = js_obj->InObjectPropertyAt(index);
- if (k != heap_->hidden_symbol()) {
- SetPropertyReference(
- js_obj, entry,
- k, value,
- NULL,
- js_obj->GetInObjectPropertyOffset(index));
- } else {
- TagObject(value, "(hidden properties)");
- SetInternalReference(
- js_obj, entry,
- "hidden_properties", value,
- js_obj->GetInObjectPropertyOffset(index));
- }
- } else {
- Object* value = js_obj->FastPropertyAt(index);
- if (k != heap_->hidden_symbol()) {
- SetPropertyReference(js_obj, entry, k, value);
- } else {
- TagObject(value, "(hidden properties)");
- SetInternalReference(js_obj, entry, "hidden_properties", value);
- }
- }
- break;
- }
- case CONSTANT_FUNCTION:
- SetPropertyReference(
- js_obj, entry,
- descs->GetKey(i), descs->GetConstantFunction(i));
- break;
- case CALLBACKS: {
- Object* callback_obj = descs->GetValue(i);
- if (callback_obj->IsAccessorPair()) {
- AccessorPair* accessors = AccessorPair::cast(callback_obj);
- if (Object* getter = accessors->getter()) {
- SetPropertyReference(js_obj, entry, descs->GetKey(i),
- getter, "get-%s");
- }
- if (Object* setter = accessors->setter()) {
- SetPropertyReference(js_obj, entry, descs->GetKey(i),
- setter, "set-%s");
- }
- }
- break;
- }
- case NORMAL: // only in slow mode
- case HANDLER: // only in lookup results, not in descriptors
- case INTERCEPTOR: // only in lookup results, not in descriptors
- break;
- case TRANSITION:
- case NONEXISTENT:
- UNREACHABLE();
- break;
- }
- }
- } else {
- StringDictionary* dictionary = js_obj->property_dictionary();
- int length = dictionary->Capacity();
- for (int i = 0; i < length; ++i) {
- Object* k = dictionary->KeyAt(i);
- if (dictionary->IsKey(k)) {
- Object* target = dictionary->ValueAt(i);
- // We assume that global objects can only have slow properties.
- Object* value = target->IsJSGlobalPropertyCell()
- ? JSGlobalPropertyCell::cast(target)->value()
- : target;
- if (k != heap_->hidden_symbol()) {
- SetPropertyReference(js_obj, entry, String::cast(k), value);
- } else {
- TagObject(value, "(hidden properties)");
- SetInternalReference(js_obj, entry, "hidden_properties", value);
- }
- }
- }
- }
-}
-
-
-void V8HeapExplorer::ExtractElementReferences(JSObject* js_obj, int entry) {
- if (js_obj->HasFastObjectElements()) {
- FixedArray* elements = FixedArray::cast(js_obj->elements());
- int length = js_obj->IsJSArray() ?
- Smi::cast(JSArray::cast(js_obj)->length())->value() :
- elements->length();
- for (int i = 0; i < length; ++i) {
- if (!elements->get(i)->IsTheHole()) {
- SetElementReference(js_obj, entry, i, elements->get(i));
- }
- }
- } else if (js_obj->HasDictionaryElements()) {
- SeededNumberDictionary* dictionary = js_obj->element_dictionary();
- int length = dictionary->Capacity();
- for (int i = 0; i < length; ++i) {
- Object* k = dictionary->KeyAt(i);
- if (dictionary->IsKey(k)) {
- ASSERT(k->IsNumber());
- uint32_t index = static_cast<uint32_t>(k->Number());
- SetElementReference(js_obj, entry, index, dictionary->ValueAt(i));
- }
- }
- }
-}
-
-
-void V8HeapExplorer::ExtractInternalReferences(JSObject* js_obj, int entry) {
- int length = js_obj->GetInternalFieldCount();
- for (int i = 0; i < length; ++i) {
- Object* o = js_obj->GetInternalField(i);
- SetInternalReference(
- js_obj, entry, i, o, js_obj->GetInternalFieldOffset(i));
- }
-}
-
-
-String* V8HeapExplorer::GetConstructorName(JSObject* object) {
- Heap* heap = object->GetHeap();
- if (object->IsJSFunction()) return heap->closure_symbol();
- String* constructor_name = object->constructor_name();
- if (constructor_name == heap->Object_symbol()) {
- // Look up an immediate "constructor" property; if it is a function,
- // return its name. This is for instances of binding objects, whose
- // prototype constructor type is "Object".
- Object* constructor_prop = NULL;
- LookupResult result(heap->isolate());
- object->LocalLookupRealNamedProperty(heap->constructor_symbol(), &result);
- if (!result.IsFound()) return object->constructor_name();
-
- constructor_prop = result.GetLazyValue();
- if (constructor_prop->IsJSFunction()) {
- Object* maybe_name =
- JSFunction::cast(constructor_prop)->shared()->name();
- if (maybe_name->IsString()) {
- String* name = String::cast(maybe_name);
- if (name->length() > 0) return name;
- }
- }
- }
- return object->constructor_name();
-}
-
-
-HeapEntry* V8HeapExplorer::GetEntry(Object* obj) {
- if (!obj->IsHeapObject()) return NULL;
- return filler_->FindOrAddEntry(obj, this);
-}
-
-
-class RootsReferencesExtractor : public ObjectVisitor {
- private:
- struct IndexTag {
- IndexTag(int index, VisitorSynchronization::SyncTag tag)
- : index(index), tag(tag) { }
- int index;
- VisitorSynchronization::SyncTag tag;
- };
-
- public:
- RootsReferencesExtractor()
- : collecting_all_references_(false),
- previous_reference_count_(0) {
- }
-
- void VisitPointers(Object** start, Object** end) {
- if (collecting_all_references_) {
- for (Object** p = start; p < end; p++) all_references_.Add(*p);
- } else {
- for (Object** p = start; p < end; p++) strong_references_.Add(*p);
- }
- }
-
- void SetCollectingAllReferences() { collecting_all_references_ = true; }
-
- void FillReferences(V8HeapExplorer* explorer) {
- ASSERT(strong_references_.length() <= all_references_.length());
- for (int i = 0; i < reference_tags_.length(); ++i) {
- explorer->SetGcRootsReference(reference_tags_[i].tag);
- }
- int strong_index = 0, all_index = 0, tags_index = 0;
- while (all_index < all_references_.length()) {
- if (strong_index < strong_references_.length() &&
- strong_references_[strong_index] == all_references_[all_index]) {
- explorer->SetGcSubrootReference(reference_tags_[tags_index].tag,
- false,
- all_references_[all_index++]);
- ++strong_index;
- } else {
- explorer->SetGcSubrootReference(reference_tags_[tags_index].tag,
- true,
- all_references_[all_index++]);
- }
- if (reference_tags_[tags_index].index == all_index) ++tags_index;
- }
- }
-
- void Synchronize(VisitorSynchronization::SyncTag tag) {
- if (collecting_all_references_ &&
- previous_reference_count_ != all_references_.length()) {
- previous_reference_count_ = all_references_.length();
- reference_tags_.Add(IndexTag(previous_reference_count_, tag));
- }
- }
-
- private:
- bool collecting_all_references_;
- List<Object*> strong_references_;
- List<Object*> all_references_;
- int previous_reference_count_;
- List<IndexTag> reference_tags_;
-};
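-
-// Sketch of the two-pass protocol this visitor expects (it is what
-// IterateAndExtractReferences below performs): the strong-only pass must run
-// first, and FillReferences classifies a root as weak exactly when it shows
-// up in the second list but not the first.
-//
-//   RootsReferencesExtractor extractor;
-//   heap->IterateRoots(&extractor, VISIT_ONLY_STRONG);
-//   extractor.SetCollectingAllReferences();
-//   heap->IterateRoots(&extractor, VISIT_ALL);
-//   extractor.FillReferences(explorer);  // emits strong vs. weak edges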
-
-
-bool V8HeapExplorer::IterateAndExtractReferences(
- SnapshotFillerInterface* filler) {
- HeapIterator iterator(HeapIterator::kFilterUnreachable);
-
- filler_ = filler;
- bool interrupted = false;
-
- // Heap iteration with filtering must run to completion in any case,
- // so keep iterating even after the progress report is interrupted.
- for (HeapObject* obj = iterator.next();
- obj != NULL;
- obj = iterator.next(), progress_->ProgressStep()) {
- if (!interrupted) {
- ExtractReferences(obj);
- if (!progress_->ProgressReport(false)) interrupted = true;
- }
- }
- if (interrupted) {
- filler_ = NULL;
- return false;
- }
-
- SetRootGcRootsReference();
- RootsReferencesExtractor extractor;
- heap_->IterateRoots(&extractor, VISIT_ONLY_STRONG);
- extractor.SetCollectingAllReferences();
- heap_->IterateRoots(&extractor, VISIT_ALL);
- extractor.FillReferences(this);
- filler_ = NULL;
- return progress_->ProgressReport(true);
-}
-
-
-bool V8HeapExplorer::IsEssentialObject(Object* object) {
- return object->IsHeapObject()
- && !object->IsOddball()
- && object != heap_->empty_byte_array()
- && object != heap_->empty_fixed_array()
- && object != heap_->empty_descriptor_array()
- && object != heap_->fixed_array_map()
- && object != heap_->global_property_cell_map()
- && object != heap_->shared_function_info_map()
- && object != heap_->free_space_map()
- && object != heap_->one_pointer_filler_map()
- && object != heap_->two_pointer_filler_map();
-}
-
-
-void V8HeapExplorer::SetClosureReference(HeapObject* parent_obj,
- int parent_entry,
- String* reference_name,
- Object* child_obj) {
- HeapEntry* child_entry = GetEntry(child_obj);
- if (child_entry != NULL) {
- filler_->SetNamedReference(HeapGraphEdge::kContextVariable,
- parent_entry,
- collection_->names()->GetName(reference_name),
- child_entry);
- }
-}
-
-
-void V8HeapExplorer::SetNativeBindReference(HeapObject* parent_obj,
- int parent_entry,
- const char* reference_name,
- Object* child_obj) {
- HeapEntry* child_entry = GetEntry(child_obj);
- if (child_entry != NULL) {
- filler_->SetNamedReference(HeapGraphEdge::kShortcut,
- parent_entry,
- reference_name,
- child_entry);
- }
-}
-
-
-void V8HeapExplorer::SetElementReference(HeapObject* parent_obj,
- int parent_entry,
- int index,
- Object* child_obj) {
- HeapEntry* child_entry = GetEntry(child_obj);
- if (child_entry != NULL) {
- filler_->SetIndexedReference(HeapGraphEdge::kElement,
- parent_entry,
- index,
- child_entry);
- }
-}
-
-
-void V8HeapExplorer::SetInternalReference(HeapObject* parent_obj,
- int parent_entry,
- const char* reference_name,
- Object* child_obj,
- int field_offset) {
- HeapEntry* child_entry = GetEntry(child_obj);
- if (child_entry == NULL) return;
- if (IsEssentialObject(child_obj)) {
- filler_->SetNamedReference(HeapGraphEdge::kInternal,
- parent_entry,
- reference_name,
- child_entry);
- }
- IndexedReferencesExtractor::MarkVisitedField(parent_obj, field_offset);
-}
-
-
-void V8HeapExplorer::SetInternalReference(HeapObject* parent_obj,
- int parent_entry,
- int index,
- Object* child_obj,
- int field_offset) {
- HeapEntry* child_entry = GetEntry(child_obj);
- if (child_entry == NULL) return;
- if (IsEssentialObject(child_obj)) {
- filler_->SetNamedReference(HeapGraphEdge::kInternal,
- parent_entry,
- collection_->names()->GetName(index),
- child_entry);
- }
- IndexedReferencesExtractor::MarkVisitedField(parent_obj, field_offset);
-}
-
-
-void V8HeapExplorer::SetHiddenReference(HeapObject* parent_obj,
- int parent_entry,
- int index,
- Object* child_obj) {
- HeapEntry* child_entry = GetEntry(child_obj);
- if (child_entry != NULL && IsEssentialObject(child_obj)) {
- filler_->SetIndexedReference(HeapGraphEdge::kHidden,
- parent_entry,
- index,
- child_entry);
- }
-}
-
-
-void V8HeapExplorer::SetWeakReference(HeapObject* parent_obj,
- int parent_entry,
- int index,
- Object* child_obj,
- int field_offset) {
- HeapEntry* child_entry = GetEntry(child_obj);
- if (child_entry != NULL) {
- filler_->SetIndexedReference(HeapGraphEdge::kWeak,
- parent_entry,
- index,
- child_entry);
- IndexedReferencesExtractor::MarkVisitedField(parent_obj, field_offset);
- }
-}
-
-
-void V8HeapExplorer::SetPropertyReference(HeapObject* parent_obj,
- int parent_entry,
- String* reference_name,
- Object* child_obj,
- const char* name_format_string,
- int field_offset) {
- HeapEntry* child_entry = GetEntry(child_obj);
- if (child_entry != NULL) {
- HeapGraphEdge::Type type = reference_name->length() > 0 ?
- HeapGraphEdge::kProperty : HeapGraphEdge::kInternal;
- const char* name = name_format_string != NULL ?
- collection_->names()->GetFormatted(
- name_format_string,
- *reference_name->ToCString(DISALLOW_NULLS,
- ROBUST_STRING_TRAVERSAL)) :
- collection_->names()->GetName(reference_name);
-
- filler_->SetNamedReference(type,
- parent_entry,
- name,
- child_entry);
- IndexedReferencesExtractor::MarkVisitedField(parent_obj, field_offset);
- }
-}
-
-
-void V8HeapExplorer::SetRootGcRootsReference() {
- filler_->SetIndexedAutoIndexReference(
- HeapGraphEdge::kElement,
- snapshot_->root()->index(),
- snapshot_->gc_roots());
-}
-
-
-void V8HeapExplorer::SetUserGlobalReference(Object* child_obj) {
- HeapEntry* child_entry = GetEntry(child_obj);
- ASSERT(child_entry != NULL);
- filler_->SetNamedAutoIndexReference(
- HeapGraphEdge::kShortcut,
- snapshot_->root()->index(),
- child_entry);
-}
-
-
-void V8HeapExplorer::SetGcRootsReference(VisitorSynchronization::SyncTag tag) {
- filler_->SetIndexedAutoIndexReference(
- HeapGraphEdge::kElement,
- snapshot_->gc_roots()->index(),
- snapshot_->gc_subroot(tag));
-}
-
-
-void V8HeapExplorer::SetGcSubrootReference(
- VisitorSynchronization::SyncTag tag, bool is_weak, Object* child_obj) {
- HeapEntry* child_entry = GetEntry(child_obj);
- if (child_entry != NULL) {
- const char* name = GetStrongGcSubrootName(child_obj);
- if (name != NULL) {
- filler_->SetNamedReference(
- HeapGraphEdge::kInternal,
- snapshot_->gc_subroot(tag)->index(),
- name,
- child_entry);
- } else {
- filler_->SetIndexedAutoIndexReference(
- is_weak ? HeapGraphEdge::kWeak : HeapGraphEdge::kElement,
- snapshot_->gc_subroot(tag)->index(),
- child_entry);
- }
- }
-}
-
-
-const char* V8HeapExplorer::GetStrongGcSubrootName(Object* object) {
- if (strong_gc_subroot_names_.is_empty()) {
-#define NAME_ENTRY(name) strong_gc_subroot_names_.SetTag(heap_->name(), #name);
-#define ROOT_NAME(type, name, camel_name) NAME_ENTRY(name)
- STRONG_ROOT_LIST(ROOT_NAME)
-#undef ROOT_NAME
-#define STRUCT_MAP_NAME(NAME, Name, name) NAME_ENTRY(name##_map)
- STRUCT_LIST(STRUCT_MAP_NAME)
-#undef STRUCT_MAP_NAME
-#define SYMBOL_NAME(name, str) NAME_ENTRY(name)
- SYMBOL_LIST(SYMBOL_NAME)
-#undef SYMBOL_NAME
-#undef NAME_ENTRY
- CHECK(!strong_gc_subroot_names_.is_empty());
- }
- return strong_gc_subroot_names_.GetTag(object);
-}
-
-
-void V8HeapExplorer::TagObject(Object* obj, const char* tag) {
- if (IsEssentialObject(obj)) {
- HeapEntry* entry = GetEntry(obj);
- if (entry->name()[0] == '\0') {
- entry->set_name(tag);
- }
- }
-}
-
-
-class GlobalObjectsEnumerator : public ObjectVisitor {
- public:
- virtual void VisitPointers(Object** start, Object** end) {
- for (Object** p = start; p < end; p++) {
- if ((*p)->IsNativeContext()) {
- Context* context = Context::cast(*p);
- JSObject* proxy = context->global_proxy();
- if (proxy->IsJSGlobalProxy()) {
- Object* global = proxy->map()->prototype();
- if (global->IsJSGlobalObject()) {
- objects_.Add(Handle<JSGlobalObject>(JSGlobalObject::cast(global)));
- }
- }
- }
- }
- }
- int count() { return objects_.length(); }
- Handle<JSGlobalObject>& at(int i) { return objects_[i]; }
-
- private:
- List<Handle<JSGlobalObject> > objects_;
-};
-
-
-// Modifies heap. Must not be run during heap traversal.
-void V8HeapExplorer::TagGlobalObjects() {
- HandleScope scope;
- Isolate* isolate = Isolate::Current();
- GlobalObjectsEnumerator enumerator;
- isolate->global_handles()->IterateAllRoots(&enumerator);
- Handle<String> document_string =
- isolate->factory()->NewStringFromAscii(CStrVector("document"));
- Handle<String> url_string =
- isolate->factory()->NewStringFromAscii(CStrVector("URL"));
- const char** urls = NewArray<const char*>(enumerator.count());
- for (int i = 0, l = enumerator.count(); i < l; ++i) {
- urls[i] = NULL;
- HandleScope scope;
- Handle<JSGlobalObject> global_obj = enumerator.at(i);
- Object* obj_document;
- if (global_obj->GetProperty(*document_string)->ToObject(&obj_document) &&
- obj_document->IsJSObject()) {
- // FixMe: Workaround: SharedWorker's current Isolate has NULL context.
- // As result GetProperty(*url_string) will crash.
- if (!Isolate::Current()->context() && obj_document->IsJSGlobalProxy())
- continue;
- JSObject* document = JSObject::cast(obj_document);
- Object* obj_url;
- if (document->GetProperty(*url_string)->ToObject(&obj_url) &&
- obj_url->IsString()) {
- urls[i] = collection_->names()->GetName(String::cast(obj_url));
- }
- }
- }
-
- AssertNoAllocation no_allocation;
- for (int i = 0, l = enumerator.count(); i < l; ++i) {
- objects_tags_.SetTag(*enumerator.at(i), urls[i]);
- }
-
- DeleteArray(urls);
-}
-
-
-class GlobalHandlesExtractor : public ObjectVisitor {
- public:
- explicit GlobalHandlesExtractor(NativeObjectsExplorer* explorer)
- : explorer_(explorer) {}
- virtual ~GlobalHandlesExtractor() {}
- virtual void VisitPointers(Object** start, Object** end) {
- UNREACHABLE();
- }
- virtual void VisitEmbedderReference(Object** p, uint16_t class_id) {
- explorer_->VisitSubtreeWrapper(p, class_id);
- }
- private:
- NativeObjectsExplorer* explorer_;
-};
-
-
-class BasicHeapEntriesAllocator : public HeapEntriesAllocator {
- public:
- BasicHeapEntriesAllocator(
- HeapSnapshot* snapshot,
- HeapEntry::Type entries_type)
- : snapshot_(snapshot),
- collection_(snapshot_->collection()),
- entries_type_(entries_type) {
- }
- virtual HeapEntry* AllocateEntry(HeapThing ptr);
- private:
- HeapSnapshot* snapshot_;
- HeapSnapshotsCollection* collection_;
- HeapEntry::Type entries_type_;
-};
-
-
-HeapEntry* BasicHeapEntriesAllocator::AllocateEntry(HeapThing ptr) {
- v8::RetainedObjectInfo* info = reinterpret_cast<v8::RetainedObjectInfo*>(ptr);
- intptr_t elements = info->GetElementCount();
- intptr_t size = info->GetSizeInBytes();
- const char* name = elements != -1
- ? collection_->names()->GetFormatted(
- "%s / %" V8_PTR_PREFIX "d entries", info->GetLabel(), elements)
- : collection_->names()->GetCopy(info->GetLabel());
- return snapshot_->AddEntry(
- entries_type_,
- name,
- HeapObjectsMap::GenerateId(info),
- size != -1 ? static_cast<int>(size) : 0);
-}
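-
-// Illustrative result (hypothetical RetainedObjectInfo values): a native
-// info labeled "Database" with 42 elements becomes an entry named
-// "Database / 42 entries"; with an element count of -1 the plain label is
-// kept, and a size of -1 is recorded as 0.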
-
-
-NativeObjectsExplorer::NativeObjectsExplorer(
- HeapSnapshot* snapshot, SnapshottingProgressReportingInterface* progress)
- : snapshot_(snapshot),
- collection_(snapshot_->collection()),
- progress_(progress),
- embedder_queried_(false),
- objects_by_info_(RetainedInfosMatch),
- native_groups_(StringsMatch),
- filler_(NULL) {
- synthetic_entries_allocator_ =
- new BasicHeapEntriesAllocator(snapshot, HeapEntry::kSynthetic);
- native_entries_allocator_ =
- new BasicHeapEntriesAllocator(snapshot, HeapEntry::kNative);
-}
-
-
-NativeObjectsExplorer::~NativeObjectsExplorer() {
- for (HashMap::Entry* p = objects_by_info_.Start();
- p != NULL;
- p = objects_by_info_.Next(p)) {
- v8::RetainedObjectInfo* info =
- reinterpret_cast<v8::RetainedObjectInfo*>(p->key);
- info->Dispose();
- List<HeapObject*>* objects =
- reinterpret_cast<List<HeapObject*>* >(p->value);
- delete objects;
- }
- for (HashMap::Entry* p = native_groups_.Start();
- p != NULL;
- p = native_groups_.Next(p)) {
- v8::RetainedObjectInfo* info =
- reinterpret_cast<v8::RetainedObjectInfo*>(p->value);
- info->Dispose();
- }
- delete synthetic_entries_allocator_;
- delete native_entries_allocator_;
-}
-
-
-int NativeObjectsExplorer::EstimateObjectsCount() {
- FillRetainedObjects();
- return objects_by_info_.occupancy();
-}
-
-
-void NativeObjectsExplorer::FillRetainedObjects() {
- if (embedder_queried_) return;
- Isolate* isolate = Isolate::Current();
- // Record objects that are joined into ObjectGroups.
- isolate->heap()->CallGlobalGCPrologueCallback();
- List<ObjectGroup*>* groups = isolate->global_handles()->object_groups();
- for (int i = 0; i < groups->length(); ++i) {
- ObjectGroup* group = groups->at(i);
- if (group->info_ == NULL) continue;
- List<HeapObject*>* list = GetListMaybeDisposeInfo(group->info_);
- for (size_t j = 0; j < group->length_; ++j) {
- HeapObject* obj = HeapObject::cast(*group->objects_[j]);
- list->Add(obj);
- in_groups_.Insert(obj);
- }
- group->info_ = NULL; // Acquire info object ownership.
- }
- isolate->global_handles()->RemoveObjectGroups();
- isolate->heap()->CallGlobalGCEpilogueCallback();
- // Record objects that are not in ObjectGroups, but have class ID.
- GlobalHandlesExtractor extractor(this);
- isolate->global_handles()->IterateAllRootsWithClassIds(&extractor);
- embedder_queried_ = true;
-}
-
-void NativeObjectsExplorer::FillImplicitReferences() {
- Isolate* isolate = Isolate::Current();
- List<ImplicitRefGroup*>* groups =
- isolate->global_handles()->implicit_ref_groups();
- for (int i = 0; i < groups->length(); ++i) {
- ImplicitRefGroup* group = groups->at(i);
- HeapObject* parent = *group->parent_;
- int parent_entry =
- filler_->FindOrAddEntry(parent, native_entries_allocator_)->index();
- ASSERT(parent_entry != HeapEntry::kNoEntry);
- Object*** children = group->children_;
- for (size_t j = 0; j < group->length_; ++j) {
- Object* child = *children[j];
- HeapEntry* child_entry =
- filler_->FindOrAddEntry(child, native_entries_allocator_);
- filler_->SetNamedReference(
- HeapGraphEdge::kInternal,
- parent_entry,
- "native",
- child_entry);
- }
- }
-}
-
-
-List<HeapObject*>* NativeObjectsExplorer::GetListMaybeDisposeInfo(
- v8::RetainedObjectInfo* info) {
- HashMap::Entry* entry =
- objects_by_info_.Lookup(info, InfoHash(info), true);
- if (entry->value != NULL) {
- info->Dispose();
- } else {
- entry->value = new List<HeapObject*>(4);
- }
- return reinterpret_cast<List<HeapObject*>* >(entry->value);
-}
-
-
-bool NativeObjectsExplorer::IterateAndExtractReferences(
- SnapshotFillerInterface* filler) {
- filler_ = filler;
- FillRetainedObjects();
- FillImplicitReferences();
- if (EstimateObjectsCount() > 0) {
- for (HashMap::Entry* p = objects_by_info_.Start();
- p != NULL;
- p = objects_by_info_.Next(p)) {
- v8::RetainedObjectInfo* info =
- reinterpret_cast<v8::RetainedObjectInfo*>(p->key);
- SetNativeRootReference(info);
- List<HeapObject*>* objects =
- reinterpret_cast<List<HeapObject*>* >(p->value);
- for (int i = 0; i < objects->length(); ++i) {
- SetWrapperNativeReferences(objects->at(i), info);
- }
- }
- SetRootNativeRootsReference();
- }
- filler_ = NULL;
- return true;
-}
-
-
-class NativeGroupRetainedObjectInfo : public v8::RetainedObjectInfo {
- public:
- explicit NativeGroupRetainedObjectInfo(const char* label)
- : disposed_(false),
- hash_(reinterpret_cast<intptr_t>(label)),
- label_(label) {
- }
-
- virtual ~NativeGroupRetainedObjectInfo() {}
- virtual void Dispose() {
- CHECK(!disposed_);
- disposed_ = true;
- delete this;
- }
- virtual bool IsEquivalent(RetainedObjectInfo* other) {
- return hash_ == other->GetHash() && !strcmp(label_, other->GetLabel());
- }
- virtual intptr_t GetHash() { return hash_; }
- virtual const char* GetLabel() { return label_; }
-
- private:
- bool disposed_;
- intptr_t hash_;
- const char* label_;
-};
-
-
-NativeGroupRetainedObjectInfo* NativeObjectsExplorer::FindOrAddGroupInfo(
- const char* label) {
- const char* label_copy = collection_->names()->GetCopy(label);
- uint32_t hash = HashSequentialString(label_copy,
- static_cast<int>(strlen(label_copy)),
- HEAP->HashSeed());
- HashMap::Entry* entry = native_groups_.Lookup(const_cast<char*>(label_copy),
- hash, true);
- if (entry->value == NULL) {
- entry->value = new NativeGroupRetainedObjectInfo(label);
- }
- return static_cast<NativeGroupRetainedObjectInfo*>(entry->value);
-}
-
-
-void NativeObjectsExplorer::SetNativeRootReference(
- v8::RetainedObjectInfo* info) {
- HeapEntry* child_entry =
- filler_->FindOrAddEntry(info, native_entries_allocator_);
- ASSERT(child_entry != NULL);
- NativeGroupRetainedObjectInfo* group_info =
- FindOrAddGroupInfo(info->GetGroupLabel());
- HeapEntry* group_entry =
- filler_->FindOrAddEntry(group_info, synthetic_entries_allocator_);
- filler_->SetNamedAutoIndexReference(
- HeapGraphEdge::kInternal,
- group_entry->index(),
- child_entry);
-}
-
-
-void NativeObjectsExplorer::SetWrapperNativeReferences(
- HeapObject* wrapper, v8::RetainedObjectInfo* info) {
- HeapEntry* wrapper_entry = filler_->FindEntry(wrapper);
- ASSERT(wrapper_entry != NULL);
- HeapEntry* info_entry =
- filler_->FindOrAddEntry(info, native_entries_allocator_);
- ASSERT(info_entry != NULL);
- filler_->SetNamedReference(HeapGraphEdge::kInternal,
- wrapper_entry->index(),
- "native",
- info_entry);
- filler_->SetIndexedAutoIndexReference(HeapGraphEdge::kElement,
- info_entry->index(),
- wrapper_entry);
-}
-
-
-void NativeObjectsExplorer::SetRootNativeRootsReference() {
- for (HashMap::Entry* entry = native_groups_.Start();
- entry;
- entry = native_groups_.Next(entry)) {
- NativeGroupRetainedObjectInfo* group_info =
- static_cast<NativeGroupRetainedObjectInfo*>(entry->value);
- HeapEntry* group_entry =
- filler_->FindOrAddEntry(group_info, native_entries_allocator_);
- ASSERT(group_entry != NULL);
- filler_->SetIndexedAutoIndexReference(
- HeapGraphEdge::kElement,
- snapshot_->root()->index(),
- group_entry);
- }
-}
-
-
-void NativeObjectsExplorer::VisitSubtreeWrapper(Object** p, uint16_t class_id) {
- if (in_groups_.Contains(*p)) return;
- Isolate* isolate = Isolate::Current();
- v8::RetainedObjectInfo* info =
- isolate->heap_profiler()->ExecuteWrapperClassCallback(class_id, p);
- if (info == NULL) return;
- GetListMaybeDisposeInfo(info)->Add(HeapObject::cast(*p));
-}
-
-
-class SnapshotFiller : public SnapshotFillerInterface {
- public:
- explicit SnapshotFiller(HeapSnapshot* snapshot, HeapEntriesMap* entries)
- : snapshot_(snapshot),
- collection_(snapshot->collection()),
- entries_(entries) { }
- HeapEntry* AddEntry(HeapThing ptr, HeapEntriesAllocator* allocator) {
- HeapEntry* entry = allocator->AllocateEntry(ptr);
- entries_->Pair(ptr, entry->index());
- return entry;
- }
- HeapEntry* FindEntry(HeapThing ptr) {
- int index = entries_->Map(ptr);
- return index != HeapEntry::kNoEntry ? &snapshot_->entries()[index] : NULL;
- }
- HeapEntry* FindOrAddEntry(HeapThing ptr, HeapEntriesAllocator* allocator) {
- HeapEntry* entry = FindEntry(ptr);
- return entry != NULL ? entry : AddEntry(ptr, allocator);
- }
- void SetIndexedReference(HeapGraphEdge::Type type,
- int parent,
- int index,
- HeapEntry* child_entry) {
- HeapEntry* parent_entry = &snapshot_->entries()[parent];
- parent_entry->SetIndexedReference(type, index, child_entry);
- }
- void SetIndexedAutoIndexReference(HeapGraphEdge::Type type,
- int parent,
- HeapEntry* child_entry) {
- HeapEntry* parent_entry = &snapshot_->entries()[parent];
- int index = parent_entry->children_count() + 1;
- parent_entry->SetIndexedReference(type, index, child_entry);
- }
- void SetNamedReference(HeapGraphEdge::Type type,
- int parent,
- const char* reference_name,
- HeapEntry* child_entry) {
- HeapEntry* parent_entry = &snapshot_->entries()[parent];
- parent_entry->SetNamedReference(type, reference_name, child_entry);
- }
- void SetNamedAutoIndexReference(HeapGraphEdge::Type type,
- int parent,
- HeapEntry* child_entry) {
- HeapEntry* parent_entry = &snapshot_->entries()[parent];
- int index = parent_entry->children_count() + 1;
- parent_entry->SetNamedReference(
- type,
- collection_->names()->GetName(index),
- child_entry);
- }
-
- private:
- HeapSnapshot* snapshot_;
- HeapSnapshotsCollection* collection_;
- HeapEntriesMap* entries_;
-};
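-
-// Sketch of how the explorers drive the filler (the calls mirror
-// FillImplicitReferences above; the objects themselves are hypothetical):
-//
-//   SnapshotFiller filler(snapshot, &entries);
-//   HeapEntry* parent = filler.FindOrAddEntry(parent_thing, allocator);
-//   HeapEntry* child = filler.FindOrAddEntry(child_thing, allocator);
-//   filler.SetNamedReference(HeapGraphEdge::kInternal,
-//                            parent->index(), "native", child);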
-
-
-HeapSnapshotGenerator::HeapSnapshotGenerator(HeapSnapshot* snapshot,
- v8::ActivityControl* control)
- : snapshot_(snapshot),
- control_(control),
- v8_heap_explorer_(snapshot_, this),
- dom_explorer_(snapshot_, this) {
-}
-
-
-bool HeapSnapshotGenerator::GenerateSnapshot() {
- v8_heap_explorer_.TagGlobalObjects();
-
- // TODO(1562): the profiler assumes that any object that is in the heap
- // after a full GC is reachable from the root when computing dominators.
- // This is not true for weakly reachable objects.
- // As a temporary solution, we call GC twice.
- Isolate::Current()->heap()->CollectAllGarbage(
- Heap::kMakeHeapIterableMask,
- "HeapSnapshotGenerator::GenerateSnapshot");
- Isolate::Current()->heap()->CollectAllGarbage(
- Heap::kMakeHeapIterableMask,
- "HeapSnapshotGenerator::GenerateSnapshot");
-
-#ifdef VERIFY_HEAP
- Heap* debug_heap = Isolate::Current()->heap();
- CHECK(!debug_heap->old_data_space()->was_swept_conservatively());
- CHECK(!debug_heap->old_pointer_space()->was_swept_conservatively());
- CHECK(!debug_heap->code_space()->was_swept_conservatively());
- CHECK(!debug_heap->cell_space()->was_swept_conservatively());
- CHECK(!debug_heap->map_space()->was_swept_conservatively());
-#endif
-
- // The following code uses heap iterators, so we want the heap to be
- // stable. It must run after TagGlobalObjects, as that can allocate.
- AssertNoAllocation no_alloc;
-
-#ifdef VERIFY_HEAP
- debug_heap->Verify();
-#endif
-
- SetProgressTotal(1); // 1 pass.
-
-#ifdef VERIFY_HEAP
- debug_heap->Verify();
-#endif
-
- if (!FillReferences()) return false;
-
- snapshot_->FillChildren();
- snapshot_->RememberLastJSObjectId();
-
- progress_counter_ = progress_total_;
- if (!ProgressReport(true)) return false;
- return true;
-}
-
-
-void HeapSnapshotGenerator::ProgressStep() {
- ++progress_counter_;
-}
-
-
-bool HeapSnapshotGenerator::ProgressReport(bool force) {
- const int kProgressReportGranularity = 10000;
- if (control_ != NULL
- && (force || progress_counter_ % kProgressReportGranularity == 0)) {
- return
- control_->ReportProgressValue(progress_counter_, progress_total_) ==
- v8::ActivityControl::kContinue;
- }
- return true;
-}
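-
-// A hedged sketch of the embedder side: ReportProgressValue is the only
-// v8::ActivityControl hook used here, and returning kAbort cancels snapshot
-// generation (the class below is hypothetical).
-//
-//   class ConsoleActivity : public v8::ActivityControl {
-//    public:
-//     virtual ControlOption ReportProgressValue(int done, int total) {
-//       printf("snapshot: %d/%d\n", done, total);
-//       return kContinue;  // or kAbort to stop
-//     }
-//   };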
-
-
-void HeapSnapshotGenerator::SetProgressTotal(int iterations_count) {
- if (control_ == NULL) return;
- HeapIterator iterator(HeapIterator::kFilterUnreachable);
- progress_total_ = iterations_count * (
- v8_heap_explorer_.EstimateObjectsCount(&iterator) +
- dom_explorer_.EstimateObjectsCount());
- progress_counter_ = 0;
-}
-
-
-bool HeapSnapshotGenerator::FillReferences() {
- SnapshotFiller filler(snapshot_, &entries_);
- v8_heap_explorer_.AddRootEntries(&filler);
- return v8_heap_explorer_.IterateAndExtractReferences(&filler)
- && dom_explorer_.IterateAndExtractReferences(&filler);
-}
-
-
-template<int bytes> struct MaxDecimalDigitsIn;
-template<> struct MaxDecimalDigitsIn<4> {
- static const int kSigned = 11;
- static const int kUnsigned = 10;
-};
-template<> struct MaxDecimalDigitsIn<8> {
- static const int kSigned = 20;
- static const int kUnsigned = 20;
-};
-
-
-class OutputStreamWriter {
- public:
- explicit OutputStreamWriter(v8::OutputStream* stream)
- : stream_(stream),
- chunk_size_(stream->GetChunkSize()),
- chunk_(chunk_size_),
- chunk_pos_(0),
- aborted_(false) {
- ASSERT(chunk_size_ > 0);
- }
- bool aborted() { return aborted_; }
- void AddCharacter(char c) {
- ASSERT(c != '\0');
- ASSERT(chunk_pos_ < chunk_size_);
- chunk_[chunk_pos_++] = c;
- MaybeWriteChunk();
- }
- void AddString(const char* s) {
- AddSubstring(s, StrLength(s));
- }
- void AddSubstring(const char* s, int n) {
- if (n <= 0) return;
- ASSERT(static_cast<size_t>(n) <= strlen(s));
- const char* s_end = s + n;
- while (s < s_end) {
- int s_chunk_size = Min(
- chunk_size_ - chunk_pos_, static_cast<int>(s_end - s));
- ASSERT(s_chunk_size > 0);
- memcpy(chunk_.start() + chunk_pos_, s, s_chunk_size);
- s += s_chunk_size;
- chunk_pos_ += s_chunk_size;
- MaybeWriteChunk();
- }
- }
- void AddNumber(unsigned n) { AddNumberImpl<unsigned>(n, "%u"); }
- void Finalize() {
- if (aborted_) return;
- ASSERT(chunk_pos_ < chunk_size_);
- if (chunk_pos_ != 0) {
- WriteChunk();
- }
- stream_->EndOfStream();
- }
-
- private:
- template<typename T>
- void AddNumberImpl(T n, const char* format) {
- // Buffer for the longest value plus trailing \0
- static const int kMaxNumberSize =
- MaxDecimalDigitsIn<sizeof(T)>::kUnsigned + 1;
- if (chunk_size_ - chunk_pos_ >= kMaxNumberSize) {
- int result = OS::SNPrintF(
- chunk_.SubVector(chunk_pos_, chunk_size_), format, n);
- ASSERT(result != -1);
- chunk_pos_ += result;
- MaybeWriteChunk();
- } else {
- EmbeddedVector<char, kMaxNumberSize> buffer;
- int result = OS::SNPrintF(buffer, format, n);
- USE(result);
- ASSERT(result != -1);
- AddString(buffer.start());
- }
- }
- void MaybeWriteChunk() {
- ASSERT(chunk_pos_ <= chunk_size_);
- if (chunk_pos_ == chunk_size_) {
- WriteChunk();
- }
- }
- void WriteChunk() {
- if (aborted_) return;
- if (stream_->WriteAsciiChunk(chunk_.start(), chunk_pos_) ==
- v8::OutputStream::kAbort) aborted_ = true;
- chunk_pos_ = 0;
- }
-
- v8::OutputStream* stream_;
- int chunk_size_;
- ScopedVector<char> chunk_;
- int chunk_pos_;
- bool aborted_;
-};
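
The writer above batches output into a buffer sized by the stream itself and flushes full (or final) chunks through WriteAsciiChunk. As a sketch of the consuming side, assuming the v8::OutputStream interface of this V8 vintage, a stream that accumulates the serialized snapshot into a string might look like this:

    #include <string>
    #include <v8.h>

    class StringOutputStream : public v8::OutputStream {
     public:
      virtual int GetChunkSize() { return 4096; }  // becomes chunk_size_ above
      virtual WriteResult WriteAsciiChunk(char* data, int size) {
        data_.append(data, size);   // one call per flushed chunk
        return kContinue;           // kAbort would set the writer's aborted_ flag
      }
      virtual void EndOfStream() {} // reached from Finalize()
      const std::string& data() const { return data_; }
     private:
      std::string data_;
    };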
-
-
-// type, name|index, to_node.
-const int HeapSnapshotJSONSerializer::kEdgeFieldsCount = 3;
-// type, name, id, self_size, edge_count.
-const int HeapSnapshotJSONSerializer::kNodeFieldsCount = 5;
-
-void HeapSnapshotJSONSerializer::Serialize(v8::OutputStream* stream) {
- ASSERT(writer_ == NULL);
- writer_ = new OutputStreamWriter(stream);
-
- HeapSnapshot* original_snapshot = NULL;
- if (snapshot_->RawSnapshotSize() >=
- SnapshotSizeConstants<kPointerSize>::kMaxSerializableSnapshotRawSize) {
- // The snapshot is too big. Serialize a fake snapshot.
- original_snapshot = snapshot_;
- snapshot_ = CreateFakeSnapshot();
- }
-
- SerializeImpl();
-
- delete writer_;
- writer_ = NULL;
-
- if (original_snapshot != NULL) {
- delete snapshot_;
- snapshot_ = original_snapshot;
- }
-}
-
-
-HeapSnapshot* HeapSnapshotJSONSerializer::CreateFakeSnapshot() {
- HeapSnapshot* result = new HeapSnapshot(snapshot_->collection(),
- HeapSnapshot::kFull,
- snapshot_->title(),
- snapshot_->uid());
- result->AddRootEntry();
- const char* text = snapshot_->collection()->names()->GetFormatted(
- "The snapshot is too big. "
- "Maximum snapshot size is %" V8_PTR_PREFIX "u MB. "
- "Actual snapshot size is %" V8_PTR_PREFIX "u MB.",
- SnapshotSizeConstants<kPointerSize>::kMaxSerializableSnapshotRawSize / MB,
- (snapshot_->RawSnapshotSize() + MB - 1) / MB);
- HeapEntry* message = result->AddEntry(HeapEntry::kString, text, 0, 4);
- result->root()->SetIndexedReference(HeapGraphEdge::kElement, 1, message);
- result->FillChildren();
- return result;
-}
-
-
-void HeapSnapshotJSONSerializer::SerializeImpl() {
- ASSERT(0 == snapshot_->root()->index());
- writer_->AddCharacter('{');
- writer_->AddString("\"snapshot\":{");
- SerializeSnapshot();
- if (writer_->aborted()) return;
- writer_->AddString("},\n");
- writer_->AddString("\"nodes\":[");
- SerializeNodes();
- if (writer_->aborted()) return;
- writer_->AddString("],\n");
- writer_->AddString("\"edges\":[");
- SerializeEdges();
- if (writer_->aborted()) return;
- writer_->AddString("],\n");
- writer_->AddString("\"strings\":[");
- SerializeStrings();
- if (writer_->aborted()) return;
- writer_->AddCharacter(']');
- writer_->AddCharacter('}');
- writer_->Finalize();
-}
-
-
-int HeapSnapshotJSONSerializer::GetStringId(const char* s) {
- HashMap::Entry* cache_entry = strings_.Lookup(
- const_cast<char*>(s), ObjectHash(s), true);
- if (cache_entry->value == NULL) {
- cache_entry->value = reinterpret_cast<void*>(next_string_id_++);
- }
- return static_cast<int>(reinterpret_cast<intptr_t>(cache_entry->value));
-}
-
-
-static int utoa(unsigned value, const Vector<char>& buffer, int buffer_pos) {
- int number_of_digits = 0;
- unsigned t = value;
- do {
- ++number_of_digits;
- } while (t /= 10);
-
- buffer_pos += number_of_digits;
- int result = buffer_pos;
- do {
- int last_digit = value % 10;
- buffer[--buffer_pos] = '0' + last_digit;
- value /= 10;
- } while (value);
- return result;
-}
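
utoa() counts the digits first, then fills them in back to front, so the most significant digit lands at the caller's buffer_pos and the return value is the position just past the number. Traced on a hypothetical buffer:

    // Hypothetical trace of utoa() above (note: no terminator is written):
    char buf[8] = {0};
    int pos = utoa(1234u, Vector<char>(buf, 8), 1);
    // number_of_digits == 4, so digits land at buf[1..4], written right to left:
    // buf == { 0, '1', '2', '3', '4', 0, 0, 0 }, and pos == 5.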
-
-
-void HeapSnapshotJSONSerializer::SerializeEdge(HeapGraphEdge* edge,
- bool first_edge) {
- // The buffer needs space for 3 unsigned ints, 3 commas, \n and \0
- static const int kBufferSize =
- MaxDecimalDigitsIn<sizeof(unsigned)>::kUnsigned * 3 + 3 + 2; // NOLINT
- EmbeddedVector<char, kBufferSize> buffer;
- int edge_name_or_index = edge->type() == HeapGraphEdge::kElement
- || edge->type() == HeapGraphEdge::kHidden
- || edge->type() == HeapGraphEdge::kWeak
- ? edge->index() : GetStringId(edge->name());
- int buffer_pos = 0;
- if (!first_edge) {
- buffer[buffer_pos++] = ',';
- }
- buffer_pos = utoa(edge->type(), buffer, buffer_pos);
- buffer[buffer_pos++] = ',';
- buffer_pos = utoa(edge_name_or_index, buffer, buffer_pos);
- buffer[buffer_pos++] = ',';
- buffer_pos = utoa(entry_index(edge->to()), buffer, buffer_pos);
- buffer[buffer_pos++] = '\n';
- buffer[buffer_pos++] = '\0';
- writer_->AddString(buffer.start());
-}
-
-
-void HeapSnapshotJSONSerializer::SerializeEdges() {
- List<HeapGraphEdge*>& edges = snapshot_->children();
- for (int i = 0; i < edges.length(); ++i) {
- ASSERT(i == 0 ||
- edges[i - 1]->from()->index() <= edges[i]->from()->index());
- SerializeEdge(edges[i], i == 0);
- if (writer_->aborted()) return;
- }
-}
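
Note that the serialized edge triples carry no "from" node: the assert above guarantees edges arrive grouped by source node, so a consumer recovers the source implicitly from each node's edge_count field. A sketch of that decoding, where nodes/edges stand for the parsed flat arrays:

    // Consumer-side sketch; nodes/edges are hypothetical decoded arrays.
    int edge_cursor = 0;
    for (int n = 0; n < node_count; ++n) {
      int edge_count = nodes[n * kNodeFieldsCount + 4];  // fifth node field
      for (int e = 0; e < edge_count; ++e, ++edge_cursor) {
        int to_offset = edges[edge_cursor * kEdgeFieldsCount + 2];
        // to_offset is already scaled by kNodeFieldsCount (see entry_index()),
        // and node n is the implicit source of this edge.
      }
    }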
-
-
-void HeapSnapshotJSONSerializer::SerializeNode(HeapEntry* entry) {
- // The buffer needs space for 5 unsigned ints, 5 commas, \n and \0
- static const int kBufferSize =
- 5 * MaxDecimalDigitsIn<sizeof(unsigned)>::kUnsigned // NOLINT
- + 5 + 1 + 1;
- EmbeddedVector<char, kBufferSize> buffer;
- int buffer_pos = 0;
- if (entry_index(entry) != 0) {
- buffer[buffer_pos++] = ',';
- }
- buffer_pos = utoa(entry->type(), buffer, buffer_pos);
- buffer[buffer_pos++] = ',';
- buffer_pos = utoa(GetStringId(entry->name()), buffer, buffer_pos);
- buffer[buffer_pos++] = ',';
- buffer_pos = utoa(entry->id(), buffer, buffer_pos);
- buffer[buffer_pos++] = ',';
- buffer_pos = utoa(entry->self_size(), buffer, buffer_pos);
- buffer[buffer_pos++] = ',';
- buffer_pos = utoa(entry->children_count(), buffer, buffer_pos);
- buffer[buffer_pos++] = '\n';
- buffer[buffer_pos++] = '\0';
- writer_->AddString(buffer.start());
-}
-
-
-void HeapSnapshotJSONSerializer::SerializeNodes() {
- List<HeapEntry>& entries = snapshot_->entries();
- for (int i = 0; i < entries.length(); ++i) {
- SerializeNode(&entries[i]);
- if (writer_->aborted()) return;
- }
-}
-
-
-void HeapSnapshotJSONSerializer::SerializeSnapshot() {
- writer_->AddString("\"title\":\"");
- writer_->AddString(snapshot_->title());
- writer_->AddString("\"");
- writer_->AddString(",\"uid\":");
- writer_->AddNumber(snapshot_->uid());
- writer_->AddString(",\"meta\":");
- // The object describing node serialization layout.
- // We use a set of macros to improve readability.
-#define JSON_A(s) "[" s "]"
-#define JSON_O(s) "{" s "}"
-#define JSON_S(s) "\"" s "\""
- writer_->AddString(JSON_O(
- JSON_S("node_fields") ":" JSON_A(
- JSON_S("type") ","
- JSON_S("name") ","
- JSON_S("id") ","
- JSON_S("self_size") ","
- JSON_S("edge_count")) ","
- JSON_S("node_types") ":" JSON_A(
- JSON_A(
- JSON_S("hidden") ","
- JSON_S("array") ","
- JSON_S("string") ","
- JSON_S("object") ","
- JSON_S("code") ","
- JSON_S("closure") ","
- JSON_S("regexp") ","
- JSON_S("number") ","
- JSON_S("native") ","
- JSON_S("synthetic")) ","
- JSON_S("string") ","
- JSON_S("number") ","
- JSON_S("number") ","
- JSON_S("number") ","
- JSON_S("number") ","
- JSON_S("number")) ","
- JSON_S("edge_fields") ":" JSON_A(
- JSON_S("type") ","
- JSON_S("name_or_index") ","
- JSON_S("to_node")) ","
- JSON_S("edge_types") ":" JSON_A(
- JSON_A(
- JSON_S("context") ","
- JSON_S("element") ","
- JSON_S("property") ","
- JSON_S("internal") ","
- JSON_S("hidden") ","
- JSON_S("shortcut") ","
- JSON_S("weak")) ","
- JSON_S("string_or_number") ","
- JSON_S("node"))));
-#undef JSON_S
-#undef JSON_O
-#undef JSON_A
- writer_->AddString(",\"node_count\":");
- writer_->AddNumber(snapshot_->entries().length());
- writer_->AddString(",\"edge_count\":");
- writer_->AddNumber(snapshot_->edges().length());
-}
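
Putting SerializeImpl() and SerializeSnapshot() together, the emitted stream has this overall shape (values abbreviated and illustrative):

    {"snapshot":{"title":"...","uid":1,"meta":{...},
                 "node_count":N,"edge_count":E},
     "nodes":[type,name,id,self_size,edge_count, ...],
     "edges":[type,name_or_index,to_node, ...],
     "strings":["<dummy>","first string", ...]}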
-
-
-static void WriteUChar(OutputStreamWriter* w, unibrow::uchar u) {
- static const char hex_chars[] = "0123456789ABCDEF";
- w->AddString("\\u");
- w->AddCharacter(hex_chars[(u >> 12) & 0xf]);
- w->AddCharacter(hex_chars[(u >> 8) & 0xf]);
- w->AddCharacter(hex_chars[(u >> 4) & 0xf]);
- w->AddCharacter(hex_chars[u & 0xf]);
-}
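
WriteUChar() peels the four nibbles from high to low, so any 16-bit code unit becomes a fixed-width uppercase \u escape. Shown standalone:

    // Illustration of the nibble extraction in WriteUChar():
    unsigned u = 0x263A;  // WHITE SMILING FACE
    const char* hex = "0123456789ABCDEF";
    char out[] = { '\\', 'u',
                   hex[(u >> 12) & 0xf],   // '2'
                   hex[(u >> 8) & 0xf],    // '6'
                   hex[(u >> 4) & 0xf],    // '3'
                   hex[u & 0xf],           // 'A'
                   '\0' };                 // out == "\u263A"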
-
-void HeapSnapshotJSONSerializer::SerializeString(const unsigned char* s) {
- writer_->AddCharacter('\n');
- writer_->AddCharacter('\"');
- for ( ; *s != '\0'; ++s) {
- switch (*s) {
- case '\b':
- writer_->AddString("\\b");
- continue;
- case '\f':
- writer_->AddString("\\f");
- continue;
- case '\n':
- writer_->AddString("\\n");
- continue;
- case '\r':
- writer_->AddString("\\r");
- continue;
- case '\t':
- writer_->AddString("\\t");
- continue;
- case '\"':
- case '\\':
- writer_->AddCharacter('\\');
- writer_->AddCharacter(*s);
- continue;
- default:
- if (*s > 31 && *s < 128) {
- writer_->AddCharacter(*s);
- } else if (*s <= 31) {
- // Special character with no dedicated literal.
- WriteUChar(writer_, *s);
- } else {
- // Convert UTF-8 into \u UTF-16 literal.
- unsigned length = 1, cursor = 0;
- for ( ; length <= 4 && *(s + length) != '\0'; ++length) { }
- unibrow::uchar c = unibrow::Utf8::CalculateValue(s, length, &cursor);
- if (c != unibrow::Utf8::kBadChar) {
- WriteUChar(writer_, c);
- ASSERT(cursor != 0);
- s += cursor - 1;
- } else {
- writer_->AddCharacter('?');
- }
- }
- }
- }
- writer_->AddCharacter('\"');
-}
-
-
-void HeapSnapshotJSONSerializer::SerializeStrings() {
- List<HashMap::Entry*> sorted_strings;
- SortHashMap(&strings_, &sorted_strings);
- writer_->AddString("\"<dummy>\"");
- for (int i = 0; i < sorted_strings.length(); ++i) {
- writer_->AddCharacter(',');
- SerializeString(
- reinterpret_cast<const unsigned char*>(sorted_strings[i]->key));
- if (writer_->aborted()) return;
- }
-}
-
-
-template<typename T>
-inline static int SortUsingEntryValue(const T* x, const T* y) {
- uintptr_t x_uint = reinterpret_cast<uintptr_t>((*x)->value);
- uintptr_t y_uint = reinterpret_cast<uintptr_t>((*y)->value);
- if (x_uint > y_uint) {
- return 1;
- } else if (x_uint == y_uint) {
- return 0;
- } else {
- return -1;
- }
-}
-
-
-void HeapSnapshotJSONSerializer::SortHashMap(
- HashMap* map, List<HashMap::Entry*>* sorted_entries) {
- for (HashMap::Entry* p = map->Start(); p != NULL; p = map->Next(p))
- sorted_entries->Add(p);
- sorted_entries->Sort(SortUsingEntryValue);
-}
-
} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/profile-generator.h b/src/3rdparty/v8/src/profile-generator.h
index 04f4a1c..8c6c71a 100644
--- a/src/3rdparty/v8/src/profile-generator.h
+++ b/src/3rdparty/v8/src/profile-generator.h
@@ -45,7 +45,8 @@ class TokenEnumerator {
static const int kInheritsSecurityToken = -2;
private:
- static void TokenRemovedCallback(v8::Persistent<v8::Value> handle,
+ static void TokenRemovedCallback(v8::Isolate* isolate,
+ v8::Persistent<v8::Value> handle,
void* parameter);
void TokenRemoved(Object** token_location);
@@ -446,655 +447,6 @@ class ProfileGenerator {
};
-class HeapEntry;
-class HeapSnapshot;
-
-class HeapGraphEdge BASE_EMBEDDED {
- public:
- enum Type {
- kContextVariable = v8::HeapGraphEdge::kContextVariable,
- kElement = v8::HeapGraphEdge::kElement,
- kProperty = v8::HeapGraphEdge::kProperty,
- kInternal = v8::HeapGraphEdge::kInternal,
- kHidden = v8::HeapGraphEdge::kHidden,
- kShortcut = v8::HeapGraphEdge::kShortcut,
- kWeak = v8::HeapGraphEdge::kWeak
- };
-
- HeapGraphEdge() { }
- HeapGraphEdge(Type type, const char* name, int from, int to);
- HeapGraphEdge(Type type, int index, int from, int to);
- void ReplaceToIndexWithEntry(HeapSnapshot* snapshot);
-
- Type type() const { return static_cast<Type>(type_); }
- int index() const {
- ASSERT(type_ == kElement || type_ == kHidden || type_ == kWeak);
- return index_;
- }
- const char* name() const {
- ASSERT(type_ == kContextVariable
- || type_ == kProperty
- || type_ == kInternal
- || type_ == kShortcut);
- return name_;
- }
- INLINE(HeapEntry* from() const);
- HeapEntry* to() const { return to_entry_; }
-
- private:
- INLINE(HeapSnapshot* snapshot() const);
-
- unsigned type_ : 3;
- int from_index_ : 29;
- union {
- // While the entries are being populated, |to_index_| stores the index;
- // afterwards it is replaced with a pointer to the entry.
- int to_index_;
- HeapEntry* to_entry_;
- };
- union {
- int index_;
- const char* name_;
- };
-};
-
-
-// Each HeapEntry instance represents an entity from the heap (or a special
-// virtual node, e.g. the root).
-class HeapEntry BASE_EMBEDDED {
- public:
- enum Type {
- kHidden = v8::HeapGraphNode::kHidden,
- kArray = v8::HeapGraphNode::kArray,
- kString = v8::HeapGraphNode::kString,
- kObject = v8::HeapGraphNode::kObject,
- kCode = v8::HeapGraphNode::kCode,
- kClosure = v8::HeapGraphNode::kClosure,
- kRegExp = v8::HeapGraphNode::kRegExp,
- kHeapNumber = v8::HeapGraphNode::kHeapNumber,
- kNative = v8::HeapGraphNode::kNative,
- kSynthetic = v8::HeapGraphNode::kSynthetic
- };
- static const int kNoEntry;
-
- HeapEntry() { }
- HeapEntry(HeapSnapshot* snapshot,
- Type type,
- const char* name,
- SnapshotObjectId id,
- int self_size);
-
- HeapSnapshot* snapshot() { return snapshot_; }
- Type type() { return static_cast<Type>(type_); }
- const char* name() { return name_; }
- void set_name(const char* name) { name_ = name; }
- inline SnapshotObjectId id() { return id_; }
- int self_size() { return self_size_; }
- INLINE(int index() const);
- int children_count() const { return children_count_; }
- INLINE(int set_children_index(int index));
- void add_child(HeapGraphEdge* edge) {
- children_arr()[children_count_++] = edge;
- }
- Vector<HeapGraphEdge*> children() {
- return Vector<HeapGraphEdge*>(children_arr(), children_count_);
- }
-
- void SetIndexedReference(
- HeapGraphEdge::Type type, int index, HeapEntry* entry);
- void SetNamedReference(
- HeapGraphEdge::Type type, const char* name, HeapEntry* entry);
-
- void Print(
- const char* prefix, const char* edge_name, int max_depth, int indent);
-
- Handle<HeapObject> GetHeapObject();
-
- private:
- INLINE(HeapGraphEdge** children_arr());
- const char* TypeAsString();
-
- unsigned type_: 4;
- int children_count_: 28;
- int children_index_;
- int self_size_;
- SnapshotObjectId id_;
- HeapSnapshot* snapshot_;
- const char* name_;
-};
-
-
-class HeapSnapshotsCollection;
-
-// HeapSnapshot represents a single heap snapshot. It is stored in
-// HeapSnapshotsCollection, which is also a factory for
-// HeapSnapshots. All HeapSnapshots share strings copied from the JS heap
-// so that they can still be returned even after the originals are collected.
-// HeapSnapshotGenerator fills in a HeapSnapshot.
-class HeapSnapshot {
- public:
- enum Type {
- kFull = v8::HeapSnapshot::kFull
- };
-
- HeapSnapshot(HeapSnapshotsCollection* collection,
- Type type,
- const char* title,
- unsigned uid);
- void Delete();
-
- HeapSnapshotsCollection* collection() { return collection_; }
- Type type() { return type_; }
- const char* title() { return title_; }
- unsigned uid() { return uid_; }
- size_t RawSnapshotSize() const;
- HeapEntry* root() { return &entries_[root_index_]; }
- HeapEntry* gc_roots() { return &entries_[gc_roots_index_]; }
- HeapEntry* natives_root() { return &entries_[natives_root_index_]; }
- HeapEntry* gc_subroot(int index) {
- return &entries_[gc_subroot_indexes_[index]];
- }
- List<HeapEntry>& entries() { return entries_; }
- List<HeapGraphEdge>& edges() { return edges_; }
- List<HeapGraphEdge*>& children() { return children_; }
- void RememberLastJSObjectId();
- SnapshotObjectId max_snapshot_js_object_id() const {
- return max_snapshot_js_object_id_;
- }
-
- HeapEntry* AddEntry(HeapEntry::Type type,
- const char* name,
- SnapshotObjectId id,
- int size);
- HeapEntry* AddRootEntry();
- HeapEntry* AddGcRootsEntry();
- HeapEntry* AddGcSubrootEntry(int tag);
- HeapEntry* AddNativesRootEntry();
- HeapEntry* GetEntryById(SnapshotObjectId id);
- List<HeapEntry*>* GetSortedEntriesList();
- void FillChildren();
-
- void Print(int max_depth);
- void PrintEntriesSize();
-
- private:
- HeapSnapshotsCollection* collection_;
- Type type_;
- const char* title_;
- unsigned uid_;
- int root_index_;
- int gc_roots_index_;
- int natives_root_index_;
- int gc_subroot_indexes_[VisitorSynchronization::kNumberOfSyncTags];
- List<HeapEntry> entries_;
- List<HeapGraphEdge> edges_;
- List<HeapGraphEdge*> children_;
- List<HeapEntry*> sorted_entries_;
- SnapshotObjectId max_snapshot_js_object_id_;
-
- friend class HeapSnapshotTester;
-
- DISALLOW_COPY_AND_ASSIGN(HeapSnapshot);
-};
-
-
-class HeapObjectsMap {
- public:
- HeapObjectsMap();
-
- void SnapshotGenerationFinished();
- SnapshotObjectId FindEntry(Address addr);
- SnapshotObjectId FindOrAddEntry(Address addr, unsigned int size);
- void MoveObject(Address from, Address to);
- SnapshotObjectId last_assigned_id() const {
- return next_id_ - kObjectIdStep;
- }
-
- void StopHeapObjectsTracking();
- SnapshotObjectId PushHeapObjectsStats(OutputStream* stream);
- size_t GetUsedMemorySize() const;
-
- static SnapshotObjectId GenerateId(v8::RetainedObjectInfo* info);
- static inline SnapshotObjectId GetNthGcSubrootId(int delta);
-
- static const int kObjectIdStep = 2;
- static const SnapshotObjectId kInternalRootObjectId;
- static const SnapshotObjectId kGcRootsObjectId;
- static const SnapshotObjectId kNativesRootObjectId;
- static const SnapshotObjectId kGcRootsFirstSubrootId;
- static const SnapshotObjectId kFirstAvailableObjectId;
-
- private:
- struct EntryInfo {
- EntryInfo(SnapshotObjectId id, Address addr, unsigned int size)
- : id(id), addr(addr), size(size), accessed(true) { }
- EntryInfo(SnapshotObjectId id, Address addr, unsigned int size,
- bool accessed)
- : id(id), addr(addr), size(size), accessed(accessed) { }
- SnapshotObjectId id;
- Address addr;
- unsigned int size;
- bool accessed;
- };
- struct TimeInterval {
- explicit TimeInterval(SnapshotObjectId id) : id(id), size(0), count(0) { }
- SnapshotObjectId id;
- uint32_t size;
- uint32_t count;
- };
-
- void UpdateHeapObjectsMap();
- void RemoveDeadEntries();
-
- static bool AddressesMatch(void* key1, void* key2) {
- return key1 == key2;
- }
-
- static uint32_t AddressHash(Address addr) {
- return ComputeIntegerHash(
- static_cast<uint32_t>(reinterpret_cast<uintptr_t>(addr)),
- v8::internal::kZeroHashSeed);
- }
-
- SnapshotObjectId next_id_;
- HashMap entries_map_;
- List<EntryInfo> entries_;
- List<TimeInterval> time_intervals_;
-
- DISALLOW_COPY_AND_ASSIGN(HeapObjectsMap);
-};
-
-
-class HeapSnapshotsCollection {
- public:
- HeapSnapshotsCollection();
- ~HeapSnapshotsCollection();
-
- bool is_tracking_objects() { return is_tracking_objects_; }
- SnapshotObjectId PushHeapObjectsStats(OutputStream* stream) {
- return ids_.PushHeapObjectsStats(stream);
- }
- void StartHeapObjectsTracking() { is_tracking_objects_ = true; }
- void StopHeapObjectsTracking() { ids_.StopHeapObjectsTracking(); }
-
- HeapSnapshot* NewSnapshot(
- HeapSnapshot::Type type, const char* name, unsigned uid);
- void SnapshotGenerationFinished(HeapSnapshot* snapshot);
- List<HeapSnapshot*>* snapshots() { return &snapshots_; }
- HeapSnapshot* GetSnapshot(unsigned uid);
- void RemoveSnapshot(HeapSnapshot* snapshot);
-
- StringsStorage* names() { return &names_; }
- TokenEnumerator* token_enumerator() { return token_enumerator_; }
-
- SnapshotObjectId FindObjectId(Address object_addr) {
- return ids_.FindEntry(object_addr);
- }
- SnapshotObjectId GetObjectId(Address object_addr, int object_size) {
- return ids_.FindOrAddEntry(object_addr, object_size);
- }
- Handle<HeapObject> FindHeapObjectById(SnapshotObjectId id);
- void ObjectMoveEvent(Address from, Address to) { ids_.MoveObject(from, to); }
- SnapshotObjectId last_assigned_id() const {
- return ids_.last_assigned_id();
- }
- size_t GetUsedMemorySize() const;
-
- private:
- INLINE(static bool HeapSnapshotsMatch(void* key1, void* key2)) {
- return key1 == key2;
- }
-
- bool is_tracking_objects_; // Whether tracking object moves is needed.
- List<HeapSnapshot*> snapshots_;
- // Mapping from snapshots' uids to HeapSnapshot* pointers.
- HashMap snapshots_uids_;
- StringsStorage names_;
- TokenEnumerator* token_enumerator_;
- // Mapping from HeapObject addresses to objects' uids.
- HeapObjectsMap ids_;
-
- DISALLOW_COPY_AND_ASSIGN(HeapSnapshotsCollection);
-};
-
-
-// A typedef for referencing anything that can be snapshotted and that lives
-// in any kind of heap memory.
-typedef void* HeapThing;
-
-
-// An interface that creates HeapEntries for HeapThings.
-class HeapEntriesAllocator {
- public:
- virtual ~HeapEntriesAllocator() { }
- virtual HeapEntry* AllocateEntry(HeapThing ptr) = 0;
-};
-
-
-// The HeapEntriesMap instance is used to track a mapping between
-// real heap objects and their representations in heap snapshots.
-class HeapEntriesMap {
- public:
- HeapEntriesMap();
-
- int Map(HeapThing thing);
- void Pair(HeapThing thing, int entry);
-
- private:
- static uint32_t Hash(HeapThing thing) {
- return ComputeIntegerHash(
- static_cast<uint32_t>(reinterpret_cast<uintptr_t>(thing)),
- v8::internal::kZeroHashSeed);
- }
- static bool HeapThingsMatch(HeapThing key1, HeapThing key2) {
- return key1 == key2;
- }
-
- HashMap entries_;
-
- friend class HeapObjectsSet;
-
- DISALLOW_COPY_AND_ASSIGN(HeapEntriesMap);
-};
-
-
-class HeapObjectsSet {
- public:
- HeapObjectsSet();
- void Clear();
- bool Contains(Object* object);
- void Insert(Object* obj);
- const char* GetTag(Object* obj);
- void SetTag(Object* obj, const char* tag);
- bool is_empty() const { return entries_.occupancy() == 0; }
-
- private:
- HashMap entries_;
-
- DISALLOW_COPY_AND_ASSIGN(HeapObjectsSet);
-};
-
-
-// An interface used to populate a snapshot with nodes and edges.
-class SnapshotFillerInterface {
- public:
- virtual ~SnapshotFillerInterface() { }
- virtual HeapEntry* AddEntry(HeapThing ptr,
- HeapEntriesAllocator* allocator) = 0;
- virtual HeapEntry* FindEntry(HeapThing ptr) = 0;
- virtual HeapEntry* FindOrAddEntry(HeapThing ptr,
- HeapEntriesAllocator* allocator) = 0;
- virtual void SetIndexedReference(HeapGraphEdge::Type type,
- int parent_entry,
- int index,
- HeapEntry* child_entry) = 0;
- virtual void SetIndexedAutoIndexReference(HeapGraphEdge::Type type,
- int parent_entry,
- HeapEntry* child_entry) = 0;
- virtual void SetNamedReference(HeapGraphEdge::Type type,
- int parent_entry,
- const char* reference_name,
- HeapEntry* child_entry) = 0;
- virtual void SetNamedAutoIndexReference(HeapGraphEdge::Type type,
- int parent_entry,
- HeapEntry* child_entry) = 0;
-};
-
-
-class SnapshottingProgressReportingInterface {
- public:
- virtual ~SnapshottingProgressReportingInterface() { }
- virtual void ProgressStep() = 0;
- virtual bool ProgressReport(bool force) = 0;
-};
-
-
-// An implementation of the V8 heap graph extractor.
-class V8HeapExplorer : public HeapEntriesAllocator {
- public:
- V8HeapExplorer(HeapSnapshot* snapshot,
- SnapshottingProgressReportingInterface* progress);
- virtual ~V8HeapExplorer();
- virtual HeapEntry* AllocateEntry(HeapThing ptr);
- void AddRootEntries(SnapshotFillerInterface* filler);
- int EstimateObjectsCount(HeapIterator* iterator);
- bool IterateAndExtractReferences(SnapshotFillerInterface* filler);
- void TagGlobalObjects();
-
- static String* GetConstructorName(JSObject* object);
-
- static HeapObject* const kInternalRootObject;
-
- private:
- HeapEntry* AddEntry(HeapObject* object);
- HeapEntry* AddEntry(HeapObject* object,
- HeapEntry::Type type,
- const char* name);
- const char* GetSystemEntryName(HeapObject* object);
-
- void ExtractReferences(HeapObject* obj);
- void ExtractJSGlobalProxyReferences(JSGlobalProxy* proxy);
- void ExtractJSObjectReferences(int entry, JSObject* js_obj);
- void ExtractStringReferences(int entry, String* obj);
- void ExtractContextReferences(int entry, Context* context);
- void ExtractMapReferences(int entry, Map* map);
- void ExtractSharedFunctionInfoReferences(int entry,
- SharedFunctionInfo* shared);
- void ExtractScriptReferences(int entry, Script* script);
- void ExtractCodeCacheReferences(int entry, CodeCache* code_cache);
- void ExtractCodeReferences(int entry, Code* code);
- void ExtractJSGlobalPropertyCellReferences(int entry,
- JSGlobalPropertyCell* cell);
- void ExtractClosureReferences(JSObject* js_obj, int entry);
- void ExtractPropertyReferences(JSObject* js_obj, int entry);
- void ExtractElementReferences(JSObject* js_obj, int entry);
- void ExtractInternalReferences(JSObject* js_obj, int entry);
- bool IsEssentialObject(Object* object);
- void SetClosureReference(HeapObject* parent_obj,
- int parent,
- String* reference_name,
- Object* child);
- void SetNativeBindReference(HeapObject* parent_obj,
- int parent,
- const char* reference_name,
- Object* child);
- void SetElementReference(HeapObject* parent_obj,
- int parent,
- int index,
- Object* child);
- void SetInternalReference(HeapObject* parent_obj,
- int parent,
- const char* reference_name,
- Object* child,
- int field_offset = -1);
- void SetInternalReference(HeapObject* parent_obj,
- int parent,
- int index,
- Object* child,
- int field_offset = -1);
- void SetHiddenReference(HeapObject* parent_obj,
- int parent,
- int index,
- Object* child);
- void SetWeakReference(HeapObject* parent_obj,
- int parent,
- int index,
- Object* child_obj,
- int field_offset);
- void SetPropertyReference(HeapObject* parent_obj,
- int parent,
- String* reference_name,
- Object* child,
- const char* name_format_string = NULL,
- int field_offset = -1);
- void SetUserGlobalReference(Object* user_global);
- void SetRootGcRootsReference();
- void SetGcRootsReference(VisitorSynchronization::SyncTag tag);
- void SetGcSubrootReference(
- VisitorSynchronization::SyncTag tag, bool is_weak, Object* child);
- const char* GetStrongGcSubrootName(Object* object);
- void TagObject(Object* obj, const char* tag);
-
- HeapEntry* GetEntry(Object* obj);
-
- static inline HeapObject* GetNthGcSubrootObject(int delta);
- static inline int GetGcSubrootOrder(HeapObject* subroot);
-
- Heap* heap_;
- HeapSnapshot* snapshot_;
- HeapSnapshotsCollection* collection_;
- SnapshottingProgressReportingInterface* progress_;
- SnapshotFillerInterface* filler_;
- HeapObjectsSet objects_tags_;
- HeapObjectsSet strong_gc_subroot_names_;
-
- static HeapObject* const kGcRootsObject;
- static HeapObject* const kFirstGcSubrootObject;
- static HeapObject* const kLastGcSubrootObject;
-
- friend class IndexedReferencesExtractor;
- friend class GcSubrootsEnumerator;
- friend class RootsReferencesExtractor;
-
- DISALLOW_COPY_AND_ASSIGN(V8HeapExplorer);
-};
-
-
-class NativeGroupRetainedObjectInfo;
-
-
-// An implementation of the retained native objects extractor.
-class NativeObjectsExplorer {
- public:
- NativeObjectsExplorer(HeapSnapshot* snapshot,
- SnapshottingProgressReportingInterface* progress);
- virtual ~NativeObjectsExplorer();
- void AddRootEntries(SnapshotFillerInterface* filler);
- int EstimateObjectsCount();
- bool IterateAndExtractReferences(SnapshotFillerInterface* filler);
-
- private:
- void FillRetainedObjects();
- void FillImplicitReferences();
- List<HeapObject*>* GetListMaybeDisposeInfo(v8::RetainedObjectInfo* info);
- void SetNativeRootReference(v8::RetainedObjectInfo* info);
- void SetRootNativeRootsReference();
- void SetWrapperNativeReferences(HeapObject* wrapper,
- v8::RetainedObjectInfo* info);
- void VisitSubtreeWrapper(Object** p, uint16_t class_id);
-
- static uint32_t InfoHash(v8::RetainedObjectInfo* info) {
- return ComputeIntegerHash(static_cast<uint32_t>(info->GetHash()),
- v8::internal::kZeroHashSeed);
- }
- static bool RetainedInfosMatch(void* key1, void* key2) {
- return key1 == key2 ||
- (reinterpret_cast<v8::RetainedObjectInfo*>(key1))->IsEquivalent(
- reinterpret_cast<v8::RetainedObjectInfo*>(key2));
- }
- INLINE(static bool StringsMatch(void* key1, void* key2)) {
- return strcmp(reinterpret_cast<char*>(key1),
- reinterpret_cast<char*>(key2)) == 0;
- }
-
- NativeGroupRetainedObjectInfo* FindOrAddGroupInfo(const char* label);
-
- HeapSnapshot* snapshot_;
- HeapSnapshotsCollection* collection_;
- SnapshottingProgressReportingInterface* progress_;
- bool embedder_queried_;
- HeapObjectsSet in_groups_;
- // RetainedObjectInfo* -> List<HeapObject*>*
- HashMap objects_by_info_;
- HashMap native_groups_;
- HeapEntriesAllocator* synthetic_entries_allocator_;
- HeapEntriesAllocator* native_entries_allocator_;
- // Used during references extraction.
- SnapshotFillerInterface* filler_;
-
- static HeapThing const kNativesRootObject;
-
- friend class GlobalHandlesExtractor;
-
- DISALLOW_COPY_AND_ASSIGN(NativeObjectsExplorer);
-};
-
-
-class HeapSnapshotGenerator : public SnapshottingProgressReportingInterface {
- public:
- HeapSnapshotGenerator(HeapSnapshot* snapshot,
- v8::ActivityControl* control);
- bool GenerateSnapshot();
-
- private:
- bool FillReferences();
- void ProgressStep();
- bool ProgressReport(bool force = false);
- void SetProgressTotal(int iterations_count);
-
- HeapSnapshot* snapshot_;
- v8::ActivityControl* control_;
- V8HeapExplorer v8_heap_explorer_;
- NativeObjectsExplorer dom_explorer_;
- // Mapping from HeapThing pointers to HeapEntry* pointers.
- HeapEntriesMap entries_;
- // Used during snapshot generation.
- int progress_counter_;
- int progress_total_;
-
- DISALLOW_COPY_AND_ASSIGN(HeapSnapshotGenerator);
-};
-
-class OutputStreamWriter;
-
-class HeapSnapshotJSONSerializer {
- public:
- explicit HeapSnapshotJSONSerializer(HeapSnapshot* snapshot)
- : snapshot_(snapshot),
- strings_(ObjectsMatch),
- next_node_id_(1),
- next_string_id_(1),
- writer_(NULL) {
- }
- void Serialize(v8::OutputStream* stream);
-
- private:
- INLINE(static bool ObjectsMatch(void* key1, void* key2)) {
- return key1 == key2;
- }
-
- INLINE(static uint32_t ObjectHash(const void* key)) {
- return ComputeIntegerHash(
- static_cast<uint32_t>(reinterpret_cast<uintptr_t>(key)),
- v8::internal::kZeroHashSeed);
- }
-
- HeapSnapshot* CreateFakeSnapshot();
- int GetStringId(const char* s);
- int entry_index(HeapEntry* e) { return e->index() * kNodeFieldsCount; }
- void SerializeEdge(HeapGraphEdge* edge, bool first_edge);
- void SerializeEdges();
- void SerializeImpl();
- void SerializeNode(HeapEntry* entry);
- void SerializeNodes();
- void SerializeSnapshot();
- void SerializeString(const unsigned char* s);
- void SerializeStrings();
- void SortHashMap(HashMap* map, List<HashMap::Entry*>* sorted_entries);
-
- static const int kEdgeFieldsCount;
- static const int kNodeFieldsCount;
-
- HeapSnapshot* snapshot_;
- HashMap strings_;
- int next_node_id_;
- int next_string_id_;
- OutputStreamWriter* writer_;
-
- friend class HeapSnapshotJSONSerializerEnumerator;
- friend class HeapSnapshotJSONSerializerIterator;
-
- DISALLOW_COPY_AND_ASSIGN(HeapSnapshotJSONSerializer);
-};
-
} } // namespace v8::internal
#endif // V8_PROFILE_GENERATOR_H_
diff --git a/src/3rdparty/v8/src/property-details.h b/src/3rdparty/v8/src/property-details.h
index 64e3205..510e985 100644
--- a/src/3rdparty/v8/src/property-details.h
+++ b/src/3rdparty/v8/src/property-details.h
@@ -38,6 +38,10 @@ enum PropertyAttributes {
READ_ONLY = v8::ReadOnly,
DONT_ENUM = v8::DontEnum,
DONT_DELETE = v8::DontDelete,
+
+ SEALED = DONT_ENUM | DONT_DELETE,
+ FROZEN = SEALED | READ_ONLY,
+
ABSENT = 16 // Used in runtime to indicate a property is absent.
// ABSENT can never be stored in or returned from a descriptor's attributes
// bitfield. It is only used as a return value meaning the attributes of
diff --git a/src/3rdparty/v8/src/property.cc b/src/3rdparty/v8/src/property.cc
index d05ef2b..c2ea422 100644
--- a/src/3rdparty/v8/src/property.cc
+++ b/src/3rdparty/v8/src/property.cc
@@ -63,7 +63,7 @@ void LookupResult::Print(FILE* out) {
break;
case FIELD:
FPrintF(out, " -type = field\n");
- FPrintF(out, " -index = %d", GetFieldIndex());
+ FPrintF(out, " -index = %d", GetFieldIndex().field_index());
FPrintF(out, "\n");
break;
case CALLBACKS:
diff --git a/src/3rdparty/v8/src/property.h b/src/3rdparty/v8/src/property.h
index 3faa28b..941b51d 100644
--- a/src/3rdparty/v8/src/property.h
+++ b/src/3rdparty/v8/src/property.h
@@ -48,9 +48,9 @@ class Descriptor BASE_EMBEDDED {
return Smi::cast(value)->value();
}
- MUST_USE_RESULT MaybeObject* KeyToSymbol() {
- if (!StringShape(key_).IsSymbol()) {
- MaybeObject* maybe_result = HEAP->LookupSymbol(key_);
+ MUST_USE_RESULT MaybeObject* KeyToInternalizedString() {
+ if (!StringShape(key_).IsInternalized()) {
+ MaybeObject* maybe_result = HEAP->InternalizeString(key_);
if (!maybe_result->To(&key_)) return maybe_result;
}
return key_;
@@ -132,6 +132,56 @@ class CallbacksDescriptor: public Descriptor {
};
+// Holds a property index value, distinguishing whether it is a field index or
+// an index inside the object header.
+class PropertyIndex {
+ public:
+ static PropertyIndex NewFieldIndex(int index) {
+ return PropertyIndex(index, false);
+ }
+ static PropertyIndex NewHeaderIndex(int index) {
+ return PropertyIndex(index, true);
+ }
+
+ bool is_field_index() { return (index_ & kHeaderIndexBit) == 0; }
+ bool is_header_index() { return (index_ & kHeaderIndexBit) != 0; }
+
+ int field_index() {
+ ASSERT(is_field_index());
+ return value();
+ }
+ int header_index() {
+ ASSERT(is_header_index());
+ return value();
+ }
+
+ bool is_inobject(Handle<JSObject> holder) {
+ if (is_header_index()) return true;
+ return field_index() < holder->map()->inobject_properties();
+ }
+
+ int translate(Handle<JSObject> holder) {
+ if (is_header_index()) return header_index();
+ int index = field_index() - holder->map()->inobject_properties();
+ if (index >= 0) return index;
+ return index + holder->map()->instance_size() / kPointerSize;
+ }
+
+ private:
+ static const int kHeaderIndexBit = 1 << 31;
+ static const int kIndexMask = ~kHeaderIndexBit;
+
+ int value() { return index_ & kIndexMask; }
+
+ PropertyIndex(int index, bool is_header_based)
+ : index_(index | (is_header_based ? kHeaderIndexBit : 0)) {
+ ASSERT(index <= kIndexMask);
+ }
+
+ int index_;
+};
+
+
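The new class packs the field/header distinction into the top bit of a single int, so a PropertyIndex stays pointer-free and cheap to copy. Traced by hand (values hypothetical):

    PropertyIndex f = PropertyIndex::NewFieldIndex(3);   // index_ == 3
    ASSERT(f.is_field_index() && f.field_index() == 3);
    PropertyIndex h = PropertyIndex::NewHeaderIndex(3);  // index_ == 3 | kHeaderIndexBit
    ASSERT(h.is_header_index() && h.header_index() == 3);
    // translate() then maps a field index past the in-object area onto the
    // out-of-object properties backing store (a non-negative result), and an
    // in-object one onto a negative offset rebased by the instance size.
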
class LookupResult BASE_EMBEDDED {
public:
explicit LookupResult(Isolate* isolate)
@@ -145,10 +195,12 @@ class LookupResult BASE_EMBEDDED {
}
~LookupResult() {
- ASSERT(isolate_->top_lookup_result() == this);
- isolate_->SetTopLookupResult(next_);
+ ASSERT(isolate()->top_lookup_result() == this);
+ isolate()->SetTopLookupResult(next_);
}
+ Isolate* isolate() const { return isolate_; }
+
void DescriptorResult(JSObject* holder, PropertyDetails details, int number) {
lookup_type_ = DESCRIPTOR_TYPE;
holder_ = holder;
@@ -163,16 +215,6 @@ class LookupResult BASE_EMBEDDED {
number_ = number;
}
- void ConstantResult(JSObject* holder) {
- lookup_type_ = CONSTANT_TYPE;
- holder_ = holder;
- details_ =
- PropertyDetails(static_cast<PropertyAttributes>(DONT_ENUM |
- DONT_DELETE),
- CALLBACKS);
- number_ = -1;
- }
-
void DictionaryResult(JSObject* holder, int entry) {
lookup_type_ = DICTIONARY_TYPE;
holder_ = holder;
@@ -272,13 +314,33 @@ class LookupResult BASE_EMBEDDED {
return IsFound() && !IsTransition();
}
+ bool IsDataProperty() {
+ switch (type()) {
+ case FIELD:
+ case NORMAL:
+ case CONSTANT_FUNCTION:
+ return true;
+ case CALLBACKS: {
+ Object* callback = GetCallbackObject();
+ return callback->IsAccessorInfo() || callback->IsForeign();
+ }
+ case HANDLER:
+ case INTERCEPTOR:
+ case TRANSITION:
+ case NONEXISTENT:
+ return false;
+ }
+ UNREACHABLE();
+ return false;
+ }
+
bool IsCacheable() { return cacheable_; }
void DisallowCaching() { cacheable_ = false; }
Object* GetLazyValue() {
switch (type()) {
case FIELD:
- return holder()->FastPropertyAt(GetFieldIndex());
+ return holder()->FastPropertyAt(GetFieldIndex().field_index());
case NORMAL: {
Object* value;
value = holder()->property_dictionary()->ValueAt(GetDictionaryEntry());
@@ -289,9 +351,15 @@ class LookupResult BASE_EMBEDDED {
}
case CONSTANT_FUNCTION:
return GetConstantFunction();
- default:
- return Isolate::Current()->heap()->the_hole_value();
+ case CALLBACKS:
+ case HANDLER:
+ case INTERCEPTOR:
+ case TRANSITION:
+ case NONEXISTENT:
+ return isolate()->heap()->the_hole_value();
}
+ UNREACHABLE();
+ return NULL;
}
Map* GetTransitionTarget() {
@@ -334,10 +402,11 @@ class LookupResult BASE_EMBEDDED {
return number_;
}
- int GetFieldIndex() {
+ PropertyIndex GetFieldIndex() {
ASSERT(lookup_type_ == DESCRIPTOR_TYPE);
ASSERT(IsField());
- return Descriptor::IndexFromValue(GetValue());
+ return PropertyIndex::NewFieldIndex(
+ Descriptor::IndexFromValue(GetValue()));
}
int GetLocalFieldIndexFromMap(Map* map) {
@@ -362,10 +431,7 @@ class LookupResult BASE_EMBEDDED {
}
Object* GetCallbackObject() {
- if (lookup_type_ == CONSTANT_TYPE) {
- return HEAP->prototype_accessors();
- }
- ASSERT(!IsTransition());
+ ASSERT(type() == CALLBACKS && !IsTransition());
return GetValue();
}
@@ -401,8 +467,7 @@ class LookupResult BASE_EMBEDDED {
TRANSITION_TYPE,
DICTIONARY_TYPE,
HANDLER_TYPE,
- INTERCEPTOR_TYPE,
- CONSTANT_TYPE
+ INTERCEPTOR_TYPE
} lookup_type_;
JSReceiver* holder_;
diff --git a/src/3rdparty/v8/src/regexp-macro-assembler.cc b/src/3rdparty/v8/src/regexp-macro-assembler.cc
index 82ba34d..3ebf5a8 100644
--- a/src/3rdparty/v8/src/regexp-macro-assembler.cc
+++ b/src/3rdparty/v8/src/regexp-macro-assembler.cc
@@ -77,14 +77,14 @@ const byte* NativeRegExpMacroAssembler::StringCharacterPosition(
ASSERT(subject->IsExternalString() || subject->IsSeqString());
ASSERT(start_index >= 0);
ASSERT(start_index <= subject->length());
- if (subject->IsAsciiRepresentation()) {
+ if (subject->IsOneByteRepresentation()) {
const byte* address;
if (StringShape(subject).IsExternal()) {
- const char* data = ExternalAsciiString::cast(subject)->GetChars();
+ const uint8_t* data = ExternalAsciiString::cast(subject)->GetChars();
address = reinterpret_cast<const byte*>(data);
} else {
- ASSERT(subject->IsSeqAsciiString());
- char* data = SeqAsciiString::cast(subject)->GetChars();
+ ASSERT(subject->IsSeqOneByteString());
+ const uint8_t* data = SeqOneByteString::cast(subject)->GetChars();
address = reinterpret_cast<const byte*>(data);
}
return address + start_index;
@@ -133,7 +133,7 @@ NativeRegExpMacroAssembler::Result NativeRegExpMacroAssembler::Match(
slice_offset = slice->offset();
}
// Ensure that the underlying string has the same ASCII-ness.
- bool is_ascii = subject_ptr->IsAsciiRepresentation();
+ bool is_ascii = subject_ptr->IsOneByteRepresentation();
ASSERT(subject_ptr->IsExternalString() || subject_ptr->IsSeqString());
// String is now either Sequential or External
int char_size_shift = is_ascii ? 0 : 1;
@@ -210,6 +210,26 @@ const byte NativeRegExpMacroAssembler::word_character_map[] = {
0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, // 'h' - 'o'
0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, // 'p' - 'w'
0xffu, 0xffu, 0xffu, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, // 'x' - 'z'
+ // Latin-1 range
+ 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,
+ 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,
+ 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,
+ 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,
+
+ 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,
+ 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,
+ 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,
+ 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,
+
+ 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,
+ 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,
+ 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,
+ 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,
+
+ 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,
+ 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,
+ 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,
+ 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u,
};
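
With the table widened from 128 to 256 entries (see the header change below), generated code can index it with any one-byte character without a range check; every Latin-1 entry above 0x7f stays 0x00 because \w matches only ASCII digits, letters and underscore. A sketch of the lookup:

    // Sketch; mirrors how generated RegExp code tests \w membership.
    bool IsRegExpWordChar(uint8_t c) {
      return NativeRegExpMacroAssembler::word_character_map[c] != 0;
    }
    // IsRegExpWordChar('a') == true
    // IsRegExpWordChar(0xE9) == false   ('é' in Latin-1)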
diff --git a/src/3rdparty/v8/src/regexp-macro-assembler.h b/src/3rdparty/v8/src/regexp-macro-assembler.h
index bcf3673..211ab6b 100644
--- a/src/3rdparty/v8/src/regexp-macro-assembler.h
+++ b/src/3rdparty/v8/src/regexp-macro-assembler.h
@@ -244,10 +244,10 @@ class NativeRegExpMacroAssembler: public RegExpMacroAssembler {
static const byte* StringCharacterPosition(String* subject, int start_index);
- // Byte map of ASCII characters with a 0xff if the character is a word
+ // Byte map of one-byte characters with a 0xff if the character is a word
// character (digit, letter or underscore) and 0x00 otherwise.
// Used by generated RegExp code.
- static const byte word_character_map[128];
+ static const byte word_character_map[256];
static Address word_character_map_address() {
return const_cast<Address>(&word_character_map[0]);
diff --git a/src/3rdparty/v8/src/regexp.js b/src/3rdparty/v8/src/regexp.js
index a3675f0..2349ca7 100644
--- a/src/3rdparty/v8/src/regexp.js
+++ b/src/3rdparty/v8/src/regexp.js
@@ -132,21 +132,13 @@ function BuildResultFromMatchInfo(lastMatchInfo, s) {
var start = lastMatchInfo[CAPTURE0];
var end = lastMatchInfo[CAPTURE1];
var result = %_RegExpConstructResult(numResults, start, s);
- if (start + 1 == end) {
- result[0] = %_StringCharAt(s, start);
- } else {
- result[0] = %_SubString(s, start, end);
- }
+ result[0] = %_SubString(s, start, end);
var j = REGEXP_FIRST_CAPTURE + 2;
for (var i = 1; i < numResults; i++) {
start = lastMatchInfo[j++];
if (start != -1) {
end = lastMatchInfo[j];
- if (start + 1 == end) {
- result[i] = %_StringCharAt(s, start);
- } else {
- result[i] = %_SubString(s, start, end);
- }
+ result[i] = %_SubString(s, start, end);
}
j++;
}
@@ -161,6 +153,7 @@ function RegExpExecNoTests(regexp, string, start) {
lastMatchInfoOverride = null;
return BuildResultFromMatchInfo(matchInfo, string);
}
+ regexp.lastIndex = 0;
return null;
}
@@ -193,7 +186,7 @@ function RegExpExec(string) {
var matchIndices = %_RegExpExec(this, string, i, lastMatchInfo);
if (matchIndices === null) {
- if (global) this.lastIndex = 0;
+ this.lastIndex = 0;
return null;
}
@@ -256,7 +249,10 @@ function RegExpTest(string) {
%_Log('regexp', 'regexp-exec,%0r,%1S,%2i', [regexp, string, lastIndex]);
// matchIndices is either null or the lastMatchInfo array.
var matchIndices = %_RegExpExec(regexp, string, 0, lastMatchInfo);
- if (matchIndices === null) return false;
+ if (matchIndices === null) {
+ this.lastIndex = 0;
+ return false;
+ }
lastMatchInfoOverride = null;
return true;
}
@@ -266,7 +262,7 @@ function TrimRegExp(regexp) {
if (!%_ObjectEquals(regexp_key, regexp)) {
regexp_key = regexp;
regexp_val =
- new $RegExp(SubString(regexp.source, 2, regexp.source.length),
+ new $RegExp(%_SubString(regexp.source, 2, regexp.source.length),
(regexp.ignoreCase ? regexp.multiline ? "im" : "i"
: regexp.multiline ? "m" : ""));
}
@@ -296,9 +292,9 @@ function RegExpGetLastMatch() {
return OVERRIDE_MATCH(lastMatchInfoOverride);
}
var regExpSubject = LAST_SUBJECT(lastMatchInfo);
- return SubString(regExpSubject,
- lastMatchInfo[CAPTURE0],
- lastMatchInfo[CAPTURE1]);
+ return %_SubString(regExpSubject,
+ lastMatchInfo[CAPTURE0],
+ lastMatchInfo[CAPTURE1]);
}
@@ -317,7 +313,7 @@ function RegExpGetLastParen() {
var start = lastMatchInfo[CAPTURE(length - 2)];
var end = lastMatchInfo[CAPTURE(length - 1)];
if (start != -1 && end != -1) {
- return SubString(regExpSubject, start, end);
+ return %_SubString(regExpSubject, start, end);
}
return "";
}
@@ -334,7 +330,7 @@ function RegExpGetLeftContext() {
start_index = OVERRIDE_POS(override);
subject = OVERRIDE_SUBJECT(override);
}
- return SubString(subject, 0, start_index);
+ return %_SubString(subject, 0, start_index);
}
@@ -350,7 +346,7 @@ function RegExpGetRightContext() {
var match = OVERRIDE_MATCH(override);
start_index = OVERRIDE_POS(override) + match.length;
}
- return SubString(subject, start_index, subject.length);
+ return %_SubString(subject, start_index, subject.length);
}
@@ -370,7 +366,7 @@ function RegExpMakeCaptureGetter(n) {
var matchStart = lastMatchInfo[CAPTURE(index)];
var matchEnd = lastMatchInfo[CAPTURE(index + 1)];
if (matchStart == -1 || matchEnd == -1) return '';
- return SubString(LAST_SUBJECT(lastMatchInfo), matchStart, matchEnd);
+ return %_SubString(LAST_SUBJECT(lastMatchInfo), matchStart, matchEnd);
};
}
@@ -381,7 +377,7 @@ function RegExpMakeCaptureGetter(n) {
// pairs for the match and all the captured substrings), the invariant is
// that there are at least two capture indices. The array also contains
// the subject string for the last successful match.
-var lastMatchInfo = new InternalArray(
+var lastMatchInfo = new InternalPackedArray(
2, // REGEXP_NUMBER_OF_CAPTURES
"", // Last subject.
void 0, // Last input - settable with RegExpSetInput.
diff --git a/src/3rdparty/v8/src/rewriter.cc b/src/3rdparty/v8/src/rewriter.cc
index 6541546..44fe050 100644
--- a/src/3rdparty/v8/src/rewriter.cc
+++ b/src/3rdparty/v8/src/rewriter.cc
@@ -43,7 +43,9 @@ class Processor: public AstVisitor {
result_assigned_(false),
is_set_(false),
in_try_(false),
- factory_(isolate(), zone) { }
+ factory_(Isolate::Current(), zone) {
+ InitializeAstVisitor();
+ }
virtual ~Processor() { }
@@ -86,6 +88,8 @@ class Processor: public AstVisitor {
#undef DEF_VISIT
void VisitIterationStatement(IterationStatement* stmt);
+
+ DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
};
@@ -109,6 +113,13 @@ void Processor::VisitBlock(Block* node) {
}
+void Processor::VisitModuleStatement(ModuleStatement* node) {
+ bool set_after_body = is_set_;
+ Visit(node->body());
+ is_set_ = is_set_ && set_after_body;
+}
+
+
void Processor::VisitExpressionStatement(ExpressionStatement* node) {
// Rewrite : <x>; -> .result = <x>;
if (!is_set_ && !node->expression()->IsThrow()) {
@@ -242,7 +253,7 @@ bool Rewriter::Rewrite(CompilationInfo* info) {
ZoneList<Statement*>* body = function->body();
if (!body->is_empty()) {
Variable* result = scope->NewTemporary(
- info->isolate()->factory()->result_symbol());
+ info->isolate()->factory()->result_string());
Processor processor(result, info->zone());
processor.Process(body);
if (processor.HasStackOverflow()) return false;
@@ -257,7 +268,7 @@ bool Rewriter::Rewrite(CompilationInfo* info) {
// coincides with the end of the with scope which is the position of '1'.
int position = function->end_position();
VariableProxy* result_proxy = processor.factory()->NewVariableProxy(
- result->name(), false, Interface::NewValue(), position);
+ result->name(), false, result->interface(), position);
result_proxy->BindTo(result);
Statement* result_statement =
processor.factory()->NewReturnStatement(result_proxy);
diff --git a/src/3rdparty/v8/src/runtime-profiler.cc b/src/3rdparty/v8/src/runtime-profiler.cc
index 23f41fa..94a5650 100644
--- a/src/3rdparty/v8/src/runtime-profiler.cc
+++ b/src/3rdparty/v8/src/runtime-profiler.cc
@@ -140,6 +140,9 @@ static void GetICCounts(JSFunction* function,
void RuntimeProfiler::Optimize(JSFunction* function, const char* reason) {
ASSERT(function->IsOptimizable());
+ // If we are in manual mode, don't auto-optimize anything.
+ if (FLAG_manual_parallel_recompilation) return;
+
if (FLAG_trace_opt) {
PrintF("[marking ");
function->PrintName();
@@ -193,16 +196,9 @@ void RuntimeProfiler::AttemptOnStackReplacement(JSFunction* function) {
// Get the stack check stub code object to match against. We aren't
// prepared to generate it, but we don't expect to have to.
- bool found_code = false;
Code* stack_check_code = NULL;
- if (FLAG_count_based_interrupts) {
- InterruptStub interrupt_stub;
- found_code = interrupt_stub.FindCodeInCache(&stack_check_code);
- } else // NOLINT
- { // NOLINT
- StackCheckStub check_stub;
- found_code = check_stub.FindCodeInCache(&stack_check_code);
- }
+ InterruptStub interrupt_stub;
+ bool found_code = interrupt_stub.FindCodeInCache(&stack_check_code, isolate_);
if (found_code) {
Code* replacement_code =
isolate_->builtins()->builtin(Builtins::kOnStackReplacement);
@@ -376,12 +372,6 @@ void RuntimeProfiler::OptimizeNow() {
}
-void RuntimeProfiler::NotifyTick() {
- if (FLAG_count_based_interrupts) return;
- isolate_->stack_guard()->RequestRuntimeProfilerTick();
-}
-
-
void RuntimeProfiler::SetUp() {
ASSERT(has_been_globally_set_up_);
if (!FLAG_watch_ic_patching) {
@@ -440,11 +430,6 @@ void RuntimeProfiler::HandleWakeUp(Isolate* isolate) {
}
-bool RuntimeProfiler::IsSomeIsolateInJS() {
- return NoBarrier_Load(&state_) > 0;
-}
-
-
bool RuntimeProfiler::WaitForSomeIsolateToEnterJS() {
Atomic32 old_state = NoBarrier_CompareAndSwap(&state_, 0, -1);
ASSERT(old_state >= -1);
@@ -494,12 +479,4 @@ void RuntimeProfiler::UpdateSamplesAfterCompact(ObjectVisitor* visitor) {
}
-bool RuntimeProfilerRateLimiter::SuspendIfNecessary() {
- if (!RuntimeProfiler::IsSomeIsolateInJS()) {
- return RuntimeProfiler::WaitForSomeIsolateToEnterJS();
- }
- return false;
-}
-
-
} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/runtime-profiler.h b/src/3rdparty/v8/src/runtime-profiler.h
index ab6cb37..62c48c7 100644
--- a/src/3rdparty/v8/src/runtime-profiler.h
+++ b/src/3rdparty/v8/src/runtime-profiler.h
@@ -52,8 +52,6 @@ class RuntimeProfiler {
void OptimizeNow();
- void NotifyTick();
-
void SetUp();
void Reset();
void TearDown();
@@ -73,17 +71,12 @@ class RuntimeProfiler {
// Profiler thread interface.
//
- // IsSomeIsolateInJS():
- // The profiler thread can query whether some isolate is currently
- // running JavaScript code.
- //
// WaitForSomeIsolateToEnterJS():
// When no isolates have been running JavaScript code for some time, the
// profiler thread suspends itself by calling the wait function. The
// wait function returns true after it has waited, or false immediately.
// While the function was waiting, the profiler may have been
// disabled, so it *must check* whether it is allowed to continue.
- static bool IsSomeIsolateInJS();
static bool WaitForSomeIsolateToEnterJS();
// Stops the runtime profiler thread when profiling support is being
@@ -136,24 +129,6 @@ class RuntimeProfiler {
};
-// Rate limiter intended to be used in the profiler thread.
-class RuntimeProfilerRateLimiter BASE_EMBEDDED {
- public:
- RuntimeProfilerRateLimiter() {}
-
- // Suspends the current thread (which must be the profiler thread)
- // when not executing JavaScript to minimize CPU usage. Returns
- // whether the thread was suspended (in which case the caller must check
- // whether profiling is still active).
- //
- // Does nothing when runtime profiling is not enabled.
- bool SuspendIfNecessary();
-
- private:
- DISALLOW_COPY_AND_ASSIGN(RuntimeProfilerRateLimiter);
-};
-
-
// Implementation of RuntimeProfiler inline functions.
void RuntimeProfiler::IsolateEnteredJS(Isolate* isolate) {
diff --git a/src/3rdparty/v8/src/runtime.cc b/src/3rdparty/v8/src/runtime.cc
index 08b0cd1..191e717 100644
--- a/src/3rdparty/v8/src/runtime.cc
+++ b/src/3rdparty/v8/src/runtime.cc
@@ -45,10 +45,10 @@
#include "global-handles.h"
#include "isolate-inl.h"
#include "jsregexp.h"
+#include "jsregexp-inl.h"
#include "json-parser.h"
#include "json-stringifier.h"
#include "liveedit.h"
-#include "liveobjectlist-inl.h"
#include "misc-intrinsics.h"
#include "parser.h"
#include "platform.h"
@@ -58,6 +58,7 @@
#include "smart-pointers.h"
#include "string-search.h"
#include "stub-cache.h"
+#include "uri.h"
#include "v8threads.h"
#include "vm-state-inl.h"
@@ -286,40 +287,41 @@ static Handle<Map> ComputeObjectLiteralMap(
Isolate* isolate = context->GetIsolate();
int properties_length = constant_properties->length();
int number_of_properties = properties_length / 2;
- // Check that there are only symbols and array indices among keys.
- int number_of_symbol_keys = 0;
+ // Check that there are only internal strings and array indices among keys.
+ int number_of_string_keys = 0;
for (int p = 0; p != properties_length; p += 2) {
Object* key = constant_properties->get(p);
uint32_t element_index = 0;
- if (key->IsSymbol()) {
- number_of_symbol_keys++;
+ if (key->IsInternalizedString()) {
+ number_of_string_keys++;
} else if (key->ToArrayIndex(&element_index)) {
// An index key does not require space in the property backing store.
number_of_properties--;
} else {
- // Bail out as a non-symbol non-index key makes caching impossible.
+ // Bail out as a non-internalized-string non-index key makes caching
+ // impossible.
// ASSERT to make sure that the if condition after the loop is false.
- ASSERT(number_of_symbol_keys != number_of_properties);
+ ASSERT(number_of_string_keys != number_of_properties);
break;
}
}
- // If we only have symbols and array indices among keys then we can
- // use the map cache in the native context.
+ // If we only have internalized strings and array indices among keys then we
+ // can use the map cache in the native context.
const int kMaxKeys = 10;
- if ((number_of_symbol_keys == number_of_properties) &&
- (number_of_symbol_keys < kMaxKeys)) {
+ if ((number_of_string_keys == number_of_properties) &&
+ (number_of_string_keys < kMaxKeys)) {
// Create the fixed array with the key.
Handle<FixedArray> keys =
- isolate->factory()->NewFixedArray(number_of_symbol_keys);
- if (number_of_symbol_keys > 0) {
+ isolate->factory()->NewFixedArray(number_of_string_keys);
+ if (number_of_string_keys > 0) {
int index = 0;
for (int p = 0; p < properties_length; p += 2) {
Object* key = constant_properties->get(p);
- if (key->IsSymbol()) {
+ if (key->IsInternalizedString()) {
keys->set(index++, key);
}
}
- ASSERT(index == number_of_symbol_keys);
+ ASSERT(index == number_of_string_keys);
}
*is_result_from_cache = true;
return isolate->factory()->ObjectLiteralMapFromCache(context, keys);
@@ -392,7 +394,7 @@ static Handle<Object> CreateObjectLiteralBoilerplate(
}
Handle<Object> result;
uint32_t element_index = 0;
- if (key->IsSymbol()) {
+ if (key->IsInternalizedString()) {
if (Handle<String>::cast(key)->AsArrayIndex(&element_index)) {
// Array index as string (uint32).
result = JSObject::SetOwnElement(
@@ -668,7 +670,22 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateArrayLiteralShallow) {
isolate->heap()->fixed_cow_array_map()) {
isolate->counters()->cow_arrays_created_runtime()->Increment();
}
- return isolate->heap()->CopyJSObject(JSObject::cast(*boilerplate));
+
+ JSObject* boilerplate_object = JSObject::cast(*boilerplate);
+ AllocationSiteMode mode = AllocationSiteInfo::GetMode(
+ boilerplate_object->GetElementsKind());
+ if (mode == TRACK_ALLOCATION_SITE) {
+ return isolate->heap()->CopyJSObjectWithAllocationSite(boilerplate_object);
+ }
+
+ return isolate->heap()->CopyJSObject(boilerplate_object);
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateSymbol) {
+ NoHandleAllocation ha(isolate);
+ ASSERT(args.length() == 0);
+ return isolate->heap()->AllocateSymbol();
}
@@ -753,7 +770,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetAdd) {
HandleScope scope(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_HANDLE_CHECKED(JSSet, holder, 0);
- Handle<Object> key(args[1]);
+ Handle<Object> key(args[1], isolate);
Handle<ObjectHashSet> table(ObjectHashSet::cast(holder->table()));
table = ObjectHashSetAdd(table, key);
holder->set_table(*table);
@@ -765,7 +782,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetHas) {
HandleScope scope(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_HANDLE_CHECKED(JSSet, holder, 0);
- Handle<Object> key(args[1]);
+ Handle<Object> key(args[1], isolate);
Handle<ObjectHashSet> table(ObjectHashSet::cast(holder->table()));
return isolate->heap()->ToBoolean(table->Contains(*key));
}
@@ -775,7 +792,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetDelete) {
HandleScope scope(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_HANDLE_CHECKED(JSSet, holder, 0);
- Handle<Object> key(args[1]);
+ Handle<Object> key(args[1], isolate);
Handle<ObjectHashSet> table(ObjectHashSet::cast(holder->table()));
table = ObjectHashSetRemove(table, key);
holder->set_table(*table);
@@ -808,7 +825,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_MapGet) {
CONVERT_ARG_HANDLE_CHECKED(JSMap, holder, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
Handle<ObjectHashTable> table(ObjectHashTable::cast(holder->table()));
- Handle<Object> lookup(table->Lookup(*key));
+ Handle<Object> lookup(table->Lookup(*key), isolate);
return lookup->IsTheHole() ? isolate->heap()->undefined_value() : *lookup;
}
@@ -819,7 +836,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_MapHas) {
CONVERT_ARG_HANDLE_CHECKED(JSMap, holder, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
Handle<ObjectHashTable> table(ObjectHashTable::cast(holder->table()));
- Handle<Object> lookup(table->Lookup(*key));
+ Handle<Object> lookup(table->Lookup(*key), isolate);
return isolate->heap()->ToBoolean(!lookup->IsTheHole());
}
@@ -830,7 +847,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_MapDelete) {
CONVERT_ARG_HANDLE_CHECKED(JSMap, holder, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
Handle<ObjectHashTable> table(ObjectHashTable::cast(holder->table()));
- Handle<Object> lookup(table->Lookup(*key));
+ Handle<Object> lookup(table->Lookup(*key), isolate);
Handle<ObjectHashTable> new_table =
PutIntoObjectHashTable(table, key, isolate->factory()->the_hole_value());
holder->set_table(*new_table);
@@ -860,10 +877,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_MapGetSize) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_WeakMapInitialize) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(JSWeakMap, weakmap, 0);
+static JSWeakMap* WeakMapInitialize(Isolate* isolate,
+ Handle<JSWeakMap> weakmap) {
ASSERT(weakmap->map()->inobject_properties() == 0);
Handle<ObjectHashTable> table = isolate->factory()->NewObjectHashTable(0);
weakmap->set_table(*table);
@@ -872,13 +887,21 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_WeakMapInitialize) {
}
+RUNTIME_FUNCTION(MaybeObject*, Runtime_WeakMapInitialize) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSWeakMap, weakmap, 0);
+ return WeakMapInitialize(isolate, weakmap);
+}
+
+
RUNTIME_FUNCTION(MaybeObject*, Runtime_WeakMapGet) {
HandleScope scope(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_HANDLE_CHECKED(JSWeakMap, weakmap, 0);
CONVERT_ARG_HANDLE_CHECKED(JSReceiver, key, 1);
Handle<ObjectHashTable> table(ObjectHashTable::cast(weakmap->table()));
- Handle<Object> lookup(table->Lookup(*key));
+ Handle<Object> lookup(table->Lookup(*key), isolate);
return lookup->IsTheHole() ? isolate->heap()->undefined_value() : *lookup;
}
@@ -889,7 +912,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_WeakMapHas) {
CONVERT_ARG_HANDLE_CHECKED(JSWeakMap, weakmap, 0);
CONVERT_ARG_HANDLE_CHECKED(JSReceiver, key, 1);
Handle<ObjectHashTable> table(ObjectHashTable::cast(weakmap->table()));
- Handle<Object> lookup(table->Lookup(*key));
+ Handle<Object> lookup(table->Lookup(*key), isolate);
return isolate->heap()->ToBoolean(!lookup->IsTheHole());
}
@@ -900,7 +923,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_WeakMapDelete) {
CONVERT_ARG_HANDLE_CHECKED(JSWeakMap, weakmap, 0);
CONVERT_ARG_HANDLE_CHECKED(JSReceiver, key, 1);
Handle<ObjectHashTable> table(ObjectHashTable::cast(weakmap->table()));
- Handle<Object> lookup(table->Lookup(*key));
+ Handle<Object> lookup(table->Lookup(*key), isolate);
Handle<ObjectHashTable> new_table =
PutIntoObjectHashTable(table, key, isolate->factory()->the_hole_value());
weakmap->set_table(*new_table);
@@ -913,7 +936,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_WeakMapSet) {
ASSERT(args.length() == 3);
CONVERT_ARG_HANDLE_CHECKED(JSWeakMap, weakmap, 0);
CONVERT_ARG_HANDLE_CHECKED(JSReceiver, key, 1);
- Handle<Object> value(args[2]);
+ Handle<Object> value(args[2], isolate);
Handle<ObjectHashTable> table(ObjectHashTable::cast(weakmap->table()));
Handle<ObjectHashTable> new_table = PutIntoObjectHashTable(table, key, value);
weakmap->set_table(*new_table);
@@ -922,7 +945,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_WeakMapSet) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_ClassOf) {
- NoHandleAllocation ha;
+ NoHandleAllocation ha(isolate);
ASSERT(args.length() == 1);
Object* obj = args[0];
if (!obj->IsJSObject()) return isolate->heap()->null_value();
@@ -931,7 +954,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ClassOf) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_GetPrototype) {
- NoHandleAllocation ha;
+ NoHandleAllocation ha(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSReceiver, input_obj, 0);
Object* obj = input_obj;
@@ -940,12 +963,12 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetPrototype) {
do {
if (obj->IsAccessCheckNeeded() &&
!isolate->MayNamedAccess(JSObject::cast(obj),
- isolate->heap()->Proto_symbol(),
+ isolate->heap()->proto_string(),
v8::ACCESS_GET)) {
isolate->ReportFailedAccessCheck(JSObject::cast(obj), v8::ACCESS_GET);
return isolate->heap()->undefined_value();
}
- obj = obj->GetPrototype();
+ obj = obj->GetPrototype(isolate);
} while (obj->IsJSObject() &&
JSObject::cast(obj)->map()->is_hidden_prototype());
return obj;
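
// --- illustrative sketch ---------------------------------------------------
// Runtime_GetPrototype keeps walking while the prototype's map is marked
// "hidden", so API-installed hidden prototypes never become visible to
// script. A reduced model of that loop (Obj and its fields are stand-ins):
struct Obj {
  Obj* prototype;
  bool hidden;  // plays the role of map()->is_hidden_prototype()
};

Obj* VisiblePrototype(Obj* receiver) {
  Obj* obj = receiver->prototype;
  while (obj != nullptr && obj->hidden) obj = obj->prototype;
  return obj;  // first prototype that script is allowed to observe
}
// ---------------------------------------------------------------------------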
@@ -953,13 +976,13 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetPrototype) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_IsInPrototypeChain) {
- NoHandleAllocation ha;
+ NoHandleAllocation ha(isolate);
ASSERT(args.length() == 2);
// See ECMA-262, section 15.3.5.3, page 88 (steps 5 - 8).
Object* O = args[0];
Object* V = args[1];
while (true) {
- Object* prototype = V->GetPrototype();
+ Object* prototype = V->GetPrototype(isolate);
if (prototype->IsNull()) return isolate->heap()->false_value();
if (O == prototype) return isolate->heap()->true_value();
V = prototype;
@@ -967,104 +990,107 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_IsInPrototypeChain) {
}
-// Recursively traverses hidden prototypes if property is not found
-static void GetOwnPropertyImplementation(JSObject* obj,
- String* name,
- LookupResult* result) {
- obj->LocalLookupRealNamedProperty(name, result);
+static bool CheckAccessException(Object* callback,
+ v8::AccessType access_type) {
+ if (callback->IsAccessorInfo()) {
+ AccessorInfo* info = AccessorInfo::cast(callback);
+ return
+ (access_type == v8::ACCESS_HAS &&
+ (info->all_can_read() || info->all_can_write())) ||
+ (access_type == v8::ACCESS_GET && info->all_can_read()) ||
+ (access_type == v8::ACCESS_SET && info->all_can_write());
+ }
+ return false;
+}
- if (result->IsFound()) return;
- Object* proto = obj->GetPrototype();
- if (proto->IsJSObject() &&
- JSObject::cast(proto)->map()->is_hidden_prototype())
- GetOwnPropertyImplementation(JSObject::cast(proto),
- name, result);
+template<class Key>
+static bool CheckGenericAccess(
+ JSObject* receiver,
+ JSObject* holder,
+ Key key,
+ v8::AccessType access_type,
+ bool (Isolate::*mayAccess)(JSObject*, Key, v8::AccessType)) {
+ Isolate* isolate = receiver->GetIsolate();
+ for (JSObject* current = receiver;
+ true;
+ current = JSObject::cast(current->GetPrototype())) {
+ if (current->IsAccessCheckNeeded() &&
+ !(isolate->*mayAccess)(current, key, access_type)) {
+ return false;
+ }
+ if (current == holder) break;
+ }
+ return true;
}
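
// --- illustrative sketch ---------------------------------------------------
// CheckGenericAccess above folds the named and indexed access checks into one
// prototype-chain walk by taking the Isolate predicate as a pointer to member
// function. Self-contained model (Node and Policy are stand-ins; like the
// real code, the loop assumes `holder` is on the receiver's chain):
#include <cstdint>

struct Node { Node* prototype; bool needs_check; };

struct Policy {
  bool MayNamed(Node* n, const char* /*key*/) { return !n->needs_check; }
  bool MayIndexed(Node* n, uint32_t /*key*/) { return !n->needs_check; }
};

template <class Key>
bool CheckChain(Policy* policy, Node* receiver, Node* holder, Key key,
                bool (Policy::*may_access)(Node*, Key)) {
  for (Node* current = receiver; ; current = current->prototype) {
    if (current->needs_check && !(policy->*may_access)(current, key)) {
      return false;  // some object on the chain denied the access
    }
    if (current == holder) break;  // checked every object up to the holder
  }
  return true;
}
// Usage: CheckChain(&policy, obj, obj, "name", &Policy::MayNamed);
// ---------------------------------------------------------------------------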
-static bool CheckAccessException(LookupResult* result,
- v8::AccessType access_type) {
- if (result->type() == CALLBACKS) {
- Object* callback = result->GetCallbackObject();
- if (callback->IsAccessorInfo()) {
- AccessorInfo* info = AccessorInfo::cast(callback);
- bool can_access =
- (access_type == v8::ACCESS_HAS &&
- (info->all_can_read() || info->all_can_write())) ||
- (access_type == v8::ACCESS_GET && info->all_can_read()) ||
- (access_type == v8::ACCESS_SET && info->all_can_write());
- return can_access;
- }
+enum AccessCheckResult {
+ ACCESS_FORBIDDEN,
+ ACCESS_ALLOWED,
+ ACCESS_ABSENT
+};
+
+
+static AccessCheckResult CheckElementAccess(
+ JSObject* obj,
+ uint32_t index,
+ v8::AccessType access_type) {
+ // TODO(1095): we should traverse the hidden prototype hierarchy as well.
+ if (CheckGenericAccess(
+ obj, obj, index, access_type, &Isolate::MayIndexedAccess)) {
+ return ACCESS_ALLOWED;
}
- return false;
+ obj->GetIsolate()->ReportFailedAccessCheck(obj, access_type);
+ return ACCESS_FORBIDDEN;
}
-static bool CheckAccess(JSObject* obj,
- String* name,
- LookupResult* result,
- v8::AccessType access_type) {
- ASSERT(result->IsProperty());
-
- JSObject* holder = result->holder();
- JSObject* current = obj;
- Isolate* isolate = obj->GetIsolate();
- while (true) {
- if (current->IsAccessCheckNeeded() &&
- !isolate->MayNamedAccess(current, name, access_type)) {
- // Access check callback denied the access, but some properties
- // can have a special permissions which override callbacks descision
- // (currently see v8::AccessControl).
- break;
- }
+static AccessCheckResult CheckPropertyAccess(
+ JSObject* obj,
+ String* name,
+ v8::AccessType access_type) {
+ uint32_t index;
+ if (name->AsArrayIndex(&index)) {
+ return CheckElementAccess(obj, index, access_type);
+ }
- if (current == holder) {
- return true;
- }
+ LookupResult lookup(obj->GetIsolate());
+ obj->LocalLookup(name, &lookup, true);
- current = JSObject::cast(current->GetPrototype());
+ if (!lookup.IsProperty()) return ACCESS_ABSENT;
+ if (CheckGenericAccess<Object*>(
+ obj, lookup.holder(), name, access_type, &Isolate::MayNamedAccess)) {
+ return ACCESS_ALLOWED;
}
+ // The access check callback denied the access, but some properties
+ // can have special permissions which override the callback's decision
+ // (see v8::AccessControl).
// API callbacks can have per callback access exceptions.
- switch (result->type()) {
- case CALLBACKS: {
- if (CheckAccessException(result, access_type)) {
- return true;
+ switch (lookup.type()) {
+ case CALLBACKS:
+ if (CheckAccessException(lookup.GetCallbackObject(), access_type)) {
+ return ACCESS_ALLOWED;
}
break;
- }
- case INTERCEPTOR: {
+ case INTERCEPTOR:
// If the object has an interceptor, try real named properties.
// Overwrite the result to fetch the correct property later.
- holder->LookupRealNamedProperty(name, result);
- if (result->IsProperty()) {
- if (CheckAccessException(result, access_type)) {
- return true;
+ lookup.holder()->LookupRealNamedProperty(name, &lookup);
+ if (lookup.IsProperty() && lookup.IsPropertyCallbacks()) {
+ if (CheckAccessException(lookup.GetCallbackObject(), access_type)) {
+ return ACCESS_ALLOWED;
}
}
break;
- }
default:
break;
}
- isolate->ReportFailedAccessCheck(current, access_type);
- return false;
-}
-
-
-// TODO(1095): we should traverse hidden prototype hierachy as well.
-static bool CheckElementAccess(JSObject* obj,
- uint32_t index,
- v8::AccessType access_type) {
- if (obj->IsAccessCheckNeeded() &&
- !obj->GetIsolate()->MayIndexedAccess(obj, index, access_type)) {
- return false;
- }
-
- return true;
+ obj->GetIsolate()->ReportFailedAccessCheck(obj, access_type);
+ return ACCESS_FORBIDDEN;
}
@@ -1085,141 +1111,44 @@ static MaybeObject* GetOwnProperty(Isolate* isolate,
Handle<JSObject> obj,
Handle<String> name) {
Heap* heap = isolate->heap();
- Handle<FixedArray> elms = isolate->factory()->NewFixedArray(DESCRIPTOR_SIZE);
- Handle<JSArray> desc = isolate->factory()->NewJSArrayWithElements(elms);
- LookupResult result(isolate);
- // This could be an element.
- uint32_t index;
- if (name->AsArrayIndex(&index)) {
- switch (obj->GetLocalElementType(index)) {
- case JSObject::UNDEFINED_ELEMENT:
- return heap->undefined_value();
-
- case JSObject::STRING_CHARACTER_ELEMENT: {
- // Special handling of string objects according to ECMAScript 5
- // 15.5.5.2. Note that this might be a string object with elements
- // other than the actual string value. This is covered by the
- // subsequent cases.
- Handle<JSValue> js_value = Handle<JSValue>::cast(obj);
- Handle<String> str(String::cast(js_value->value()));
- Handle<String> substr = SubString(str, index, index + 1, NOT_TENURED);
-
- elms->set(IS_ACCESSOR_INDEX, heap->false_value());
- elms->set(VALUE_INDEX, *substr);
- elms->set(WRITABLE_INDEX, heap->false_value());
- elms->set(ENUMERABLE_INDEX, heap->true_value());
- elms->set(CONFIGURABLE_INDEX, heap->false_value());
- return *desc;
- }
-
- case JSObject::INTERCEPTED_ELEMENT:
- case JSObject::FAST_ELEMENT: {
- elms->set(IS_ACCESSOR_INDEX, heap->false_value());
- Handle<Object> value = Object::GetElement(obj, index);
- RETURN_IF_EMPTY_HANDLE(isolate, value);
- elms->set(VALUE_INDEX, *value);
- elms->set(WRITABLE_INDEX, heap->true_value());
- elms->set(ENUMERABLE_INDEX, heap->true_value());
- elms->set(CONFIGURABLE_INDEX, heap->true_value());
- return *desc;
- }
-
- case JSObject::DICTIONARY_ELEMENT: {
- Handle<JSObject> holder = obj;
- if (obj->IsJSGlobalProxy()) {
- Object* proto = obj->GetPrototype();
- if (proto->IsNull()) return heap->undefined_value();
- ASSERT(proto->IsJSGlobalObject());
- holder = Handle<JSObject>(JSObject::cast(proto));
- }
- FixedArray* elements = FixedArray::cast(holder->elements());
- SeededNumberDictionary* dictionary = NULL;
- if (elements->map() == heap->non_strict_arguments_elements_map()) {
- dictionary = SeededNumberDictionary::cast(elements->get(1));
- } else {
- dictionary = SeededNumberDictionary::cast(elements);
- }
- int entry = dictionary->FindEntry(index);
- ASSERT(entry != SeededNumberDictionary::kNotFound);
- PropertyDetails details = dictionary->DetailsAt(entry);
- switch (details.type()) {
- case CALLBACKS: {
- // This is an accessor property with getter and/or setter.
- AccessorPair* accessors =
- AccessorPair::cast(dictionary->ValueAt(entry));
- elms->set(IS_ACCESSOR_INDEX, heap->true_value());
- if (CheckElementAccess(*obj, index, v8::ACCESS_GET)) {
- elms->set(GETTER_INDEX, accessors->GetComponent(ACCESSOR_GETTER));
- }
- if (CheckElementAccess(*obj, index, v8::ACCESS_SET)) {
- elms->set(SETTER_INDEX, accessors->GetComponent(ACCESSOR_SETTER));
- }
- break;
- }
- case NORMAL: {
- // This is a data property.
- elms->set(IS_ACCESSOR_INDEX, heap->false_value());
- Handle<Object> value = Object::GetElement(obj, index);
- ASSERT(!value.is_null());
- elms->set(VALUE_INDEX, *value);
- elms->set(WRITABLE_INDEX, heap->ToBoolean(!details.IsReadOnly()));
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
- elms->set(ENUMERABLE_INDEX, heap->ToBoolean(!details.IsDontEnum()));
- elms->set(CONFIGURABLE_INDEX, heap->ToBoolean(!details.IsDontDelete()));
- return *desc;
- }
- }
- }
-
- // Use recursive implementation to also traverse hidden prototypes
- GetOwnPropertyImplementation(*obj, *name, &result);
-
- if (!result.IsProperty()) {
- return heap->undefined_value();
+ // Due to some WebKit tests, we want to make sure that we do not log
+ // more than one access failure here.
+ switch (CheckPropertyAccess(*obj, *name, v8::ACCESS_HAS)) {
+ case ACCESS_FORBIDDEN: return heap->false_value();
+ case ACCESS_ALLOWED: break;
+ case ACCESS_ABSENT: return heap->undefined_value();
}
- if (!CheckAccess(*obj, *name, &result, v8::ACCESS_HAS)) {
- return heap->false_value();
- }
-
- elms->set(ENUMERABLE_INDEX, heap->ToBoolean(!result.IsDontEnum()));
- elms->set(CONFIGURABLE_INDEX, heap->ToBoolean(!result.IsDontDelete()));
-
- bool is_js_accessor = result.IsPropertyCallbacks() &&
- (result.GetCallbackObject()->IsAccessorPair());
+ PropertyAttributes attrs = obj->GetLocalPropertyAttribute(*name);
+ if (attrs == ABSENT) return heap->undefined_value();
+ AccessorPair* raw_accessors = obj->GetLocalPropertyAccessorPair(*name);
+ Handle<AccessorPair> accessors(raw_accessors, isolate);
- if (is_js_accessor) {
- // __defineGetter__/__defineSetter__ callback.
- elms->set(IS_ACCESSOR_INDEX, heap->true_value());
-
- AccessorPair* accessors = AccessorPair::cast(result.GetCallbackObject());
+ Handle<FixedArray> elms = isolate->factory()->NewFixedArray(DESCRIPTOR_SIZE);
+ elms->set(ENUMERABLE_INDEX, heap->ToBoolean((attrs & DONT_ENUM) == 0));
+ elms->set(CONFIGURABLE_INDEX, heap->ToBoolean((attrs & DONT_DELETE) == 0));
+ elms->set(IS_ACCESSOR_INDEX, heap->ToBoolean(raw_accessors != NULL));
+
+ if (raw_accessors == NULL) {
+ elms->set(WRITABLE_INDEX, heap->ToBoolean((attrs & READ_ONLY) == 0));
+ // GetProperty does access check.
+ Handle<Object> value = GetProperty(isolate, obj, name);
+ if (value.is_null()) return Failure::Exception();
+ elms->set(VALUE_INDEX, *value);
+ } else {
+ // Access checks are performed for both accessors separately.
+ // When they fail, the respective field is not set in the descriptor.
Object* getter = accessors->GetComponent(ACCESSOR_GETTER);
- if (!getter->IsMap() && CheckAccess(*obj, *name, &result, v8::ACCESS_GET)) {
+ Object* setter = accessors->GetComponent(ACCESSOR_SETTER);
+ if (!getter->IsMap() && CheckPropertyAccess(*obj, *name, v8::ACCESS_GET)) {
elms->set(GETTER_INDEX, getter);
}
- Object* setter = accessors->GetComponent(ACCESSOR_SETTER);
- if (!setter->IsMap() && CheckAccess(*obj, *name, &result, v8::ACCESS_SET)) {
+ if (!setter->IsMap() && CheckPropertyAccess(*obj, *name, v8::ACCESS_SET)) {
elms->set(SETTER_INDEX, setter);
}
- } else {
- elms->set(IS_ACCESSOR_INDEX, heap->false_value());
- elms->set(WRITABLE_INDEX, heap->ToBoolean(!result.IsReadOnly()));
-
- PropertyAttributes attrs;
- Object* value;
- // GetProperty will check access and report any violations.
- { MaybeObject* maybe_value = obj->GetProperty(*obj, &result, *name, &attrs);
- if (!maybe_value->ToObject(&value)) return maybe_value;
- }
- elms->set(VALUE_INDEX, value);
}
- return *desc;
+ return *isolate->factory()->NewJSArrayWithElements(elms);
}
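
// --- illustrative sketch ---------------------------------------------------
// The rewritten GetOwnProperty derives the descriptor booleans straight from
// the attribute bit mask instead of re-interrogating a LookupResult. The bit
// values below mirror v8's public PropertyAttribute constants, but the
// function itself is only a stand-alone model of that translation:
enum PropertyAttributesSketch {
  NONE_BIT = 0,
  READ_ONLY_BIT = 1 << 0,
  DONT_ENUM_BIT = 1 << 1,
  DONT_DELETE_BIT = 1 << 2
};

struct DescriptorBits { bool writable, enumerable, configurable; };

DescriptorBits FromAttributes(int attrs) {
  return { (attrs & READ_ONLY_BIT) == 0,      // WRITABLE_INDEX
           (attrs & DONT_ENUM_BIT) == 0,      // ENUMERABLE_INDEX
           (attrs & DONT_DELETE_BIT) == 0 };  // CONFIGURABLE_INDEX
}
// ---------------------------------------------------------------------------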
@@ -1362,8 +1291,10 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareGlobals) {
CONVERT_ARG_HANDLE_CHECKED(FixedArray, pairs, 1);
CONVERT_SMI_ARG_CHECKED(flags, 2);
- Handle<JSObject> js_global = Handle<JSObject>(isolate->context()->global_object());
- Handle<JSObject> qml_global = Handle<JSObject>(isolate->context()->qml_global_object());
+ Handle<JSObject> js_global =
+ Handle<JSObject>(isolate->context()->global_object());
+ Handle<JSObject> qml_global =
+ Handle<JSObject>(isolate->context()->qml_global_object());
// Traverse the name/value pairs and set the properties.
int length = pairs->length();
@@ -1371,7 +1302,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareGlobals) {
HandleScope scope(isolate);
Handle<String> name(String::cast(pairs->get(i)));
Handle<Object> value(pairs->get(i + 1), isolate);
- Handle<Object> is_qml_global(pairs->get(i + 2));
+ Handle<Object> is_qml_global(pairs->get(i + 2), isolate);
ASSERT(is_qml_global->IsBoolean());
Handle<JSObject> global = is_qml_global->IsTrue() ? qml_global : js_global;
@@ -1382,8 +1313,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareGlobals) {
bool is_var = value->IsUndefined();
bool is_const = value->IsTheHole();
bool is_function = value->IsSharedFunctionInfo();
- bool is_module = value->IsJSModule();
- ASSERT(is_var + is_const + is_function + is_module == 1);
+ ASSERT(is_var + is_const + is_function == 1);
if (is_var || is_const) {
// Lookup the property in the global object, and don't set the
@@ -1391,13 +1321,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareGlobals) {
// Do the lookup locally only, see ES5 erratum.
LookupResult lookup(isolate);
if (FLAG_es52_globals) {
- Object* obj = *global;
- do {
- JSObject::cast(obj)->LocalLookup(*name, &lookup, true);
- if (lookup.IsFound()) break;
- obj = obj->GetPrototype();
- } while (obj->IsJSObject() &&
- JSObject::cast(obj)->map()->is_hidden_prototype());
+ global->LocalLookup(*name, &lookup, true, true);
} else {
global->Lookup(*name, &lookup, true);
}
@@ -1427,24 +1351,23 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareGlobals) {
// the property must be non-configurable except in eval.
int attr = NONE;
bool is_eval = DeclareGlobalsEvalFlag::decode(flags);
- if (!is_eval || is_module) {
+ if (!is_eval) {
attr |= DONT_DELETE;
}
bool is_native = DeclareGlobalsNativeFlag::decode(flags);
- if (is_const || is_module || (is_native && is_function)) {
+ if (is_const || (is_native && is_function)) {
attr |= READ_ONLY;
}
LanguageMode language_mode = DeclareGlobalsLanguageMode::decode(flags);
- if (!lookup.IsFound() || is_function || is_module) {
+ if (!lookup.IsFound() || is_function) {
// If the local property exists, check that we can reconfigure it
// as required for function declarations.
if (lookup.IsFound() && lookup.IsDontDelete()) {
if (lookup.IsReadOnly() || lookup.IsDontEnum() ||
lookup.IsPropertyCallbacks()) {
- return ThrowRedeclarationError(
- isolate, is_function ? "function" : "module", name);
+ return ThrowRedeclarationError(isolate, "function", name);
}
// If the existing property is not configurable, keep its attributes.
attr = lookup.GetAttributes();
@@ -1571,7 +1494,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareContextSlot) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeVarGlobal) {
- NoHandleAllocation nha;
+ NoHandleAllocation nha(isolate);
// args[0] == name
// args[1] == language_mode
// args[2] == qml_mode
@@ -1607,27 +1530,20 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeVarGlobal) {
// the whole chain of hidden prototypes to do a 'local' lookup.
Object* object = global;
LookupResult lookup(isolate);
- while (object->IsJSObject() &&
- JSObject::cast(object)->map()->is_hidden_prototype()) {
- JSObject* raw_holder = JSObject::cast(object);
- raw_holder->LocalLookup(*name, &lookup, true);
- if (lookup.IsInterceptor()) {
- HandleScope handle_scope(isolate);
- Handle<JSObject> holder(raw_holder);
- PropertyAttributes intercepted = holder->GetPropertyAttribute(*name);
- // Update the raw pointer in case it's changed due to GC.
- raw_holder = *holder;
- if (intercepted != ABSENT && (intercepted & READ_ONLY) == 0) {
- // Found an interceptor that's not read only.
- if (assign) {
- return raw_holder->SetProperty(
- &lookup, *name, args[3], attributes, strict_mode_flag);
- } else {
- return isolate->heap()->undefined_value();
- }
+ JSObject::cast(object)->LocalLookup(*name, &lookup, true, true);
+ if (lookup.IsInterceptor()) {
+ HandleScope handle_scope(isolate);
+ PropertyAttributes intercepted =
+ lookup.holder()->GetPropertyAttribute(*name);
+ if (intercepted != ABSENT && (intercepted & READ_ONLY) == 0) {
+ // Found an interceptor that's not read only.
+ if (assign) {
+ return lookup.holder()->SetProperty(
+ &lookup, *name, args[3], attributes, strict_mode_flag);
+ } else {
+ return isolate->heap()->undefined_value();
}
}
- object = raw_holder->GetPrototype();
}
// Reload global in case the loop above performed a GC.
@@ -1635,7 +1551,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeVarGlobal) {
: isolate->context()->global_object();
if (assign) {
return global->SetProperty(
- *name, args[3], attributes, strict_mode_flag, JSReceiver::MAY_BE_STORE_FROM_KEYED, true);
+ *name, args[3], attributes, strict_mode_flag,
+ JSReceiver::MAY_BE_STORE_FROM_KEYED, true);
}
return isolate->heap()->undefined_value();
}
@@ -1698,7 +1615,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeConstGlobal) {
// Strict mode handling not needed (const is disallowed in strict mode).
if (lookup.IsField()) {
FixedArray* properties = global->properties();
- int index = lookup.GetFieldIndex();
+ int index = lookup.GetFieldIndex().field_index();
if (properties->get(index)->IsTheHole() || !lookup.IsReadOnly()) {
properties->set(index, *value);
}
@@ -1788,7 +1705,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeConstContextSlot) {
if (lookup.IsField()) {
FixedArray* properties = object->properties();
- int index = lookup.GetFieldIndex();
+ int index = lookup.GetFieldIndex().field_index();
if (properties->get(index)->IsTheHole()) {
properties->set(index, *value);
}
@@ -1839,7 +1756,6 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpExec) {
// length of a string, i.e. it is always a Smi. We check anyway for security.
CONVERT_SMI_ARG_CHECKED(index, 2);
CONVERT_ARG_HANDLE_CHECKED(JSArray, last_match_info, 3);
- RUNTIME_ASSERT(last_match_info->HasFastObjectElements());
RUNTIME_ASSERT(index >= 0);
RUNTIME_ASSERT(index <= subject->length());
isolate->counters()->regexp_entry_runtime()->Increment();
@@ -1894,7 +1810,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpInitializeObject) {
CONVERT_ARG_CHECKED(String, source, 1);
// If source is the empty string we set it to "(?:)" instead as
// suggested by ECMA-262, 5th, section 15.10.4.1.
- if (source->length() == 0) source = isolate->heap()->query_colon_symbol();
+ if (source->length() == 0) source = isolate->heap()->query_colon_string();
Object* global = args[2];
if (!global->IsTrue()) global = isolate->heap()->false_value();
@@ -1919,9 +1835,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpInitializeObject) {
JSRegExp::kIgnoreCaseFieldIndex, ignoreCase, SKIP_WRITE_BARRIER);
regexp->InObjectPropertyAtPut(
JSRegExp::kMultilineFieldIndex, multiline, SKIP_WRITE_BARRIER);
- regexp->InObjectPropertyAtPut(JSRegExp::kLastIndexFieldIndex,
- Smi::FromInt(0),
- SKIP_WRITE_BARRIER); // It's a Smi.
+ regexp->InObjectPropertyAtPut(
+ JSRegExp::kLastIndexFieldIndex, Smi::FromInt(0), SKIP_WRITE_BARRIER);
return regexp;
}
@@ -1932,28 +1847,30 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpInitializeObject) {
static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE);
Heap* heap = isolate->heap();
MaybeObject* result;
- result = regexp->SetLocalPropertyIgnoreAttributes(heap->source_symbol(),
+ result = regexp->SetLocalPropertyIgnoreAttributes(heap->source_string(),
source,
final);
- ASSERT(!result->IsFailure());
- result = regexp->SetLocalPropertyIgnoreAttributes(heap->global_symbol(),
+ // TODO(jkummerow): Turn these back into ASSERTs when we can be certain
+ // that they never fire in Release mode in the wild.
+ CHECK(!result->IsFailure());
+ result = regexp->SetLocalPropertyIgnoreAttributes(heap->global_string(),
global,
final);
- ASSERT(!result->IsFailure());
+ CHECK(!result->IsFailure());
result =
- regexp->SetLocalPropertyIgnoreAttributes(heap->ignore_case_symbol(),
+ regexp->SetLocalPropertyIgnoreAttributes(heap->ignore_case_string(),
ignoreCase,
final);
- ASSERT(!result->IsFailure());
- result = regexp->SetLocalPropertyIgnoreAttributes(heap->multiline_symbol(),
+ CHECK(!result->IsFailure());
+ result = regexp->SetLocalPropertyIgnoreAttributes(heap->multiline_string(),
multiline,
final);
- ASSERT(!result->IsFailure());
+ CHECK(!result->IsFailure());
result =
- regexp->SetLocalPropertyIgnoreAttributes(heap->last_index_symbol(),
+ regexp->SetLocalPropertyIgnoreAttributes(heap->last_index_string(),
Smi::FromInt(0),
writable);
- ASSERT(!result->IsFailure());
+ CHECK(!result->IsFailure());
USE(result);
return regexp;
}
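
// --- illustrative sketch ---------------------------------------------------
// Context for the TODO above: V8's ASSERT compiles to nothing in Release
// builds, while CHECK always evaluates its condition and aborts on failure,
// so upgrading ASSERT to CHECK keeps the guard alive in the wild. Reduced
// model of the two macros (the MY_* names are stand-ins):
#include <cstdio>
#include <cstdlib>

#define MY_CHECK(cond)                                   \
  do {                                                   \
    if (!(cond)) {                                       \
      std::fprintf(stderr, "Check failed: %s\n", #cond); \
      std::abort();                                      \
    }                                                    \
  } while (false)

#ifdef DEBUG
#define MY_ASSERT(cond) MY_CHECK(cond)  // debug builds: same as CHECK
#else
#define MY_ASSERT(cond) ((void)0)       // release builds: disappears
#endif
// ---------------------------------------------------------------------------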
@@ -1974,7 +1891,7 @@ static Handle<JSFunction> InstallBuiltin(Isolate* isolate,
Handle<JSObject> holder,
const char* name,
Builtins::Name builtin_name) {
- Handle<String> key = isolate->factory()->LookupAsciiSymbol(name);
+ Handle<String> key = isolate->factory()->InternalizeUtf8String(name);
Handle<Code> code(isolate->builtins()->builtin(builtin_name));
Handle<JSFunction> optimized =
isolate->factory()->NewFunction(key,
@@ -2063,7 +1980,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_MaterializeRegExpLiteral) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionGetName) {
- NoHandleAllocation ha;
+ NoHandleAllocation ha(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSFunction, f, 0);
@@ -2072,7 +1989,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionGetName) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetName) {
- NoHandleAllocation ha;
+ NoHandleAllocation ha(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(JSFunction, f, 0);
@@ -2083,7 +2000,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetName) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionNameShouldPrintAsAnonymous) {
- NoHandleAllocation ha;
+ NoHandleAllocation ha(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSFunction, f, 0);
return isolate->heap()->ToBoolean(
@@ -2092,7 +2009,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionNameShouldPrintAsAnonymous) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionMarkNameShouldPrintAsAnonymous) {
- NoHandleAllocation ha;
+ NoHandleAllocation ha(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSFunction, f, 0);
f->shared()->set_name_should_print_as_anonymous(true);
@@ -2101,7 +2018,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionMarkNameShouldPrintAsAnonymous) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionRemovePrototype) {
- NoHandleAllocation ha;
+ NoHandleAllocation ha(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSFunction, f, 0);
@@ -2134,7 +2051,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionGetSourceCode) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionGetScriptSourcePosition) {
- NoHandleAllocation ha;
+ NoHandleAllocation ha(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSFunction, fun, 0);
@@ -2157,7 +2074,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionGetPositionForOffset) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetInstanceClassName) {
- NoHandleAllocation ha;
+ NoHandleAllocation ha(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(JSFunction, fun, 0);
@@ -2168,7 +2085,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetInstanceClassName) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetLength) {
- NoHandleAllocation ha;
+ NoHandleAllocation ha(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(JSFunction, fun, 0);
@@ -2179,7 +2096,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetLength) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetPrototype) {
- NoHandleAllocation ha;
+ NoHandleAllocation ha(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(JSFunction, fun, 0);
@@ -2194,11 +2111,11 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetPrototype) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetReadOnlyPrototype) {
- NoHandleAllocation ha;
+ NoHandleAllocation ha(isolate);
RUNTIME_ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSFunction, function, 0);
- String* name = isolate->heap()->prototype_symbol();
+ String* name = isolate->heap()->prototype_string();
if (function->HasFastProperties()) {
// Construct a new field descriptor with updated attributes.
@@ -2237,7 +2154,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetReadOnlyPrototype) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionIsAPIFunction) {
- NoHandleAllocation ha;
+ NoHandleAllocation ha(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSFunction, f, 0);
@@ -2246,7 +2163,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionIsAPIFunction) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionIsBuiltin) {
- NoHandleAllocation ha;
+ NoHandleAllocation ha(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSFunction, f, 0);
@@ -2276,7 +2193,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetCode) {
// target function to undefined. SetCode is only used for built-in
// constructors like String, Array, and Object, and some web code
// doesn't like seeing source code for constructors.
- target_shared->set_code(source_shared->code());
+ target_shared->ReplaceCode(source_shared->code());
target_shared->set_scope_info(source_shared->scope_info());
target_shared->set_length(source_shared->length());
target_shared->set_formal_parameter_count(
@@ -2341,7 +2258,7 @@ MUST_USE_RESULT static MaybeObject* CharFromCode(Isolate* isolate,
RUNTIME_FUNCTION(MaybeObject*, Runtime_StringCharCodeAt) {
- NoHandleAllocation ha;
+ NoHandleAllocation ha(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(String, subject, 0);
@@ -2365,7 +2282,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringCharCodeAt) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_CharFromCode) {
- NoHandleAllocation ha;
+ NoHandleAllocation ha(isolate);
ASSERT(args.length() == 1);
return CharFromCode(isolate, args[0]);
}
@@ -2480,7 +2397,7 @@ class ReplacementStringBuilder {
array_builder_(heap->isolate(), estimated_part_count),
subject_(subject),
character_count_(0),
- is_ascii_(subject->IsAsciiRepresentation()) {
+ is_ascii_(subject->IsOneByteRepresentation()) {
// Require a non-zero initial size. Ensures that doubling the size to
// extend the array will work.
ASSERT(estimated_part_count > 0);
@@ -2520,7 +2437,7 @@ class ReplacementStringBuilder {
int length = string->length();
ASSERT(length > 0);
AddElement(*string);
- if (!string->IsAsciiRepresentation()) {
+ if (!string->IsOneByteRepresentation()) {
is_ascii_ = false;
}
IncrementCharacterCount(length);
@@ -2534,9 +2451,9 @@ class ReplacementStringBuilder {
Handle<String> joined_string;
if (is_ascii_) {
- Handle<SeqAsciiString> seq = NewRawAsciiString(character_count_);
+ Handle<SeqOneByteString> seq = NewRawOneByteString(character_count_);
AssertNoAllocation no_alloc;
- char* char_buffer = seq->GetChars();
+ uint8_t* char_buffer = seq->GetChars();
StringBuilderConcatHelper(*subject_,
char_buffer,
*array_builder_.array(),
@@ -2565,8 +2482,8 @@ class ReplacementStringBuilder {
}
private:
- Handle<SeqAsciiString> NewRawAsciiString(int length) {
- return heap_->isolate()->factory()->NewRawAsciiString(length);
+ Handle<SeqOneByteString> NewRawOneByteString(int length) {
+ return heap_->isolate()->factory()->NewRawOneByteString(length);
}
@@ -2793,7 +2710,7 @@ bool CompiledReplacement::Compile(Handle<String> replacement,
bool simple = false;
if (content.IsAscii()) {
simple = ParseReplacementPattern(&parts_,
- content.ToAsciiVector(),
+ content.ToOneByteVector(),
capture_count,
subject_length,
zone());
@@ -2869,7 +2786,7 @@ void CompiledReplacement::Apply(ReplacementStringBuilder* builder,
}
-void FindAsciiStringIndices(Vector<const char> subject,
+void FindAsciiStringIndices(Vector<const uint8_t> subject,
char pattern,
ZoneList<int>* indices,
unsigned int limit,
@@ -2877,11 +2794,11 @@ void FindAsciiStringIndices(Vector<const char> subject,
ASSERT(limit > 0);
// Collect indices of pattern in subject using memchr.
// Stop after finding at most limit values.
- const char* subject_start = reinterpret_cast<const char*>(subject.start());
- const char* subject_end = subject_start + subject.length();
- const char* pos = subject_start;
+ const uint8_t* subject_start = subject.start();
+ const uint8_t* subject_end = subject_start + subject.length();
+ const uint8_t* pos = subject_start;
while (limit > 0) {
- pos = reinterpret_cast<const char*>(
+ pos = reinterpret_cast<const uint8_t*>(
memchr(pos, pattern, subject_end - pos));
if (pos == NULL) return;
indices->Add(static_cast<int>(pos - subject_start), zone);
@@ -2891,6 +2808,23 @@ void FindAsciiStringIndices(Vector<const char> subject,
}
+void FindTwoByteStringIndices(const Vector<const uc16> subject,
+ uc16 pattern,
+ ZoneList<int>* indices,
+ unsigned int limit,
+ Zone* zone) {
+ ASSERT(limit > 0);
+ const uc16* subject_start = subject.start();
+ const uc16* subject_end = subject_start + subject.length();
+ for (const uc16* pos = subject_start; pos < subject_end && limit > 0; pos++) {
+ if (*pos == pattern) {
+ indices->Add(static_cast<int>(pos - subject_start), zone);
+ limit--;
+ }
+ }
+}
+
+
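
// --- illustrative sketch ---------------------------------------------------
// FindTwoByteStringIndices is the UC16 counterpart of the memchr-based
// FindAsciiStringIndices: memchr can only scan bytes, so 16-bit subjects use
// a plain bounded loop. Stand-alone model of the same scan:
#include <cstddef>
#include <cstdint>
#include <vector>

void FindUC16Indices(const uint16_t* subject, size_t length, uint16_t pattern,
                     unsigned limit, std::vector<int>* indices) {
  const uint16_t* end = subject + length;
  for (const uint16_t* pos = subject; pos < end && limit > 0; ++pos) {
    if (*pos == pattern) {
      indices->push_back(static_cast<int>(pos - subject));
      --limit;  // record at most `limit` matches, like the runtime helper
    }
  }
}
// ---------------------------------------------------------------------------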
template <typename SubjectChar, typename PatternChar>
void FindStringIndices(Isolate* isolate,
Vector<const SubjectChar> subject,
@@ -2927,9 +2861,10 @@ void FindStringIndicesDispatch(Isolate* isolate,
ASSERT(subject_content.IsFlat());
ASSERT(pattern_content.IsFlat());
if (subject_content.IsAscii()) {
- Vector<const char> subject_vector = subject_content.ToAsciiVector();
+ Vector<const uint8_t> subject_vector = subject_content.ToOneByteVector();
if (pattern_content.IsAscii()) {
- Vector<const char> pattern_vector = pattern_content.ToAsciiVector();
+ Vector<const uint8_t> pattern_vector =
+ pattern_content.ToOneByteVector();
if (pattern_vector.length() == 1) {
FindAsciiStringIndices(subject_vector,
pattern_vector[0],
@@ -2955,19 +2890,38 @@ void FindStringIndicesDispatch(Isolate* isolate,
} else {
Vector<const uc16> subject_vector = subject_content.ToUC16Vector();
if (pattern_content.IsAscii()) {
- FindStringIndices(isolate,
- subject_vector,
- pattern_content.ToAsciiVector(),
- indices,
- limit,
- zone);
+ Vector<const uint8_t> pattern_vector =
+ pattern_content.ToOneByteVector();
+ if (pattern_vector.length() == 1) {
+ FindTwoByteStringIndices(subject_vector,
+ pattern_vector[0],
+ indices,
+ limit,
+ zone);
+ } else {
+ FindStringIndices(isolate,
+ subject_vector,
+ pattern_vector,
+ indices,
+ limit,
+ zone);
+ }
} else {
- FindStringIndices(isolate,
- subject_vector,
- pattern_content.ToUC16Vector(),
- indices,
- limit,
- zone);
+ Vector<const uc16> pattern_vector = pattern_content.ToUC16Vector();
+ if (pattern_vector.length() == 1) {
+ FindTwoByteStringIndices(subject_vector,
+ pattern_vector[0],
+ indices,
+ limit,
+ zone);
+ } else {
+ FindStringIndices(isolate,
+ subject_vector,
+ pattern_vector,
+ indices,
+ limit,
+ zone);
+ }
}
}
}
@@ -2975,7 +2929,7 @@ void FindStringIndicesDispatch(Isolate* isolate,
template<typename ResultSeqString>
-MUST_USE_RESULT static MaybeObject* StringReplaceAtomRegExpWithString(
+MUST_USE_RESULT static MaybeObject* StringReplaceGlobalAtomRegExpWithString(
Isolate* isolate,
Handle<String> subject,
Handle<JSRegExp> pattern_regexp,
@@ -3006,7 +2960,7 @@ MUST_USE_RESULT static MaybeObject* StringReplaceAtomRegExpWithString(
static_cast<int64_t>(pattern_len)) *
static_cast<int64_t>(matches) +
static_cast<int64_t>(subject_len);
- if (result_len_64 > INT_MAX) return Failure::OutOfMemoryException();
+ if (result_len_64 > INT_MAX) return Failure::OutOfMemoryException(0x11);
int result_len = static_cast<int>(result_len_64);
int subject_pos = 0;
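
// --- illustrative sketch ---------------------------------------------------
// The result-length computation above deliberately happens in 64-bit
// arithmetic so the INT_MAX guard runs before narrowing back to int; on
// overflow the runtime reports out-of-memory instead of allocating a string
// with a corrupt length. Stand-alone version of that guard:
#include <climits>
#include <cstdint>

bool ReplacedLength(int subject_len, int pattern_len, int replacement_len,
                    int matches, int* result_len) {
  int64_t result_64 =
      (static_cast<int64_t>(replacement_len) - pattern_len) * matches +
      subject_len;
  if (result_64 > INT_MAX) return false;  // caller signals out-of-memory
  *result_len = static_cast<int>(result_64);
  return true;
}
// ---------------------------------------------------------------------------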
@@ -3015,7 +2969,7 @@ MUST_USE_RESULT static MaybeObject* StringReplaceAtomRegExpWithString(
Handle<ResultSeqString> result;
if (ResultSeqString::kHasAsciiEncoding) {
result = Handle<ResultSeqString>::cast(
- isolate->factory()->NewRawAsciiString(result_len));
+ isolate->factory()->NewRawOneByteString(result_len));
} else {
result = Handle<ResultSeqString>::cast(
isolate->factory()->NewRawTwoByteString(result_len));
@@ -3058,7 +3012,7 @@ MUST_USE_RESULT static MaybeObject* StringReplaceAtomRegExpWithString(
}
-MUST_USE_RESULT static MaybeObject* StringReplaceRegExpWithString(
+MUST_USE_RESULT static MaybeObject* StringReplaceGlobalRegExpWithString(
Isolate* isolate,
Handle<String> subject,
Handle<JSRegExp> regexp,
@@ -3067,7 +3021,6 @@ MUST_USE_RESULT static MaybeObject* StringReplaceRegExpWithString(
ASSERT(subject->IsFlat());
ASSERT(replacement->IsFlat());
- bool is_global = regexp->GetFlags().is_global();
int capture_count = regexp->CaptureCount();
int subject_length = subject->length();
@@ -3080,19 +3033,18 @@ MUST_USE_RESULT static MaybeObject* StringReplaceRegExpWithString(
subject_length);
// Shortcut for simple non-regexp global replacements
- if (is_global &&
- regexp->TypeTag() == JSRegExp::ATOM &&
- simple_replace) {
- if (subject->HasOnlyAsciiChars() && replacement->HasOnlyAsciiChars()) {
- return StringReplaceAtomRegExpWithString<SeqAsciiString>(
+ if (regexp->TypeTag() == JSRegExp::ATOM && simple_replace) {
+ if (subject->IsOneByteConvertible() &&
+ replacement->IsOneByteConvertible()) {
+ return StringReplaceGlobalAtomRegExpWithString<SeqOneByteString>(
isolate, subject, regexp, replacement, last_match_info);
} else {
- return StringReplaceAtomRegExpWithString<SeqTwoByteString>(
+ return StringReplaceGlobalAtomRegExpWithString<SeqTwoByteString>(
isolate, subject, regexp, replacement, last_match_info);
}
}
- RegExpImpl::GlobalCache global_cache(regexp, subject, is_global, isolate);
+ RegExpImpl::GlobalCache global_cache(regexp, subject, true, isolate);
if (global_cache.HasException()) return Failure::Exception();
int32_t* current_match = global_cache.FetchNext();
@@ -3104,8 +3056,7 @@ MUST_USE_RESULT static MaybeObject* StringReplaceRegExpWithString(
// Guessing the number of parts that the final result string is built
// from. Global regexps can match any number of times, so we guess
// conservatively.
- int expected_parts =
- (compiled_replacement.parts() + 1) * (is_global ? 4 : 1) + 1;
+ int expected_parts = (compiled_replacement.parts() + 1) * 4 + 1;
ReplacementStringBuilder builder(isolate->heap(),
subject,
expected_parts);
@@ -3137,9 +3088,6 @@ MUST_USE_RESULT static MaybeObject* StringReplaceRegExpWithString(
}
prev = end;
- // Only continue checking for global regexps.
- if (!is_global) break;
-
current_match = global_cache.FetchNext();
} while (current_match != NULL);
@@ -3160,37 +3108,26 @@ MUST_USE_RESULT static MaybeObject* StringReplaceRegExpWithString(
template <typename ResultSeqString>
-MUST_USE_RESULT static MaybeObject* StringReplaceRegExpWithEmptyString(
+MUST_USE_RESULT static MaybeObject* StringReplaceGlobalRegExpWithEmptyString(
Isolate* isolate,
Handle<String> subject,
Handle<JSRegExp> regexp,
Handle<JSArray> last_match_info) {
ASSERT(subject->IsFlat());
- bool is_global = regexp->GetFlags().is_global();
-
// Shortcut for simple non-regexp global replacements
- if (is_global &&
- regexp->TypeTag() == JSRegExp::ATOM) {
- Handle<String> empty_string(HEAP->empty_string());
- if (subject->HasOnlyAsciiChars()) {
- return StringReplaceAtomRegExpWithString<SeqAsciiString>(
- isolate,
- subject,
- regexp,
- empty_string,
- last_match_info);
+ if (regexp->TypeTag() == JSRegExp::ATOM) {
+ Handle<String> empty_string = isolate->factory()->empty_string();
+ if (subject->IsOneByteRepresentation()) {
+ return StringReplaceGlobalAtomRegExpWithString<SeqOneByteString>(
+ isolate, subject, regexp, empty_string, last_match_info);
} else {
- return StringReplaceAtomRegExpWithString<SeqTwoByteString>(
- isolate,
- subject,
- regexp,
- empty_string,
- last_match_info);
+ return StringReplaceGlobalAtomRegExpWithString<SeqTwoByteString>(
+ isolate, subject, regexp, empty_string, last_match_info);
}
}
- RegExpImpl::GlobalCache global_cache(regexp, subject, is_global, isolate);
+ RegExpImpl::GlobalCache global_cache(regexp, subject, true, isolate);
if (global_cache.HasException()) return Failure::Exception();
int32_t* current_match = global_cache.FetchNext();
@@ -3210,29 +3147,12 @@ MUST_USE_RESULT static MaybeObject* StringReplaceRegExpWithEmptyString(
Handle<ResultSeqString> answer;
if (ResultSeqString::kHasAsciiEncoding) {
answer = Handle<ResultSeqString>::cast(
- isolate->factory()->NewRawAsciiString(new_length));
+ isolate->factory()->NewRawOneByteString(new_length));
} else {
answer = Handle<ResultSeqString>::cast(
isolate->factory()->NewRawTwoByteString(new_length));
}
- if (!is_global) {
- RegExpImpl::SetLastMatchInfo(
- last_match_info, subject, capture_count, current_match);
- if (start == end) {
- return *subject;
- } else {
- if (start > 0) {
- String::WriteToFlat(*subject, answer->GetChars(), 0, start);
- }
- if (end < subject_length) {
- String::WriteToFlat(
- *subject, answer->GetChars() + start, end, subject_length);
- }
- return *answer;
- }
- }
-
int prev = 0;
int position = 0;
@@ -3241,8 +3161,7 @@ MUST_USE_RESULT static MaybeObject* StringReplaceRegExpWithEmptyString(
end = current_match[1];
if (prev < start) {
// Add substring subject[prev;start] to answer string.
- String::WriteToFlat(
- *subject, answer->GetChars() + position, prev, start);
+ String::WriteToFlat(*subject, answer->GetChars() + position, prev, start);
position += start - prev;
}
prev = end;
@@ -3284,7 +3203,7 @@ MUST_USE_RESULT static MaybeObject* StringReplaceRegExpWithEmptyString(
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_StringReplaceRegExpWithString) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_StringReplaceGlobalRegExpWithString) {
ASSERT(args.length() == 4);
HandleScope scope(isolate);
@@ -3294,23 +3213,23 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringReplaceRegExpWithString) {
CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 1);
CONVERT_ARG_HANDLE_CHECKED(JSArray, last_match_info, 3);
- if (!subject->IsFlat()) subject = FlattenGetString(subject);
-
- if (!replacement->IsFlat()) replacement = FlattenGetString(replacement);
+ ASSERT(regexp->GetFlags().is_global());
- ASSERT(last_match_info->HasFastObjectElements());
+ if (!subject->IsFlat()) subject = FlattenGetString(subject);
if (replacement->length() == 0) {
- if (subject->HasOnlyAsciiChars()) {
- return StringReplaceRegExpWithEmptyString<SeqAsciiString>(
+ if (subject->IsOneByteConvertible()) {
+ return StringReplaceGlobalRegExpWithEmptyString<SeqOneByteString>(
isolate, subject, regexp, last_match_info);
} else {
- return StringReplaceRegExpWithEmptyString<SeqTwoByteString>(
+ return StringReplaceGlobalRegExpWithEmptyString<SeqTwoByteString>(
isolate, subject, regexp, last_match_info);
}
}
- return StringReplaceRegExpWithString(
+ if (!replacement->IsFlat()) replacement = FlattenGetString(replacement);
+
+ return StringReplaceGlobalRegExpWithString(
isolate, subject, regexp, replacement, last_match_info);
}
@@ -3413,10 +3332,10 @@ int Runtime::StringMatch(Isolate* isolate,
// dispatch on type of strings
if (seq_pat.IsAscii()) {
- Vector<const char> pat_vector = seq_pat.ToAsciiVector();
+ Vector<const uint8_t> pat_vector = seq_pat.ToOneByteVector();
if (seq_sub.IsAscii()) {
return SearchString(isolate,
- seq_sub.ToAsciiVector(),
+ seq_sub.ToOneByteVector(),
pat_vector,
start_index);
}
@@ -3428,7 +3347,7 @@ int Runtime::StringMatch(Isolate* isolate,
Vector<const uc16> pat_vector = seq_pat.ToUC16Vector();
if (seq_sub.IsAscii()) {
return SearchString(isolate,
- seq_sub.ToAsciiVector(),
+ seq_sub.ToOneByteVector(),
pat_vector,
start_index);
}
@@ -3468,7 +3387,7 @@ static int StringMatchBackwards(Vector<const schar> subject,
if (sizeof(schar) == 1 && sizeof(pchar) > 1) {
for (int i = 0; i < pattern_length; i++) {
uc16 c = pattern[i];
- if (c > String::kMaxAsciiCharCode) {
+ if (c > String::kMaxOneByteCharCode) {
return -1;
}
}
@@ -3523,9 +3442,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringLastIndexOf) {
String::FlatContent pat_content = pat->GetFlatContent();
if (pat_content.IsAscii()) {
- Vector<const char> pat_vector = pat_content.ToAsciiVector();
+ Vector<const uint8_t> pat_vector = pat_content.ToOneByteVector();
if (sub_content.IsAscii()) {
- position = StringMatchBackwards(sub_content.ToAsciiVector(),
+ position = StringMatchBackwards(sub_content.ToOneByteVector(),
pat_vector,
start_index);
} else {
@@ -3536,7 +3455,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringLastIndexOf) {
} else {
Vector<const uc16> pat_vector = pat_content.ToUC16Vector();
if (sub_content.IsAscii()) {
- position = StringMatchBackwards(sub_content.ToAsciiVector(),
+ position = StringMatchBackwards(sub_content.ToOneByteVector(),
pat_vector,
start_index);
} else {
@@ -3551,7 +3470,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringLastIndexOf) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_StringLocaleCompare) {
- NoHandleAllocation ha;
+ NoHandleAllocation ha(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(String, str1, 0);
@@ -3580,17 +3499,17 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringLocaleCompare) {
str1->TryFlatten();
str2->TryFlatten();
- StringInputBuffer& buf1 =
- *isolate->runtime_state()->string_locale_compare_buf1();
- StringInputBuffer& buf2 =
- *isolate->runtime_state()->string_locale_compare_buf2();
-
- buf1.Reset(str1);
- buf2.Reset(str2);
+ ConsStringIteratorOp* op1 =
+ isolate->runtime_state()->string_locale_compare_it1();
+ ConsStringIteratorOp* op2 =
+ isolate->runtime_state()->string_locale_compare_it2();
+ // TODO(dcarney): Can do array compares here more efficiently.
+ StringCharacterStream stream1(str1, op1);
+ StringCharacterStream stream2(str2, op2);
for (int i = 0; i < end; i++) {
- uint16_t char1 = buf1.GetNext();
- uint16_t char2 = buf2.GetNext();
+ uint16_t char1 = stream1.GetNext();
+ uint16_t char2 = stream2.GetNext();
if (char1 != char2) return Smi::FromInt(char1 - char2);
}
@@ -3599,7 +3518,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringLocaleCompare) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_SubString) {
- NoHandleAllocation ha;
+ NoHandleAllocation ha(isolate);
ASSERT(args.length() == 3);
CONVERT_ARG_CHECKED(String, value, 0);
@@ -3621,6 +3540,10 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SubString) {
RUNTIME_ASSERT(start >= 0);
RUNTIME_ASSERT(end <= value->length());
isolate->counters()->sub_string_runtime()->Increment();
+ if (end - start == 1) {
+ return isolate->heap()->LookupSingleCharacterStringFromCode(
+ value->Get(start));
+ }
return value->SubString(start, end);
}
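
// --- illustrative sketch ---------------------------------------------------
// The new end - start == 1 fast path hands back a shared one-character
// string from the heap instead of allocating a fresh substring. A toy cache
// with the same shape (the real table lives on the heap and is keyed by the
// character code):
#include <string>

static std::string one_char_cache[256];

const std::string& OneCharString(unsigned char code) {
  std::string& entry = one_char_cache[code];
  if (entry.empty()) entry.assign(1, static_cast<char>(code));  // fill lazily
  return entry;  // every single-character substring with this code is shared
}
// ---------------------------------------------------------------------------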
@@ -3631,7 +3554,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringMatch) {
CONVERT_ARG_HANDLE_CHECKED(String, subject, 0);
CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 1);
CONVERT_ARG_HANDLE_CHECKED(JSArray, regexp_info, 2);
- HandleScope handles;
+ HandleScope handles(isolate);
RegExpImpl::GlobalCache global_cache(regexp, subject, true, isolate);
if (global_cache.HasException()) return Failure::Exception();
@@ -3702,7 +3625,7 @@ static MaybeObject* SearchRegExpMultiple(
isolate->heap(),
*subject,
regexp->data(),
- RegExpResultsCache::REGEXP_MULTIPLE_INDICES));
+ RegExpResultsCache::REGEXP_MULTIPLE_INDICES), isolate);
if (*cached_answer != Smi::FromInt(0)) {
Handle<FixedArray> cached_fixed_array =
Handle<FixedArray>(FixedArray::cast(*cached_answer));
@@ -3844,7 +3767,6 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpExecMultiple) {
CONVERT_ARG_HANDLE_CHECKED(JSArray, last_match_info, 2);
CONVERT_ARG_HANDLE_CHECKED(JSArray, result_array, 3);
- ASSERT(last_match_info->HasFastObjectElements());
ASSERT(regexp->GetFlags().is_global());
if (regexp->CaptureCount() == 0) {
@@ -3858,7 +3780,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpExecMultiple) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToRadixString) {
- NoHandleAllocation ha;
+ NoHandleAllocation ha(isolate);
ASSERT(args.length() == 2);
CONVERT_SMI_ARG_CHECKED(radix, 1);
RUNTIME_ASSERT(2 <= radix && radix <= 36);
@@ -3877,92 +3799,65 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToRadixString) {
// Slow case.
CONVERT_DOUBLE_ARG_CHECKED(value, 0);
if (isnan(value)) {
- return *isolate->factory()->nan_symbol();
+ return *isolate->factory()->nan_string();
}
if (isinf(value)) {
if (value < 0) {
- return *isolate->factory()->minus_infinity_symbol();
+ return *isolate->factory()->minus_infinity_string();
}
- return *isolate->factory()->infinity_symbol();
+ return *isolate->factory()->infinity_string();
}
char* str = DoubleToRadixCString(value, radix);
MaybeObject* result =
- isolate->heap()->AllocateStringFromAscii(CStrVector(str));
+ isolate->heap()->AllocateStringFromOneByte(CStrVector(str));
DeleteArray(str);
return result;
}
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToFixed) {
- NoHandleAllocation ha;
+ NoHandleAllocation ha(isolate);
ASSERT(args.length() == 2);
CONVERT_DOUBLE_ARG_CHECKED(value, 0);
- if (isnan(value)) {
- return *isolate->factory()->nan_symbol();
- }
- if (isinf(value)) {
- if (value < 0) {
- return *isolate->factory()->minus_infinity_symbol();
- }
- return *isolate->factory()->infinity_symbol();
- }
CONVERT_DOUBLE_ARG_CHECKED(f_number, 1);
int f = FastD2IChecked(f_number);
RUNTIME_ASSERT(f >= 0);
char* str = DoubleToFixedCString(value, f);
MaybeObject* res =
- isolate->heap()->AllocateStringFromAscii(CStrVector(str));
+ isolate->heap()->AllocateStringFromOneByte(CStrVector(str));
DeleteArray(str);
return res;
}
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToExponential) {
- NoHandleAllocation ha;
+ NoHandleAllocation ha(isolate);
ASSERT(args.length() == 2);
CONVERT_DOUBLE_ARG_CHECKED(value, 0);
- if (isnan(value)) {
- return *isolate->factory()->nan_symbol();
- }
- if (isinf(value)) {
- if (value < 0) {
- return *isolate->factory()->minus_infinity_symbol();
- }
- return *isolate->factory()->infinity_symbol();
- }
CONVERT_DOUBLE_ARG_CHECKED(f_number, 1);
int f = FastD2IChecked(f_number);
RUNTIME_ASSERT(f >= -1 && f <= 20);
char* str = DoubleToExponentialCString(value, f);
MaybeObject* res =
- isolate->heap()->AllocateStringFromAscii(CStrVector(str));
+ isolate->heap()->AllocateStringFromOneByte(CStrVector(str));
DeleteArray(str);
return res;
}
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToPrecision) {
- NoHandleAllocation ha;
+ NoHandleAllocation ha(isolate);
ASSERT(args.length() == 2);
CONVERT_DOUBLE_ARG_CHECKED(value, 0);
- if (isnan(value)) {
- return *isolate->factory()->nan_symbol();
- }
- if (isinf(value)) {
- if (value < 0) {
- return *isolate->factory()->minus_infinity_symbol();
- }
- return *isolate->factory()->infinity_symbol();
- }
CONVERT_DOUBLE_ARG_CHECKED(f_number, 1);
int f = FastD2IChecked(f_number);
RUNTIME_ASSERT(f >= 1 && f <= 21);
char* str = DoubleToPrecisionCString(value, f);
MaybeObject* res =
- isolate->heap()->AllocateStringFromAscii(CStrVector(str));
+ isolate->heap()->AllocateStringFromOneByte(CStrVector(str));
DeleteArray(str);
return res;
}
@@ -3974,6 +3869,7 @@ static Handle<Object> GetCharAt(Handle<String> string, uint32_t index) {
if (index < static_cast<uint32_t>(string->length())) {
string->TryFlatten();
return LookupSingleCharacterStringFromCode(
+ string->GetIsolate(),
string->Get(index));
}
return Execution::CharAt(string, index);
@@ -3998,7 +3894,7 @@ MaybeObject* Runtime::GetElementOrCharAt(Isolate* isolate,
}
if (object->IsString() || object->IsNumber() || object->IsBoolean()) {
- return object->GetPrototype()->GetElement(index);
+ return object->GetPrototype(isolate)->GetElement(index);
}
return object->GetElement(index);
@@ -4047,7 +3943,7 @@ MaybeObject* Runtime::GetObjectProperty(Isolate* isolate,
RUNTIME_FUNCTION(MaybeObject*, Runtime_GetProperty) {
- NoHandleAllocation ha;
+ NoHandleAllocation ha(isolate);
ASSERT(args.length() == 2);
Handle<Object> object = args.at<Object>(0);
@@ -4059,7 +3955,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetProperty) {
// KeyedStringGetProperty is called from KeyedLoadIC::GenerateGeneric.
RUNTIME_FUNCTION(MaybeObject*, Runtime_KeyedGetProperty) {
- NoHandleAllocation ha;
+ NoHandleAllocation ha(isolate);
ASSERT(args.length() == 2);
// Fast cases for getting named properties of the receiver JSObject
@@ -4095,7 +3991,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_KeyedGetProperty) {
LookupResult result(isolate);
receiver->LocalLookup(key, &result);
if (result.IsField()) {
- int offset = result.GetFieldIndex();
+ int offset = result.GetFieldIndex().field_index();
keyed_lookup_cache->Update(receiver_map, key, offset);
return receiver->FastPropertyAt(offset);
}
@@ -4121,8 +4017,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_KeyedGetProperty) {
// become FAST_DOUBLE_ELEMENTS.
Handle<JSObject> js_object(args.at<JSObject>(0));
ElementsKind elements_kind = js_object->GetElementsKind();
- if (IsFastElementsKind(elements_kind) &&
- !IsFastObjectElementsKind(elements_kind)) {
+ if (IsFastDoubleElementsKind(elements_kind)) {
FixedArrayBase* elements = js_object->elements();
if (args.at<Smi>(1)->value() >= elements->length()) {
if (IsFastHoleyElementsKind(elements_kind)) {
@@ -4135,6 +4030,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_KeyedGetProperty) {
isolate);
if (maybe_object->IsFailure()) return maybe_object;
}
+ } else {
+ ASSERT(IsFastSmiOrObjectElementsKind(elements_kind) ||
+ !IsFastElementsKind(elements_kind));
}
}
} else if (args[0]->IsString() && args[1]->IsSmi()) {
@@ -4269,7 +4167,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetDataProperty) {
case NORMAL:
return lookup.holder()->GetNormalizedProperty(&lookup);
case FIELD:
- return lookup.holder()->FastPropertyAt(lookup.GetFieldIndex());
+ return lookup.holder()->FastPropertyAt(
+ lookup.GetFieldIndex().field_index());
case CONSTANT_FUNCTION:
return lookup.GetConstantFunction();
case CALLBACKS:
@@ -4457,7 +4356,7 @@ MaybeObject* Runtime::ForceDeleteObjectProperty(Isolate* isolate,
RUNTIME_FUNCTION(MaybeObject*, Runtime_SetProperty) {
- NoHandleAllocation ha;
+ NoHandleAllocation ha(isolate);
RUNTIME_ASSERT(args.length() == 4 || args.length() == 5);
Handle<Object> object = args.at<Object>(0);
@@ -4485,12 +4384,23 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetProperty) {
}
+RUNTIME_FUNCTION(MaybeObject*, Runtime_TransitionElementsKind) {
+ HandleScope scope(isolate);
+ RUNTIME_ASSERT(args.length() == 2);
+ CONVERT_ARG_HANDLE_CHECKED(JSArray, array, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Map, map, 1);
+ JSObject::TransitionElementsKind(array, map->elements_kind());
+ return *array;
+}
+
+
RUNTIME_FUNCTION(MaybeObject*, Runtime_TransitionElementsSmiToDouble) {
- NoHandleAllocation ha;
+ NoHandleAllocation ha(isolate);
RUNTIME_ASSERT(args.length() == 1);
Handle<Object> object = args.at<Object>(0);
if (object->IsJSObject()) {
Handle<JSObject> js_object(Handle<JSObject>::cast(object));
+ ASSERT(!js_object->map()->is_observed());
ElementsKind new_kind = js_object->HasFastHoleyElements()
? FAST_HOLEY_DOUBLE_ELEMENTS
: FAST_DOUBLE_ELEMENTS;
@@ -4502,11 +4412,12 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_TransitionElementsSmiToDouble) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_TransitionElementsDoubleToObject) {
- NoHandleAllocation ha;
+ NoHandleAllocation ha(isolate);
RUNTIME_ASSERT(args.length() == 1);
Handle<Object> object = args.at<Object>(0);
if (object->IsJSObject()) {
Handle<JSObject> js_object(Handle<JSObject>::cast(object));
+ ASSERT(!js_object->map()->is_observed());
ElementsKind new_kind = js_object->HasFastHoleyElements()
? FAST_HOLEY_ELEMENTS
: FAST_ELEMENTS;
@@ -4521,7 +4432,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_TransitionElementsDoubleToObject) {
// This is used to decide if we should transform null and undefined
// into the global object when doing call and apply.
RUNTIME_FUNCTION(MaybeObject*, Runtime_SetNativeFlag) {
- NoHandleAllocation ha;
+ NoHandleAllocation ha(isolate);
RUNTIME_ASSERT(args.length() == 1);
Handle<Object> object = args.at<Object>(0);
@@ -4541,7 +4452,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StoreArrayLiteralElement) {
Handle<Object> value = args.at<Object>(2);
CONVERT_ARG_HANDLE_CHECKED(FixedArray, literals, 3);
CONVERT_SMI_ARG_CHECKED(literal_index, 4);
- HandleScope scope;
+ HandleScope scope(isolate);
Object* raw_boilerplate_object = literals->get(literal_index);
Handle<JSArray> boilerplate_object(JSArray::cast(raw_boilerplate_object));
@@ -4624,7 +4535,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugPrepareStepInIfStepping) {
// Set a local property, even if it is READ_ONLY. If the property does not
// exist, it will be added with attributes NONE.
RUNTIME_FUNCTION(MaybeObject*, Runtime_IgnoreAttributesAndSetProperty) {
- NoHandleAllocation ha;
+ NoHandleAllocation ha(isolate);
RUNTIME_ASSERT(args.length() == 3 || args.length() == 4);
CONVERT_ARG_CHECKED(JSObject, object, 0);
CONVERT_ARG_CHECKED(String, name, 1);
@@ -4644,7 +4555,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_IgnoreAttributesAndSetProperty) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_DeleteProperty) {
- NoHandleAllocation ha;
+ NoHandleAllocation ha(isolate);
ASSERT(args.length() == 3);
CONVERT_ARG_CHECKED(JSReceiver, object, 0);
@@ -4663,7 +4574,7 @@ static Object* HasLocalPropertyImplementation(Isolate* isolate,
// Handle hidden prototypes. If there's a hidden prototype above this thing
// then we have to check it for properties, because they are supposed to
// look like they are on this object.
- Handle<Object> proto(object->GetPrototype());
+ Handle<Object> proto(object->GetPrototype(), isolate);
if (proto->IsJSObject() &&
Handle<JSObject>::cast(proto)->map()->is_hidden_prototype()) {
return HasLocalPropertyImplementation(isolate,
@@ -4675,7 +4586,7 @@ static Object* HasLocalPropertyImplementation(Isolate* isolate,
RUNTIME_FUNCTION(MaybeObject*, Runtime_HasLocalProperty) {
- NoHandleAllocation ha;
+ NoHandleAllocation ha(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(String, key, 1);
@@ -4713,7 +4624,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_HasLocalProperty) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_HasProperty) {
- NoHandleAllocation na;
+ NoHandleAllocation na(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(JSReceiver, receiver, 0);
CONVERT_ARG_CHECKED(String, key, 1);
@@ -4725,7 +4636,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_HasProperty) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_HasElement) {
- NoHandleAllocation na;
+ NoHandleAllocation na(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(JSReceiver, receiver, 0);
CONVERT_SMI_ARG_CHECKED(index, 1);
@@ -4737,47 +4648,12 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_HasElement) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_IsPropertyEnumerable) {
- NoHandleAllocation ha;
+ NoHandleAllocation ha(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(JSObject, object, 0);
CONVERT_ARG_CHECKED(String, key, 1);
- uint32_t index;
- if (key->AsArrayIndex(&index)) {
- JSObject::LocalElementType type = object->GetLocalElementType(index);
- switch (type) {
- case JSObject::UNDEFINED_ELEMENT:
- case JSObject::STRING_CHARACTER_ELEMENT:
- return isolate->heap()->false_value();
- case JSObject::INTERCEPTED_ELEMENT:
- case JSObject::FAST_ELEMENT:
- return isolate->heap()->true_value();
- case JSObject::DICTIONARY_ELEMENT: {
- if (object->IsJSGlobalProxy()) {
- Object* proto = object->GetPrototype();
- if (proto->IsNull()) {
- return isolate->heap()->false_value();
- }
- ASSERT(proto->IsJSGlobalObject());
- object = JSObject::cast(proto);
- }
- FixedArray* elements = FixedArray::cast(object->elements());
- SeededNumberDictionary* dictionary = NULL;
- if (elements->map() ==
- isolate->heap()->non_strict_arguments_elements_map()) {
- dictionary = SeededNumberDictionary::cast(elements->get(1));
- } else {
- dictionary = SeededNumberDictionary::cast(elements);
- }
- int entry = dictionary->FindEntry(index);
- ASSERT(entry != SeededNumberDictionary::kNotFound);
- PropertyDetails details = dictionary->DetailsAt(entry);
- return isolate->heap()->ToBoolean(!details.IsDontEnum());
- }
- }
- }
-
PropertyAttributes att = object->GetLocalPropertyAttribute(key);
return isolate->heap()->ToBoolean(att != ABSENT && (att & DONT_ENUM) == 0);
}
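Runtime_IsPropertyEnumerable loses its hand-rolled element handling: GetLocalPropertyAttribute covers array indices, interceptors and dictionary elements internally, so enumerability reduces to a single attribute test. A minimal sketch of the predicate the new body evaluates:

    static bool IsEnumerableAttribute(PropertyAttributes att) {
      // ABSENT: no own property at all; DONT_ENUM: hidden from for-in.
      return att != ABSENT && (att & DONT_ENUM) == 0;
    }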
@@ -4911,7 +4787,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetLocalPropertyNames) {
int dest_pos = 0;
for (int i = 0; i < total_property_count; i++) {
Object* name = old_names->get(i);
- if (name == isolate->heap()->hidden_symbol()) {
+ if (name == isolate->heap()->hidden_string()) {
continue;
}
names->set(dest_pos++, name);
@@ -5002,7 +4878,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_LocalKeys) {
return *isolate->factory()->NewJSArray(0);
}
- Handle<Object> proto(object->GetPrototype());
+ Handle<Object> proto(object->GetPrototype(), isolate);
// If proxy is detached we simply return an empty array.
if (proto->IsNull()) return *isolate->factory()->NewJSArray(0);
object = Handle<JSObject>::cast(proto);
@@ -5036,7 +4912,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_LocalKeys) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_GetArgumentsProperty) {
- NoHandleAllocation ha;
+ NoHandleAllocation ha(isolate);
ASSERT(args.length() == 1);
// Compute the frame holding the arguments.
@@ -5072,8 +4948,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetArgumentsProperty) {
}
// Handle special arguments properties.
- if (key->Equals(isolate->heap()->length_symbol())) return Smi::FromInt(n);
- if (key->Equals(isolate->heap()->callee_symbol())) {
+ if (key->Equals(isolate->heap()->length_string())) return Smi::FromInt(n);
+ if (key->Equals(isolate->heap()->callee_string())) {
Object* function = frame->function();
if (function->IsJSFunction() &&
!JSFunction::cast(function)->shared()->is_classic_mode()) {
@@ -5098,7 +4974,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ToFastProperties) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_ToBool) {
- NoHandleAllocation ha;
+ NoHandleAllocation ha(isolate);
ASSERT(args.length() == 1);
return args[0]->ToBoolean();
@@ -5108,46 +4984,46 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ToBool) {
// Returns the type string of a value; see ECMA-262, 11.4.3 (p 47).
// Possible optimizations: put the type string into the oddballs.
RUNTIME_FUNCTION(MaybeObject*, Runtime_Typeof) {
- NoHandleAllocation ha;
+ NoHandleAllocation ha(isolate);
Object* obj = args[0];
- if (obj->IsNumber()) return isolate->heap()->number_symbol();
+ if (obj->IsNumber()) return isolate->heap()->number_string();
HeapObject* heap_obj = HeapObject::cast(obj);
// typeof an undetectable object is 'undefined'
if (heap_obj->map()->is_undetectable()) {
- return isolate->heap()->undefined_symbol();
+ return isolate->heap()->undefined_string();
}
InstanceType instance_type = heap_obj->map()->instance_type();
if (instance_type < FIRST_NONSTRING_TYPE) {
- return isolate->heap()->string_symbol();
+ return isolate->heap()->string_string();
}
switch (instance_type) {
case ODDBALL_TYPE:
if (heap_obj->IsTrue() || heap_obj->IsFalse()) {
- return isolate->heap()->boolean_symbol();
+ return isolate->heap()->boolean_string();
}
if (heap_obj->IsNull()) {
return FLAG_harmony_typeof
- ? isolate->heap()->null_symbol()
- : isolate->heap()->object_symbol();
+ ? isolate->heap()->null_string()
+ : isolate->heap()->object_string();
}
ASSERT(heap_obj->IsUndefined());
- return isolate->heap()->undefined_symbol();
+ return isolate->heap()->undefined_string();
case JS_FUNCTION_TYPE:
case JS_FUNCTION_PROXY_TYPE:
- return isolate->heap()->function_symbol();
+ return isolate->heap()->function_string();
default:
// For any kind of object not handled above, the spec rule for
// host objects gives that it is okay to return "object"
- return isolate->heap()->object_symbol();
+ return isolate->heap()->object_string();
}
}
-static bool AreDigits(const char*s, int from, int to) {
+static bool AreDigits(const uint8_t*s, int from, int to) {
for (int i = from; i < to; i++) {
if (s[i] < '0' || s[i] > '9') return false;
}
@@ -5156,7 +5032,7 @@ static bool AreDigits(const char*s, int from, int to) {
}
-static int ParseDecimalInteger(const char*s, int from, int to) {
+static int ParseDecimalInteger(const uint8_t*s, int from, int to) {
ASSERT(to - from < 10); // Overflow is not possible.
ASSERT(from < to);
int d = s[from] - '0';
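AreDigits and ParseDecimalInteger back the short-integer fast path of Runtime_StringToNumber below; only their character type changes (uint8_t for one-byte string data). For reference, a self-contained sketch of the accumulation ParseDecimalInteger performs, relying on the asserted precondition that to - from < 10 so an int cannot overflow:

    static int ParseDecimalIntegerSketch(const uint8_t* s, int from, int to) {
      int d = 0;
      for (int i = from; i < to; i++) {
        d = d * 10 + (s[i] - '0');  // callers guarantee digits via AreDigits()
      }
      return d;
    }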
@@ -5170,17 +5046,17 @@ static int ParseDecimalInteger(const char*s, int from, int to) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_StringToNumber) {
- NoHandleAllocation ha;
+ NoHandleAllocation ha(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(String, subject, 0);
subject->TryFlatten();
// Fast case: short integer or some sorts of junk values.
int len = subject->length();
- if (subject->IsSeqAsciiString()) {
+ if (subject->IsSeqOneByteString()) {
if (len == 0) return Smi::FromInt(0);
- char const* data = SeqAsciiString::cast(subject)->GetChars();
+ uint8_t const* data = SeqOneByteString::cast(subject)->GetChars();
bool minus = (data[0] == '-');
int start_pos = (minus ? 1 : 0);
@@ -5190,8 +5066,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringToNumber) {
// Fast check for a junk value. A valid string may start from a
// whitespace, a sign ('+' or '-'), the decimal point, a decimal digit or
// the 'I' character ('Infinity'). All of those have codes not greater than
- // '9' except 'I'.
- if (data[start_pos] != 'I') {
+ // '9' except 'I' and &nbsp;.
+ if (data[start_pos] != 'I' && data[start_pos] != 0xa0) {
return isolate->heap()->nan_value();
}
} else if (len - start_pos < 10 && AreDigits(data, start_pos, len)) {
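The added 0xa0 comparison is a Latin-1 consequence: a one-byte string can now begin with a no-break space, which is valid numeric whitespace, so the junk-value fast path must not reject it. The start-character test, restated as a sketch under the same cutoffs:

    static bool CanStartNumber(uint8_t c) {
      // '+', '-', '.', digits and ordinary whitespace all have codes at or
      // below '9'; the two valid starts above '9' are 'I' and the NBSP.
      return c <= '9' || c == 'I' || c == 0xa0;
    }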
@@ -5224,244 +5100,49 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringToNumber) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_StringFromCharCodeArray) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 1);
-
- CONVERT_ARG_CHECKED(JSArray, codes, 0);
- int length = Smi::cast(codes->length())->value();
-
- // Check if the string can be ASCII.
- int i;
- for (i = 0; i < length; i++) {
- Object* element;
- { MaybeObject* maybe_element = codes->GetElement(i);
- // We probably can't get an exception here, but just in order to enforce
- // the checking of inputs in the runtime calls we check here.
- if (!maybe_element->ToObject(&element)) return maybe_element;
- }
- CONVERT_NUMBER_CHECKED(int, chr, Int32, element);
- if ((chr & 0xffff) > String::kMaxAsciiCharCode)
- break;
- }
-
- MaybeObject* maybe_object = NULL;
- if (i == length) { // The string is ASCII.
- maybe_object = isolate->heap()->AllocateRawAsciiString(length);
- } else { // The string is not ASCII.
- maybe_object = isolate->heap()->AllocateRawTwoByteString(length);
- }
-
- Object* object = NULL;
- if (!maybe_object->ToObject(&object)) return maybe_object;
- String* result = String::cast(object);
- for (int i = 0; i < length; i++) {
- Object* element;
- { MaybeObject* maybe_element = codes->GetElement(i);
- if (!maybe_element->ToObject(&element)) return maybe_element;
- }
- CONVERT_NUMBER_CHECKED(int, chr, Int32, element);
- result->Set(i, chr & 0xffff);
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NewString) {
+ CONVERT_SMI_ARG_CHECKED(length, 0);
+ CONVERT_BOOLEAN_ARG_CHECKED(is_one_byte, 1);
+ if (length == 0) return isolate->heap()->empty_string();
+ if (is_one_byte) {
+ return isolate->heap()->AllocateRawOneByteString(length);
+ } else {
+ return isolate->heap()->AllocateRawTwoByteString(length);
}
- return result;
}
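%NewString replaces StringFromCharCodeArray's allocate-and-fill loop with a bare allocation: the one-byte/two-byte decision now rests with the caller rather than with a scan over the char codes here. A hypothetical caller-side scan (illustrative only, not part of the patch):

    static bool FitsOneByte(const uint16_t* codes, int length) {
      for (int i = 0; i < length; i++) {
        if (codes[i] > 0xff) return false;  // outside Latin-1
      }
      return true;
    }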
-// kNotEscaped is generated by the following:
-//
-// #!/bin/perl
-// for (my $i = 0; $i < 256; $i++) {
-// print "\n" if $i % 16 == 0;
-// my $c = chr($i);
-// my $escaped = 1;
-// $escaped = 0 if $c =~ m#[A-Za-z0-9@*_+./-]#;
-// print $escaped ? "0, " : "1, ";
-// }
-
-
-static bool IsNotEscaped(uint16_t character) {
- // Only for 8 bit characters, the rest are always escaped (in a different way)
- ASSERT(character < 256);
- static const char kNotEscaped[256] = {
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0,
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1,
- 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- };
- return kNotEscaped[character] != 0;
+RUNTIME_FUNCTION(MaybeObject*, Runtime_TruncateString) {
+ CONVERT_ARG_CHECKED(SeqString, string, 0);
+ CONVERT_SMI_ARG_CHECKED(new_length, 1);
+ return string->Truncate(new_length);
}
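%TruncateString reads like the second half of a two-phase protocol with %NewString above; that pairing is an inference from their shapes, not something this hunk states. The presumed usage pattern:

    // raw = %NewString(worst_case, true);   // over-allocate, one-byte
    // ... write characters, tracking the count actually used ...
    // result = %TruncateString(raw, used);  // shrink in place to fit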
RUNTIME_FUNCTION(MaybeObject*, Runtime_URIEscape) {
- const char hex_chars[] = "0123456789ABCDEF";
- NoHandleAllocation ha;
ASSERT(args.length() == 1);
- CONVERT_ARG_CHECKED(String, source, 0);
-
- source->TryFlatten();
-
- int escaped_length = 0;
- int length = source->length();
- {
- Access<StringInputBuffer> buffer(
- isolate->runtime_state()->string_input_buffer());
- buffer->Reset(source);
- while (buffer->has_more()) {
- uint16_t character = buffer->GetNext();
- if (character >= 256) {
- escaped_length += 6;
- } else if (IsNotEscaped(character)) {
- escaped_length++;
- } else {
- escaped_length += 3;
- }
- // We don't allow strings that are longer than a maximal length.
- ASSERT(String::kMaxLength < 0x7fffffff - 6); // Cannot overflow.
- if (escaped_length > String::kMaxLength) {
- isolate->context()->mark_out_of_memory();
- return Failure::OutOfMemoryException();
- }
- }
- }
- // No length change implies no change. Return original string if no change.
- if (escaped_length == length) {
- return source;
- }
- Object* o;
- { MaybeObject* maybe_o =
- isolate->heap()->AllocateRawAsciiString(escaped_length);
- if (!maybe_o->ToObject(&o)) return maybe_o;
- }
- String* destination = String::cast(o);
- int dest_position = 0;
-
- Access<StringInputBuffer> buffer(
- isolate->runtime_state()->string_input_buffer());
- buffer->Rewind();
- while (buffer->has_more()) {
- uint16_t chr = buffer->GetNext();
- if (chr >= 256) {
- destination->Set(dest_position, '%');
- destination->Set(dest_position+1, 'u');
- destination->Set(dest_position+2, hex_chars[chr >> 12]);
- destination->Set(dest_position+3, hex_chars[(chr >> 8) & 0xf]);
- destination->Set(dest_position+4, hex_chars[(chr >> 4) & 0xf]);
- destination->Set(dest_position+5, hex_chars[chr & 0xf]);
- dest_position += 6;
- } else if (IsNotEscaped(chr)) {
- destination->Set(dest_position, chr);
- dest_position++;
- } else {
- destination->Set(dest_position, '%');
- destination->Set(dest_position+1, hex_chars[chr >> 4]);
- destination->Set(dest_position+2, hex_chars[chr & 0xf]);
- dest_position += 3;
- }
- }
- return destination;
-}
-
-
-static inline int TwoDigitHex(uint16_t character1, uint16_t character2) {
- static const signed char kHexValue['g'] = {
- -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
- 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -1, -1, -1, -1, -1, -1,
- -1, 10, 11, 12, 13, 14, 15, -1, -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
- -1, 10, 11, 12, 13, 14, 15 };
-
- if (character1 > 'f') return -1;
- int hi = kHexValue[character1];
- if (hi == -1) return -1;
- if (character2 > 'f') return -1;
- int lo = kHexValue[character2];
- if (lo == -1) return -1;
- return (hi << 4) + lo;
-}
-
-
-static inline int Unescape(String* source,
- int i,
- int length,
- int* step) {
- uint16_t character = source->Get(i);
- int32_t hi = 0;
- int32_t lo = 0;
- if (character == '%' &&
- i <= length - 6 &&
- source->Get(i + 1) == 'u' &&
- (hi = TwoDigitHex(source->Get(i + 2),
- source->Get(i + 3))) != -1 &&
- (lo = TwoDigitHex(source->Get(i + 4),
- source->Get(i + 5))) != -1) {
- *step = 6;
- return (hi << 8) + lo;
- } else if (character == '%' &&
- i <= length - 3 &&
- (lo = TwoDigitHex(source->Get(i + 1),
- source->Get(i + 2))) != -1) {
- *step = 3;
- return lo;
- } else {
- *step = 1;
- return character;
- }
+ HandleScope scope(isolate);
+ CONVERT_ARG_HANDLE_CHECKED(String, source, 0);
+ Handle<String> string = FlattenGetString(source);
+ String::FlatContent content = string->GetFlatContent();
+ ASSERT(content.IsFlat());
+ Handle<String> result =
+ content.IsAscii() ? URIEscape::Escape<uint8_t>(isolate, source)
+ : URIEscape::Escape<uc16>(isolate, source);
+ if (result.is_null()) return Failure::OutOfMemoryException(0x12);
+ return *result;
}
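The inline escape loop gives way to a templated URIEscape::Escape selected by the string's flat representation. For reference, the per-character sizing rule the removed loop applied (IsNotEscaped as it was defined before this patch):

    static int EscapedLengthOf(uint16_t c) {
      if (c >= 256) return 6;           // encoded as "%uXXXX"
      return IsNotEscaped(c) ? 1 : 3;   // kept as-is, or encoded as "%XX"
    }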
RUNTIME_FUNCTION(MaybeObject*, Runtime_URIUnescape) {
- NoHandleAllocation ha;
ASSERT(args.length() == 1);
- CONVERT_ARG_CHECKED(String, source, 0);
-
- source->TryFlatten();
-
- bool ascii = true;
- int length = source->length();
-
- int unescaped_length = 0;
- for (int i = 0; i < length; unescaped_length++) {
- int step;
- if (Unescape(source, i, length, &step) > String::kMaxAsciiCharCode) {
- ascii = false;
- }
- i += step;
- }
-
- // No length change implies no change. Return original string if no change.
- if (unescaped_length == length)
- return source;
-
- Object* o;
- { MaybeObject* maybe_o =
- ascii ?
- isolate->heap()->AllocateRawAsciiString(unescaped_length) :
- isolate->heap()->AllocateRawTwoByteString(unescaped_length);
- if (!maybe_o->ToObject(&o)) return maybe_o;
- }
- String* destination = String::cast(o);
-
- int dest_position = 0;
- for (int i = 0; i < length; dest_position++) {
- int step;
- destination->Set(dest_position, Unescape(source, i, length, &step));
- i += step;
- }
- return destination;
+ HandleScope scope(isolate);
+ CONVERT_ARG_HANDLE_CHECKED(String, source, 0);
+ Handle<String> string = FlattenGetString(source);
+ String::FlatContent content = string->GetFlatContent();
+ ASSERT(content.IsFlat());
+ return content.IsAscii() ? *URIUnescape::Unescape<uint8_t>(isolate, source)
+ : *URIUnescape::Unescape<uc16>(isolate, source);
}
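The deleted TwoDigitHex and Unescape helpers decoded "%XX" and "%uXXXX" sequences; URIUnescape::Unescape presumably carries the same rules. The table-based hex pairing above, restated as an equivalent sketch:

    static int HexValue(uint16_t c) {
      if (c >= '0' && c <= '9') return c - '0';
      if (c >= 'A' && c <= 'F') return c - 'A' + 10;
      if (c >= 'a' && c <= 'f') return c - 'a' + 10;
      return -1;
    }

    static int TwoDigitHexSketch(uint16_t c1, uint16_t c2) {
      int hi = HexValue(c1);
      int lo = HexValue(c2);
      return (hi < 0 || lo < 0) ? -1 : (hi << 4) + lo;
    }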
@@ -5547,8 +5228,8 @@ MaybeObject* AllocateRawString<SeqTwoByteString>(Isolate* isolate, int length) {
template <>
-MaybeObject* AllocateRawString<SeqAsciiString>(Isolate* isolate, int length) {
- return isolate->heap()->AllocateRawAsciiString(length);
+MaybeObject* AllocateRawString<SeqOneByteString>(Isolate* isolate, int length) {
+ return isolate->heap()->AllocateRawOneByteString(length);
}
@@ -5562,7 +5243,7 @@ static MaybeObject* SlowQuoteJsonString(Isolate* isolate,
int quoted_length = kSpaceForQuotes;
while (read_cursor < end) {
Char c = *(read_cursor++);
- if (sizeof(Char) > 1u && static_cast<unsigned>(c) >= kQuoteTableLength) {
+ if (static_cast<unsigned>(c) >= kQuoteTableLength) {
quoted_length++;
} else {
quoted_length += JsonQuoteLengths[static_cast<unsigned>(c)];
@@ -5584,7 +5265,7 @@ static MaybeObject* SlowQuoteJsonString(Isolate* isolate,
read_cursor = characters.start();
while (read_cursor < end) {
Char c = *(read_cursor++);
- if (sizeof(Char) > 1u && static_cast<unsigned>(c) >= kQuoteTableLength) {
+ if (static_cast<unsigned>(c) >= kQuoteTableLength) {
*(write_cursor++) = c;
} else {
int len = JsonQuoteLengths[static_cast<unsigned>(c)];
@@ -5612,8 +5293,7 @@ static inline SinkChar* WriteQuoteJsonString(
*(write_cursor++) = '"';
while (read_cursor < end) {
SourceChar c = *(read_cursor++);
- if (sizeof(SourceChar) > 1u &&
- static_cast<unsigned>(c) >= kQuoteTableLength) {
+ if (static_cast<unsigned>(c) >= kQuoteTableLength) {
*(write_cursor++) = static_cast<SinkChar>(c);
} else {
int len = JsonQuoteLengths[static_cast<unsigned>(c)];
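Dropping the sizeof guard is another Latin-1 consequence: previously a one-byte Char was guaranteed ASCII and therefore always within the quote table, but now values 0x80..0xff can appear in one-byte strings too, so every character is range-checked before indexing JsonQuoteLengths:

    // The guard now applied to both widths (sketch):
    //   c <  kQuoteTableLength  -> consult the quote tables
    //   c >= kQuoteTableLength  -> copy the character through verbatim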
@@ -5682,7 +5362,7 @@ static MaybeObject* QuoteJsonString(Isolate* isolate,
RUNTIME_FUNCTION(MaybeObject*, Runtime_QuoteJSONString) {
- NoHandleAllocation ha;
+ NoHandleAllocation ha(isolate);
CONVERT_ARG_CHECKED(String, str, 0);
if (!str->IsFlat()) {
MaybeObject* try_flatten = str->TryFlatten();
@@ -5699,14 +5379,15 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_QuoteJSONString) {
return QuoteJsonString<uc16, SeqTwoByteString, false>(isolate,
flat.ToUC16Vector());
} else {
- return QuoteJsonString<char, SeqAsciiString, false>(isolate,
- flat.ToAsciiVector());
+ return QuoteJsonString<uint8_t, SeqOneByteString, false>(
+ isolate,
+ flat.ToOneByteVector());
}
}
RUNTIME_FUNCTION(MaybeObject*, Runtime_QuoteJSONStringComma) {
- NoHandleAllocation ha;
+ NoHandleAllocation ha(isolate);
CONVERT_ARG_CHECKED(String, str, 0);
if (!str->IsFlat()) {
MaybeObject* try_flatten = str->TryFlatten();
@@ -5722,8 +5403,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_QuoteJSONStringComma) {
return QuoteJsonString<uc16, SeqTwoByteString, true>(isolate,
flat.ToUC16Vector());
} else {
- return QuoteJsonString<char, SeqAsciiString, true>(isolate,
- flat.ToAsciiVector());
+ return QuoteJsonString<uint8_t, SeqOneByteString, true>(
+ isolate,
+ flat.ToOneByteVector());
}
}
@@ -5764,9 +5446,10 @@ static MaybeObject* QuoteJsonStringArray(Isolate* isolate,
write_cursor,
content.ToUC16Vector());
} else {
- write_cursor = WriteQuoteJsonString<Char, char>(isolate,
- write_cursor,
- content.ToAsciiVector());
+ write_cursor =
+ WriteQuoteJsonString<Char, uint8_t>(isolate,
+ write_cursor,
+ content.ToOneByteVector());
}
}
*(write_cursor++) = ']';
@@ -5782,7 +5465,7 @@ static MaybeObject* QuoteJsonStringArray(Isolate* isolate,
RUNTIME_FUNCTION(MaybeObject*, Runtime_QuoteJSONStringArray) {
- NoHandleAllocation ha;
+ NoHandleAllocation ha(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSArray, array, 0);
@@ -5814,7 +5497,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_QuoteJSONStringArray) {
}
if (ascii) {
- return QuoteJsonStringArray<char, SeqAsciiString>(isolate,
+ return QuoteJsonStringArray<char, SeqOneByteString>(isolate,
elements,
worst_case_length);
} else {
@@ -5829,12 +5512,12 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_BasicJSONStringify) {
ASSERT(args.length() == 1);
HandleScope scope(isolate);
BasicJsonStringifier stringifier(isolate);
- return stringifier.Stringify(Handle<Object>(args[0]));
+ return stringifier.Stringify(Handle<Object>(args[0], isolate));
}
RUNTIME_FUNCTION(MaybeObject*, Runtime_StringParseInt) {
- NoHandleAllocation ha;
+ NoHandleAllocation ha(isolate);
CONVERT_ARG_CHECKED(String, s, 0);
CONVERT_SMI_ARG_CHECKED(radix, 1);
@@ -5848,7 +5531,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringParseInt) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_StringParseFloat) {
- NoHandleAllocation ha;
+ NoHandleAllocation ha(isolate);
CONVERT_ARG_CHECKED(String, str, 0);
// ECMA-262 section 15.1.2.3, empty string is NaN
@@ -5879,8 +5562,8 @@ MUST_USE_RESULT static MaybeObject* ConvertCaseHelper(
// might break in the future if we implement more context and locale
// dependent upper/lower conversions.
Object* o;
- { MaybeObject* maybe_o = s->IsAsciiRepresentation()
- ? isolate->heap()->AllocateRawAsciiString(length)
+ { MaybeObject* maybe_o = s->IsOneByteRepresentation()
+ ? isolate->heap()->AllocateRawOneByteString(length)
: isolate->heap()->AllocateRawTwoByteString(length);
if (!maybe_o->ToObject(&o)) return maybe_o;
}
@@ -5889,15 +5572,15 @@ MUST_USE_RESULT static MaybeObject* ConvertCaseHelper(
// Convert all characters to upper case, assuming that they will fit
// in the buffer
- Access<StringInputBuffer> buffer(
- isolate->runtime_state()->string_input_buffer());
- buffer->Reset(s);
+ Access<ConsStringIteratorOp> op(
+ isolate->runtime_state()->string_iterator());
+ StringCharacterStream stream(s, op.value());
unibrow::uchar chars[Converter::kMaxWidth];
// We can assume that the string is not empty
- uc32 current = buffer->GetNext();
+ uc32 current = stream.GetNext();
for (int i = 0; i < length;) {
- bool has_next = buffer->has_more();
- uc32 next = has_next ? buffer->GetNext() : 0;
+ bool has_next = stream.HasMore();
+ uc32 next = has_next ? stream.GetNext() : 0;
int char_length = mapping->get(current, next, chars);
if (char_length == 0) {
// The case conversion of this character is the character itself.
@@ -5927,8 +5610,8 @@ MUST_USE_RESULT static MaybeObject* ConvertCaseHelper(
if (next_length == 0) next_length = 1;
}
int current_length = i + char_length + next_length;
- while (buffer->has_more()) {
- current = buffer->GetNext();
+ while (stream.HasMore()) {
+ current = stream.GetNext();
// NOTE: we use 0 as the next character here because, while
// the next character may affect what a character converts to,
// it does not in any case affect the length of what it convert
@@ -5938,7 +5621,7 @@ MUST_USE_RESULT static MaybeObject* ConvertCaseHelper(
current_length += char_length;
if (current_length > Smi::kMaxValue) {
isolate->context()->mark_out_of_memory();
- return Failure::OutOfMemoryException();
+ return Failure::OutOfMemoryException(0x13);
}
}
// Try again with the real length.
@@ -5967,7 +5650,9 @@ MUST_USE_RESULT static MaybeObject* ConvertCaseHelper(
namespace {
static const uintptr_t kOneInEveryByte = kUintptrAllBitsSet / 0xFF;
-
+#ifdef ENABLE_LATIN_1
+static const uintptr_t kAsciiMask = kOneInEveryByte << 7;
+#endif
// Given a word and two range boundaries returns a word with high bit
// set in every byte iff the corresponding input byte was strictly in
@@ -5977,11 +5662,14 @@ static const uintptr_t kOneInEveryByte = kUintptrAllBitsSet / 0xFF;
// Requires: all bytes in the input word and the boundaries must be
// ASCII (less than 0x7F).
static inline uintptr_t AsciiRangeMask(uintptr_t w, char m, char n) {
- // Every byte in an ASCII string is less than or equal to 0x7F.
- ASSERT((w & (kOneInEveryByte * 0x7F)) == w);
// Use strict inequalities since in edge cases the function could be
// further simplified.
- ASSERT(0 < m && m < n && n < 0x7F);
+ ASSERT(0 < m && m < n);
+#ifndef ENABLE_LATIN_1
+ // Every byte in an ASCII string is less than or equal to 0x7F.
+ ASSERT((w & (kOneInEveryByte * 0x7F)) == w);
+ ASSERT(n < 0x7F);
+#endif
// Has high bit set in every w byte less than n.
uintptr_t tmp1 = kOneInEveryByte * (0x7F + n) - w;
// Has high bit set in every w byte greater than m.
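The relaxed assertions let AsciiRangeMask run over Latin-1 bytes as well (the caller sorts out non-ASCII input separately, via the accumulator introduced below). The test itself is a word-at-a-time trick: with kOneInEveryByte == 0x0101...01, per-byte arithmetic against the two boundaries sets a byte's high bit exactly when it lies strictly inside (m, n). A standalone sketch of the whole computation, completed from the comments above:

    static inline uintptr_t AsciiRangeMaskSketch(uintptr_t w, char m, char n) {
      const uintptr_t kOnes = ~static_cast<uintptr_t>(0) / 0xFF;  // 0x01 per byte
      uintptr_t lt_n = kOnes * (0x7F + n) - w;  // high bit set where byte < n
      uintptr_t gt_m = w + kOnes * (0x7F - m);  // high bit set where byte > m
      return lt_n & gt_m & (kOnes * 0x80);      // keep only the per-byte high bits
    }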
@@ -5998,7 +5686,11 @@ enum AsciiCaseConversion {
template <AsciiCaseConversion dir>
struct FastAsciiConverter {
+#ifdef ENABLE_LATIN_1
+ static bool Convert(char* dst, char* src, int length, bool* changed_out) {
+#else
static bool Convert(char* dst, char* src, int length) {
+#endif
#ifdef DEBUG
char* saved_dst = dst;
char* saved_src = src;
@@ -6010,12 +5702,18 @@ struct FastAsciiConverter {
const char lo = (dir == ASCII_TO_LOWER) ? 'A' - 1 : 'a' - 1;
const char hi = (dir == ASCII_TO_LOWER) ? 'Z' + 1 : 'z' + 1;
bool changed = false;
+#ifdef ENABLE_LATIN_1
+ uintptr_t or_acc = 0;
+#endif
char* const limit = src + length;
#ifdef V8_HOST_CAN_READ_UNALIGNED
// Process the prefix of the input that requires no conversion one
// (machine) word at a time.
while (src <= limit - sizeof(uintptr_t)) {
uintptr_t w = *reinterpret_cast<uintptr_t*>(src);
+#ifdef ENABLE_LATIN_1
+ or_acc |= w;
+#endif
if (AsciiRangeMask(w, lo, hi) != 0) {
changed = true;
break;
@@ -6028,6 +5726,9 @@ struct FastAsciiConverter {
// required one word at a time.
while (src <= limit - sizeof(uintptr_t)) {
uintptr_t w = *reinterpret_cast<uintptr_t*>(src);
+#ifdef ENABLE_LATIN_1
+ or_acc |= w;
+#endif
uintptr_t m = AsciiRangeMask(w, lo, hi);
// The mask has high (7th) bit set in every byte that needs
// conversion and we know that the distance between cases is
@@ -6041,6 +5742,9 @@ struct FastAsciiConverter {
// unaligned access is not supported).
while (src < limit) {
char c = *src;
+#ifdef ENABLE_LATIN_1
+ or_acc |= c;
+#endif
if (lo < c && c < hi) {
c ^= (1 << 5);
changed = true;
@@ -6049,10 +5753,20 @@ struct FastAsciiConverter {
++src;
++dst;
}
+#ifdef ENABLE_LATIN_1
+ if ((or_acc & kAsciiMask) != 0) {
+ return false;
+ }
+#endif
#ifdef DEBUG
CheckConvert(saved_dst, saved_src, length, changed);
#endif
+#ifdef ENABLE_LATIN_1
+ *changed_out = changed;
+ return true;
+#else
return changed;
+#endif
}
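The or_acc additions implement Latin-1 detection at near-zero cost: every word (or trailing byte) read is OR-ed into an accumulator, and kAsciiMask (the 0x80 bit of every byte) then tells whether any input byte fell outside ASCII, in which case Convert reports failure and the caller redoes the conversion on the two-byte path. The trick in isolation:

    static bool AllAscii(const unsigned char* src, size_t n) {
      const uintptr_t kOnes = ~static_cast<uintptr_t>(0) / 0xFF;
      const uintptr_t kMask = kOnes << 7;  // 0x80 in every byte
      uintptr_t acc = 0;
      // The word-at-a-time prefix is omitted; the byte loop shows the idea.
      for (size_t i = 0; i < n; i++) acc |= src[i];
      return (acc & kMask) == 0;
    }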
#ifdef DEBUG
@@ -6097,7 +5811,7 @@ MUST_USE_RESULT static MaybeObject* ConvertCase(
Arguments args,
Isolate* isolate,
unibrow::Mapping<typename ConvertTraits::UnibrowConverter, 128>* mapping) {
- NoHandleAllocation ha;
+ NoHandleAllocation ha(isolate);
CONVERT_ARG_CHECKED(String, s, 0);
s = s->TryFlattenGetString();
@@ -6111,15 +5825,30 @@ MUST_USE_RESULT static MaybeObject* ConvertCase(
// character is also ASCII. This is currently the case, but it
// might break in the future if we implement more context and locale
// dependent upper/lower conversions.
- if (s->IsSeqAsciiString()) {
+ if (s->IsSeqOneByteString()) {
Object* o;
- { MaybeObject* maybe_o = isolate->heap()->AllocateRawAsciiString(length);
+ { MaybeObject* maybe_o = isolate->heap()->AllocateRawOneByteString(length);
if (!maybe_o->ToObject(&o)) return maybe_o;
}
- SeqAsciiString* result = SeqAsciiString::cast(o);
+ SeqOneByteString* result = SeqOneByteString::cast(o);
+#ifndef ENABLE_LATIN_1
bool has_changed_character = ConvertTraits::AsciiConverter::Convert(
- result->GetChars(), SeqAsciiString::cast(s)->GetChars(), length);
+ reinterpret_cast<char*>(result->GetChars()),
+ reinterpret_cast<char*>(SeqOneByteString::cast(s)->GetChars()),
+ length);
return has_changed_character ? result : s;
+#else
+ bool has_changed_character;
+ bool is_ascii = ConvertTraits::AsciiConverter::Convert(
+ reinterpret_cast<char*>(result->GetChars()),
+ reinterpret_cast<char*>(SeqOneByteString::cast(s)->GetChars()),
+ length,
+ &has_changed_character);
+ // If not ASCII, we discard the result and take the two-byte path.
+ if (is_ascii) {
+ return has_changed_character ? result : s;
+ }
+#endif
}
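Under ENABLE_LATIN_1 the fast converter no longer promises success up front, hence the changed contract: the return value now means "the input was ASCII", and the changed flag moves to an out-parameter. The resulting control flow, reduced to its shape:

    // bool changed;
    // if (AsciiConverter::Convert(dst, src, length, &changed)) {
    //   return changed ? result : s;  // stayed one-byte: fast path done
    // }
    // // a non-ASCII byte was seen: discard `result`, redo on the two-byte path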
Object* answer;
@@ -6157,7 +5886,7 @@ static inline bool IsTrimWhiteSpace(unibrow::uchar c) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_StringTrim) {
- NoHandleAllocation ha;
+ NoHandleAllocation ha(isolate);
ASSERT(args.length() == 3);
CONVERT_ARG_CHECKED(String, s, 0);
@@ -6196,11 +5925,12 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringSplit) {
RUNTIME_ASSERT(pattern_length > 0);
if (limit == 0xffffffffu) {
- Handle<Object> cached_answer(RegExpResultsCache::Lookup(
- isolate->heap(),
- *subject,
- *pattern,
- RegExpResultsCache::STRING_SPLIT_SUBSTRINGS));
+ Handle<Object> cached_answer(
+ RegExpResultsCache::Lookup(isolate->heap(),
+ *subject,
+ *pattern,
+ RegExpResultsCache::STRING_SPLIT_SUBSTRINGS),
+ isolate);
if (*cached_answer != Smi::FromInt(0)) {
// The cache FixedArray is a COW-array and can therefore be reused.
Handle<JSArray> result =
@@ -6252,7 +5982,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringSplit) {
Handle<FixedArray> elements(FixedArray::cast(result->elements()));
int part_start = 0;
for (int i = 0; i < part_count; i++) {
- HandleScope local_loop_handle;
+ HandleScope local_loop_handle(isolate);
int part_end = indices.at(i);
Handle<String> substring =
isolate->factory()->NewProperSubString(subject, part_start, part_end);
@@ -6279,7 +6009,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringSplit) {
// not in the cache and fills the remainder with smi zeros. Returns
// the length of the successfully copied prefix.
static int CopyCachedAsciiCharsToArray(Heap* heap,
- const char* chars,
+ const uint8_t* chars,
FixedArray* elements,
int length) {
AssertNoAllocation no_gc;
@@ -6320,7 +6050,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringToArray) {
Handle<FixedArray> elements;
int position = 0;
- if (s->IsFlat() && s->IsAsciiRepresentation()) {
+ if (s->IsFlat() && s->IsOneByteRepresentation()) {
// Try using cached chars where possible.
Object* obj;
{ MaybeObject* maybe_obj =
@@ -6330,7 +6060,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringToArray) {
elements = Handle<FixedArray>(FixedArray::cast(obj), isolate);
String::FlatContent content = s->GetFlatContent();
if (content.IsAscii()) {
- Vector<const char> chars = content.ToAsciiVector();
+ Vector<const uint8_t> chars = content.ToOneByteVector();
// Note, this will initialize all elements (not only the prefix)
// to prevent GC from seeing partially initialized array.
position = CopyCachedAsciiCharsToArray(isolate->heap(),
@@ -6346,7 +6076,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringToArray) {
elements = isolate->factory()->NewFixedArray(length);
}
for (int i = position; i < length; ++i) {
- Handle<Object> str = LookupSingleCharacterStringFromCode(s->Get(i));
+ Handle<Object> str =
+ LookupSingleCharacterStringFromCode(isolate, s->Get(i));
elements->set(i, *str);
}
@@ -6361,7 +6092,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringToArray) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_NewStringWrapper) {
- NoHandleAllocation ha;
+ NoHandleAllocation ha(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(String, value, 0);
return value->ToObject();
@@ -6376,7 +6107,7 @@ bool Runtime::IsUpperCaseChar(RuntimeState* runtime_state, uint16_t ch) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToString) {
- NoHandleAllocation ha;
+ NoHandleAllocation ha(isolate);
ASSERT(args.length() == 1);
Object* number = args[0];
@@ -6387,7 +6118,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToString) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToStringSkipCache) {
- NoHandleAllocation ha;
+ NoHandleAllocation ha(isolate);
ASSERT(args.length() == 1);
Object* number = args[0];
@@ -6398,7 +6129,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToStringSkipCache) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToInteger) {
- NoHandleAllocation ha;
+ NoHandleAllocation ha(isolate);
ASSERT(args.length() == 1);
CONVERT_DOUBLE_ARG_CHECKED(number, 0);
@@ -6412,7 +6143,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToInteger) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToIntegerMapMinusZero) {
- NoHandleAllocation ha;
+ NoHandleAllocation ha(isolate);
ASSERT(args.length() == 1);
CONVERT_DOUBLE_ARG_CHECKED(number, 0);
@@ -6431,7 +6162,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToIntegerMapMinusZero) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToJSUint32) {
- NoHandleAllocation ha;
+ NoHandleAllocation ha(isolate);
ASSERT(args.length() == 1);
CONVERT_NUMBER_CHECKED(int32_t, number, Uint32, args[0]);
@@ -6440,7 +6171,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToJSUint32) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToJSInt32) {
- NoHandleAllocation ha;
+ NoHandleAllocation ha(isolate);
ASSERT(args.length() == 1);
CONVERT_DOUBLE_ARG_CHECKED(number, 0);
@@ -6456,7 +6187,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToJSInt32) {
// Converts a Number to a Smi, if possible. Returns NaN if the number is not
// a small integer.
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToSmi) {
- NoHandleAllocation ha;
+ NoHandleAllocation ha(isolate);
ASSERT(args.length() == 1);
Object* obj = args[0];
@@ -6475,14 +6206,14 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToSmi) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_AllocateHeapNumber) {
- NoHandleAllocation ha;
+ NoHandleAllocation ha(isolate);
ASSERT(args.length() == 0);
return isolate->heap()->AllocateHeapNumber(0);
}
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberAdd) {
- NoHandleAllocation ha;
+ NoHandleAllocation ha(isolate);
ASSERT(args.length() == 2);
CONVERT_DOUBLE_ARG_CHECKED(x, 0);
@@ -6492,7 +6223,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberAdd) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberSub) {
- NoHandleAllocation ha;
+ NoHandleAllocation ha(isolate);
ASSERT(args.length() == 2);
CONVERT_DOUBLE_ARG_CHECKED(x, 0);
@@ -6502,7 +6233,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberSub) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberMul) {
- NoHandleAllocation ha;
+ NoHandleAllocation ha(isolate);
ASSERT(args.length() == 2);
CONVERT_DOUBLE_ARG_CHECKED(x, 0);
@@ -6512,7 +6243,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberMul) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberUnaryMinus) {
- NoHandleAllocation ha;
+ NoHandleAllocation ha(isolate);
ASSERT(args.length() == 1);
CONVERT_DOUBLE_ARG_CHECKED(x, 0);
@@ -6521,7 +6252,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberUnaryMinus) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberAlloc) {
- NoHandleAllocation ha;
+ NoHandleAllocation ha(isolate);
ASSERT(args.length() == 0);
return isolate->heap()->NumberFromDouble(9876543210.0);
@@ -6529,7 +6260,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberAlloc) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberDiv) {
- NoHandleAllocation ha;
+ NoHandleAllocation ha(isolate);
ASSERT(args.length() == 2);
CONVERT_DOUBLE_ARG_CHECKED(x, 0);
@@ -6539,7 +6270,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberDiv) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberMod) {
- NoHandleAllocation ha;
+ NoHandleAllocation ha(isolate);
ASSERT(args.length() == 2);
CONVERT_DOUBLE_ARG_CHECKED(x, 0);
@@ -6552,7 +6283,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberMod) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_StringAdd) {
- NoHandleAllocation ha;
+ NoHandleAllocation ha(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(String, str1, 0);
CONVERT_ARG_CHECKED(String, str2, 1);
@@ -6601,12 +6332,12 @@ static inline void StringBuilderConcatHelper(String* special,
RUNTIME_FUNCTION(MaybeObject*, Runtime_StringBuilderConcat) {
- NoHandleAllocation ha;
+ NoHandleAllocation ha(isolate);
ASSERT(args.length() == 3);
CONVERT_ARG_CHECKED(JSArray, array, 0);
if (!args[1]->IsSmi()) {
isolate->context()->mark_out_of_memory();
- return Failure::OutOfMemoryException();
+ return Failure::OutOfMemoryException(0x14);
}
int array_length = args.smi_at(1);
CONVERT_ARG_CHECKED(String, special, 2);
@@ -6619,7 +6350,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringBuilderConcat) {
int special_length = special->length();
if (!array->HasFastObjectElements()) {
- return isolate->Throw(isolate->heap()->illegal_argument_symbol());
+ return isolate->Throw(isolate->heap()->illegal_argument_string());
}
FixedArray* fixed_array = FixedArray::cast(array->elements());
if (fixed_array->length() < array_length) {
@@ -6633,7 +6364,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringBuilderConcat) {
if (first->IsString()) return first;
}
- bool ascii = special->HasOnlyAsciiChars();
+ bool one_byte = special->IsOneByteConvertible();
int position = 0;
for (int i = 0; i < array_length; i++) {
int increment = 0;
@@ -6653,37 +6384,37 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringBuilderConcat) {
// Get the position and check that it is a positive smi.
i++;
if (i >= array_length) {
- return isolate->Throw(isolate->heap()->illegal_argument_symbol());
+ return isolate->Throw(isolate->heap()->illegal_argument_string());
}
Object* next_smi = fixed_array->get(i);
if (!next_smi->IsSmi()) {
- return isolate->Throw(isolate->heap()->illegal_argument_symbol());
+ return isolate->Throw(isolate->heap()->illegal_argument_string());
}
pos = Smi::cast(next_smi)->value();
if (pos < 0) {
- return isolate->Throw(isolate->heap()->illegal_argument_symbol());
+ return isolate->Throw(isolate->heap()->illegal_argument_string());
}
}
ASSERT(pos >= 0);
ASSERT(len >= 0);
if (pos > special_length || len > special_length - pos) {
- return isolate->Throw(isolate->heap()->illegal_argument_symbol());
+ return isolate->Throw(isolate->heap()->illegal_argument_string());
}
increment = len;
} else if (elt->IsString()) {
String* element = String::cast(elt);
int element_length = element->length();
increment = element_length;
- if (ascii && !element->HasOnlyAsciiChars()) {
- ascii = false;
+ if (one_byte && !element->IsOneByteConvertible()) {
+ one_byte = false;
}
} else {
ASSERT(!elt->IsTheHole());
- return isolate->Throw(isolate->heap()->illegal_argument_symbol());
+ return isolate->Throw(isolate->heap()->illegal_argument_string());
}
if (increment > String::kMaxLength - position) {
isolate->context()->mark_out_of_memory();
- return Failure::OutOfMemoryException();
+ return Failure::OutOfMemoryException(0x15);
}
position += increment;
}
@@ -6691,12 +6422,12 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringBuilderConcat) {
int length = position;
Object* object;
- if (ascii) {
+ if (one_byte) {
{ MaybeObject* maybe_object =
- isolate->heap()->AllocateRawAsciiString(length);
+ isolate->heap()->AllocateRawOneByteString(length);
if (!maybe_object->ToObject(&object)) return maybe_object;
}
- SeqAsciiString* answer = SeqAsciiString::cast(object);
+ SeqOneByteString* answer = SeqOneByteString::cast(object);
StringBuilderConcatHelper(special,
answer->GetChars(),
fixed_array,
@@ -6718,18 +6449,18 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringBuilderConcat) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_StringBuilderJoin) {
- NoHandleAllocation ha;
+ NoHandleAllocation ha(isolate);
ASSERT(args.length() == 3);
CONVERT_ARG_CHECKED(JSArray, array, 0);
if (!args[1]->IsSmi()) {
isolate->context()->mark_out_of_memory();
- return Failure::OutOfMemoryException();
+ return Failure::OutOfMemoryException(0x16);
}
int array_length = args.smi_at(1);
CONVERT_ARG_CHECKED(String, separator, 2);
if (!array->HasFastObjectElements()) {
- return isolate->Throw(isolate->heap()->illegal_argument_symbol());
+ return isolate->Throw(isolate->heap()->illegal_argument_string());
}
FixedArray* fixed_array = FixedArray::cast(array->elements());
if (fixed_array->length() < array_length) {
@@ -6748,20 +6479,20 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringBuilderJoin) {
(String::kMaxLength + separator_length - 1) / separator_length;
if (max_nof_separators < (array_length - 1)) {
isolate->context()->mark_out_of_memory();
- return Failure::OutOfMemoryException();
+ return Failure::OutOfMemoryException(0x17);
}
int length = (array_length - 1) * separator_length;
for (int i = 0; i < array_length; i++) {
Object* element_obj = fixed_array->get(i);
if (!element_obj->IsString()) {
// TODO(1161): handle this case.
- return isolate->Throw(isolate->heap()->illegal_argument_symbol());
+ return isolate->Throw(isolate->heap()->illegal_argument_string());
}
String* element = String::cast(element_obj);
int increment = element->length();
if (increment > String::kMaxLength - length) {
isolate->context()->mark_out_of_memory();
- return Failure::OutOfMemoryException();
+ return Failure::OutOfMemoryException(0x18);
}
length += increment;
}
@@ -6796,7 +6527,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringBuilderJoin) {
}
ASSERT(sink == end);
- ASSERT(!answer->HasOnlyAsciiChars()); // Use %_FastAsciiArrayJoin instead.
+ // Use %_FastAsciiArrayJoin instead.
+ ASSERT(!answer->IsOneByteRepresentation());
return answer;
}
@@ -6842,7 +6574,7 @@ static void JoinSparseArrayWithSeparator(FixedArray* elements,
RUNTIME_FUNCTION(MaybeObject*, Runtime_SparseJoinWithSeparator) {
- NoHandleAllocation ha;
+ NoHandleAllocation ha(isolate);
ASSERT(args.length() == 3);
CONVERT_ARG_CHECKED(JSArray, elements_array, 0);
RUNTIME_ASSERT(elements_array->HasFastSmiOrObjectElements());
@@ -6855,10 +6587,10 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SparseJoinWithSeparator) {
// Find total length of join result.
int string_length = 0;
- bool is_ascii = separator->IsAsciiRepresentation();
+ bool is_ascii = separator->IsOneByteRepresentation();
int max_string_length;
if (is_ascii) {
- max_string_length = SeqAsciiString::kMaxLength;
+ max_string_length = SeqOneByteString::kMaxLength;
} else {
max_string_length = SeqTwoByteString::kMaxLength;
}
@@ -6872,7 +6604,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SparseJoinWithSeparator) {
RUNTIME_ASSERT(elements->get(i + 1)->IsString());
String* string = String::cast(elements->get(i + 1));
int length = string->length();
- if (is_ascii && !string->IsAsciiRepresentation()) {
+ if (is_ascii && !string->IsOneByteRepresentation()) {
is_ascii = false;
max_string_length = SeqTwoByteString::kMaxLength;
}
@@ -6908,16 +6640,17 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SparseJoinWithSeparator) {
if (is_ascii) {
MaybeObject* result_allocation =
- isolate->heap()->AllocateRawAsciiString(string_length);
+ isolate->heap()->AllocateRawOneByteString(string_length);
if (result_allocation->IsFailure()) return result_allocation;
- SeqAsciiString* result_string =
- SeqAsciiString::cast(result_allocation->ToObjectUnchecked());
- JoinSparseArrayWithSeparator<char>(elements,
- elements_length,
- array_length,
- separator,
- Vector<char>(result_string->GetChars(),
- string_length));
+ SeqOneByteString* result_string =
+ SeqOneByteString::cast(result_allocation->ToObjectUnchecked());
+ JoinSparseArrayWithSeparator<uint8_t>(elements,
+ elements_length,
+ array_length,
+ separator,
+ Vector<uint8_t>(
+ result_string->GetChars(),
+ string_length));
return result_string;
} else {
MaybeObject* result_allocation =
@@ -6937,7 +6670,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SparseJoinWithSeparator) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberOr) {
- NoHandleAllocation ha;
+ NoHandleAllocation ha(isolate);
ASSERT(args.length() == 2);
CONVERT_NUMBER_CHECKED(int32_t, x, Int32, args[0]);
@@ -6947,7 +6680,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberOr) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberAnd) {
- NoHandleAllocation ha;
+ NoHandleAllocation ha(isolate);
ASSERT(args.length() == 2);
CONVERT_NUMBER_CHECKED(int32_t, x, Int32, args[0]);
@@ -6957,7 +6690,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberAnd) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberXor) {
- NoHandleAllocation ha;
+ NoHandleAllocation ha(isolate);
ASSERT(args.length() == 2);
CONVERT_NUMBER_CHECKED(int32_t, x, Int32, args[0]);
@@ -6967,7 +6700,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberXor) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberNot) {
- NoHandleAllocation ha;
+ NoHandleAllocation ha(isolate);
ASSERT(args.length() == 1);
CONVERT_NUMBER_CHECKED(int32_t, x, Int32, args[0]);
@@ -6976,7 +6709,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberNot) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberShl) {
- NoHandleAllocation ha;
+ NoHandleAllocation ha(isolate);
ASSERT(args.length() == 2);
CONVERT_NUMBER_CHECKED(int32_t, x, Int32, args[0]);
@@ -6986,7 +6719,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberShl) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberShr) {
- NoHandleAllocation ha;
+ NoHandleAllocation ha(isolate);
ASSERT(args.length() == 2);
CONVERT_NUMBER_CHECKED(uint32_t, x, Uint32, args[0]);
@@ -6996,7 +6729,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberShr) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberSar) {
- NoHandleAllocation ha;
+ NoHandleAllocation ha(isolate);
ASSERT(args.length() == 2);
CONVERT_NUMBER_CHECKED(int32_t, x, Int32, args[0]);
@@ -7006,7 +6739,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberSar) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberEquals) {
- NoHandleAllocation ha;
+ NoHandleAllocation ha(isolate);
ASSERT(args.length() == 2);
CONVERT_DOUBLE_ARG_CHECKED(x, 0);
@@ -7025,7 +6758,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberEquals) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_StringEquals) {
- NoHandleAllocation ha;
+ NoHandleAllocation ha(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(String, x, 0);
@@ -7043,7 +6776,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringEquals) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_UserObjectEquals) {
- NoHandleAllocation ha;
+ NoHandleAllocation ha(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(JSObject, lhs, 1);
@@ -7051,12 +6784,14 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_UserObjectEquals) {
bool result;
- v8::UserObjectComparisonCallback callback = isolate->UserObjectComparisonCallback();
+ v8::UserObjectComparisonCallback callback =
+ isolate->UserObjectComparisonCallback();
if (callback) {
HandleScope scope(isolate);
Handle<JSObject> lhs_handle(lhs);
Handle<JSObject> rhs_handle(rhs);
- result = callback(v8::Utils::ToLocal(lhs_handle), v8::Utils::ToLocal(rhs_handle));
+ result = callback(v8::Utils::ToLocal(lhs_handle),
+ v8::Utils::ToLocal(rhs_handle));
} else {
result = (lhs == rhs);
}
@@ -7066,7 +6801,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_UserObjectEquals) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberCompare) {
- NoHandleAllocation ha;
+ NoHandleAllocation ha(isolate);
ASSERT(args.length() == 3);
CONVERT_DOUBLE_ARG_CHECKED(x, 0);
@@ -7081,7 +6816,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberCompare) {
// Compare two Smis as if they were converted to strings and then
// compared lexicographically.
RUNTIME_FUNCTION(MaybeObject*, Runtime_SmiLexicographicCompare) {
- NoHandleAllocation ha;
+ NoHandleAllocation ha(isolate);
ASSERT(args.length() == 2);
CONVERT_SMI_ARG_CHECKED(x_value, 0);
CONVERT_SMI_ARG_CHECKED(y_value, 1);
@@ -7155,23 +6890,21 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SmiLexicographicCompare) {
}
-static Object* StringInputBufferCompare(RuntimeState* state,
+static Object* StringCharacterStreamCompare(RuntimeState* state,
String* x,
String* y) {
- StringInputBuffer& bufx = *state->string_input_buffer_compare_bufx();
- StringInputBuffer& bufy = *state->string_input_buffer_compare_bufy();
- bufx.Reset(x);
- bufy.Reset(y);
- while (bufx.has_more() && bufy.has_more()) {
- int d = bufx.GetNext() - bufy.GetNext();
+ StringCharacterStream stream_x(x, state->string_iterator_compare_x());
+ StringCharacterStream stream_y(y, state->string_iterator_compare_y());
+ while (stream_x.HasMore() && stream_y.HasMore()) {
+ int d = stream_x.GetNext() - stream_y.GetNext();
if (d < 0) return Smi::FromInt(LESS);
else if (d > 0) return Smi::FromInt(GREATER);
}
// x is (non-trivial) prefix of y:
- if (bufy.has_more()) return Smi::FromInt(LESS);
+ if (stream_y.HasMore()) return Smi::FromInt(LESS);
// y is prefix of x:
- return Smi::FromInt(bufx.has_more() ? GREATER : EQUAL);
+ return Smi::FromInt(stream_x.HasMore() ? GREATER : EQUAL);
}
@@ -7190,9 +6923,9 @@ static Object* FlatStringCompare(String* x, String* y) {
String::FlatContent x_content = x->GetFlatContent();
String::FlatContent y_content = y->GetFlatContent();
if (x_content.IsAscii()) {
- Vector<const char> x_chars = x_content.ToAsciiVector();
+ Vector<const uint8_t> x_chars = x_content.ToOneByteVector();
if (y_content.IsAscii()) {
- Vector<const char> y_chars = y_content.ToAsciiVector();
+ Vector<const uint8_t> y_chars = y_content.ToOneByteVector();
r = CompareChars(x_chars.start(), y_chars.start(), prefix_length);
} else {
Vector<const uc16> y_chars = y_content.ToUC16Vector();
@@ -7201,7 +6934,7 @@ static Object* FlatStringCompare(String* x, String* y) {
} else {
Vector<const uc16> x_chars = x_content.ToUC16Vector();
if (y_content.IsAscii()) {
- Vector<const char> y_chars = y_content.ToAsciiVector();
+ Vector<const uint8_t> y_chars = y_content.ToOneByteVector();
r = CompareChars(x_chars.start(), y_chars.start(), prefix_length);
} else {
Vector<const uc16> y_chars = y_content.ToUC16Vector();
@@ -7215,13 +6948,13 @@ static Object* FlatStringCompare(String* x, String* y) {
result = (r < 0) ? Smi::FromInt(LESS) : Smi::FromInt(GREATER);
}
ASSERT(result ==
- StringInputBufferCompare(Isolate::Current()->runtime_state(), x, y));
+ StringCharacterStreamCompare(Isolate::Current()->runtime_state(), x, y));
return result;
}
RUNTIME_FUNCTION(MaybeObject*, Runtime_StringCompare) {
- NoHandleAllocation ha;
+ NoHandleAllocation ha(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(String, x, 0);
@@ -7251,12 +6984,12 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringCompare) {
}
return (x->IsFlat() && y->IsFlat()) ? FlatStringCompare(x, y)
- : StringInputBufferCompare(isolate->runtime_state(), x, y);
+ : StringCharacterStreamCompare(isolate->runtime_state(), x, y);
}
RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_acos) {
- NoHandleAllocation ha;
+ NoHandleAllocation ha(isolate);
ASSERT(args.length() == 1);
isolate->counters()->math_acos()->Increment();
@@ -7266,7 +6999,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_acos) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_asin) {
- NoHandleAllocation ha;
+ NoHandleAllocation ha(isolate);
ASSERT(args.length() == 1);
isolate->counters()->math_asin()->Increment();
@@ -7276,7 +7009,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_asin) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_atan) {
- NoHandleAllocation ha;
+ NoHandleAllocation ha(isolate);
ASSERT(args.length() == 1);
isolate->counters()->math_atan()->Increment();
@@ -7289,7 +7022,7 @@ static const double kPiDividedBy4 = 0.78539816339744830962;
RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_atan2) {
- NoHandleAllocation ha;
+ NoHandleAllocation ha(isolate);
ASSERT(args.length() == 2);
isolate->counters()->math_atan2()->Increment();
@@ -7312,7 +7045,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_atan2) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_ceil) {
- NoHandleAllocation ha;
+ NoHandleAllocation ha(isolate);
ASSERT(args.length() == 1);
isolate->counters()->math_ceil()->Increment();
@@ -7322,7 +7055,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_ceil) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_cos) {
- NoHandleAllocation ha;
+ NoHandleAllocation ha(isolate);
ASSERT(args.length() == 1);
isolate->counters()->math_cos()->Increment();
@@ -7332,17 +7065,18 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_cos) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_exp) {
- NoHandleAllocation ha;
+ NoHandleAllocation ha(isolate);
ASSERT(args.length() == 1);
isolate->counters()->math_exp()->Increment();
CONVERT_DOUBLE_ARG_CHECKED(x, 0);
- return isolate->transcendental_cache()->Get(TranscendentalCache::EXP, x);
+ lazily_initialize_fast_exp();
+ return isolate->heap()->NumberFromDouble(fast_exp(x));
}
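Math.exp leaves the transcendental cache in favor of a generated fast_exp; lazily_initialize_fast_exp presumably compiles that helper on first use (an assumption here; its definition lives elsewhere in the patch). The usual shape of such lazy initialization:

    // Hypothetical sketch, not the patch's actual definition:
    static double (*fast_exp_fn)(double) = NULL;

    void LazilyInitializeFastExpSketch() {
      if (fast_exp_fn == NULL) fast_exp_fn = CreateExpFunction();  // assumed factory
    }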
RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_floor) {
- NoHandleAllocation ha;
+ NoHandleAllocation ha(isolate);
ASSERT(args.length() == 1);
isolate->counters()->math_floor()->Increment();
@@ -7352,7 +7086,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_floor) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_log) {
- NoHandleAllocation ha;
+ NoHandleAllocation ha(isolate);
ASSERT(args.length() == 1);
isolate->counters()->math_log()->Increment();
@@ -7363,7 +7097,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_log) {
// Slow version of Math.pow. We check for fast paths for special cases.
// Used if SSE2/VFP3 is not available.
RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_pow) {
- NoHandleAllocation ha;
+ NoHandleAllocation ha(isolate);
ASSERT(args.length() == 2);
isolate->counters()->math_pow()->Increment();
@@ -7377,19 +7111,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_pow) {
}
CONVERT_DOUBLE_ARG_CHECKED(y, 1);
- int y_int = static_cast<int>(y);
- double result;
- if (y == y_int) {
- result = power_double_int(x, y_int); // Returns 1 if exponent is 0.
- } else if (y == 0.5) {
- result = (isinf(x)) ? V8_INFINITY
- : fast_sqrt(x + 0.0); // Convert -0 to +0.
- } else if (y == -0.5) {
- result = (isinf(x)) ? 0
- : 1.0 / fast_sqrt(x + 0.0); // Convert -0 to +0.
- } else {
- result = power_double_double(x, y);
- }
+ double result = power_helper(x, y);
if (isnan(result)) return isolate->heap()->nan_value();
return isolate->heap()->AllocateHeapNumber(result);
}
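The special cases deleted here are presumably what power_helper now centralizes; reassembled from the removed lines for reference:

    static double PowerHelperSketch(double x, double y) {
      int y_int = static_cast<int>(y);
      if (y == y_int) return power_double_int(x, y_int);  // 1 if exponent is 0
      if (y == 0.5)
        return isinf(x) ? V8_INFINITY : fast_sqrt(x + 0.0);   // -0 -> +0
      if (y == -0.5)
        return isinf(x) ? 0 : 1.0 / fast_sqrt(x + 0.0);       // -0 -> +0
      return power_double_double(x, y);
    }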
@@ -7397,7 +7119,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_pow) {
// Fast version of Math.pow if we know that y is not an integer and y is not
// -0.5 or 0.5. Used as slow case from full codegen.
RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_pow_cfunction) {
- NoHandleAllocation ha;
+ NoHandleAllocation ha(isolate);
ASSERT(args.length() == 2);
isolate->counters()->math_pow()->Increment();
@@ -7414,7 +7136,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_pow_cfunction) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_RoundNumber) {
- NoHandleAllocation ha;
+ NoHandleAllocation ha(isolate);
ASSERT(args.length() == 1);
isolate->counters()->math_round()->Increment();
@@ -7457,7 +7179,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_RoundNumber) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_sin) {
- NoHandleAllocation ha;
+ NoHandleAllocation ha(isolate);
ASSERT(args.length() == 1);
isolate->counters()->math_sin()->Increment();
@@ -7467,7 +7189,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_sin) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_sqrt) {
- NoHandleAllocation ha;
+ NoHandleAllocation ha(isolate);
ASSERT(args.length() == 1);
isolate->counters()->math_sqrt()->Increment();
@@ -7477,7 +7199,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_sqrt) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_tan) {
- NoHandleAllocation ha;
+ NoHandleAllocation ha(isolate);
ASSERT(args.length() == 1);
isolate->counters()->math_tan()->Increment();
@@ -7487,7 +7209,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_tan) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_DateMakeDay) {
- NoHandleAllocation ha;
+ NoHandleAllocation ha(isolate);
ASSERT(args.length() == 2);
CONVERT_SMI_ARG_CHECKED(year, 0);
@@ -7630,7 +7352,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NewArgumentsFast) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_NewStrictArgumentsFast) {
- NoHandleAllocation ha;
+ NoHandleAllocation ha(isolate);
ASSERT(args.length() == 3);
JSFunction* callee = JSFunction::cast(args[0]);
@@ -7687,10 +7409,11 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NewClosure) {
// into C++ code. Collect these in a newly allocated array of handles (possibly
// prefixed by a number of empty handles).
static SmartArrayPointer<Handle<Object> > GetCallerArguments(
+ Isolate* isolate,
int prefix_argc,
int* total_argc) {
// Find frame containing arguments passed to the caller.
- JavaScriptFrameIterator it;
+ JavaScriptFrameIterator it(isolate);
JavaScriptFrame* frame = it.frame();
List<JSFunction*> functions(2);
frame->GetFunctions(&functions);
@@ -7709,7 +7432,7 @@ static SmartArrayPointer<Handle<Object> > GetCallerArguments(
SmartArrayPointer<Handle<Object> > param_data(
NewArray<Handle<Object> >(*total_argc));
for (int i = 0; i < args_count; i++) {
- Handle<Object> val = args_slots[i].GetValue();
+ Handle<Object> val = args_slots[i].GetValue(isolate);
param_data[prefix_argc + i] = val;
}
@@ -7725,7 +7448,7 @@ static SmartArrayPointer<Handle<Object> > GetCallerArguments(
SmartArrayPointer<Handle<Object> > param_data(
NewArray<Handle<Object> >(*total_argc));
for (int i = 0; i < args_count; i++) {
- Handle<Object> val = Handle<Object>(frame->GetParameter(i));
+ Handle<Object> val = Handle<Object>(frame->GetParameter(i), isolate);
param_data[prefix_argc + i] = val;
}
return param_data;
@@ -7744,7 +7467,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionBindArguments) {
bound_function->shared()->set_bound(true);
// Get all arguments of calling function (Function.prototype.bind).
int argc = 0;
- SmartArrayPointer<Handle<Object> > arguments = GetCallerArguments(0, &argc);
+ SmartArrayPointer<Handle<Object> > arguments =
+ GetCallerArguments(isolate, 0, &argc);
// Don't count the this-arg.
if (argc > 0) {
ASSERT(*arguments[0] == args[2]);
@@ -7761,7 +7485,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionBindArguments) {
JSFunction::cast(*bindee)->function_bindings());
new_bindings =
isolate->factory()->NewFixedArray(old_bindings->length() + argc);
- bindee = Handle<Object>(old_bindings->get(JSFunction::kBoundFunctionIndex));
+ bindee = Handle<Object>(old_bindings->get(JSFunction::kBoundFunctionIndex),
+ isolate);
i = 0;
for (int n = old_bindings->length(); i < n; i++) {
new_bindings->set(i, old_bindings->get(i));
@@ -7782,11 +7507,11 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionBindArguments) {
bound_function->set_function_bindings(*new_bindings);
// Update length.
- Handle<String> length_symbol = isolate->factory()->length_symbol();
+ Handle<String> length_string = isolate->factory()->length_string();
Handle<Object> new_length(args.at<Object>(3));
PropertyAttributes attr =
static_cast<PropertyAttributes>(DONT_DELETE | DONT_ENUM | READ_ONLY);
- ForceSetProperty(bound_function, length_symbol, new_length, attr);
+ ForceSetProperty(bound_function, length_string, new_length, attr);
return *bound_function;
}
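
For reference, the bindings FixedArray manipulated above stores the target function at JSFunction::kBoundFunctionIndex, with the pre-bound arguments starting at JSFunction::kBoundArgumentsStartIndex; a small reader sketch under that assumption (only those two constants are visible in this hunk, and BoundTargetOf is a hypothetical name):

// Sketch only: recover the target and bound-argument count of a bound
// function, mirroring the accesses made in the hunks above.
static Handle<Object> BoundTargetOf(Isolate* isolate,
                                    Handle<JSFunction> bound,
                                    int* bound_argc) {
  ASSERT(bound->shared()->bound());
  Handle<FixedArray> bindings(
      FixedArray::cast(bound->function_bindings()));
  *bound_argc = bindings->length() - JSFunction::kBoundArgumentsStartIndex;
  return Handle<Object>(bindings->get(JSFunction::kBoundFunctionIndex),
                        isolate);
}
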
@@ -7820,16 +7545,17 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NewObjectFromBound) {
Handle<FixedArray>(FixedArray::cast(function->function_bindings()));
int bound_argc = bound_args->length() - JSFunction::kBoundArgumentsStartIndex;
Handle<Object> bound_function(
- JSReceiver::cast(bound_args->get(JSFunction::kBoundFunctionIndex)));
+ JSReceiver::cast(bound_args->get(JSFunction::kBoundFunctionIndex)),
+ isolate);
ASSERT(!bound_function->IsJSFunction() ||
!Handle<JSFunction>::cast(bound_function)->shared()->bound());
int total_argc = 0;
SmartArrayPointer<Handle<Object> > param_data =
- GetCallerArguments(bound_argc, &total_argc);
+ GetCallerArguments(isolate, bound_argc, &total_argc);
for (int i = 0; i < bound_argc; i++) {
param_data[i] = Handle<Object>(bound_args->get(
- JSFunction::kBoundArgumentsStartIndex + i));
+ JSFunction::kBoundArgumentsStartIndex + i), isolate);
}
if (!bound_function->IsJSFunction()) {
@@ -8029,7 +7755,34 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ParallelRecompile) {
HandleScope handle_scope(isolate);
ASSERT(FLAG_parallel_recompilation);
Compiler::RecompileParallel(args.at<JSFunction>(0));
- return *isolate->factory()->undefined_value();
+ return isolate->heap()->undefined_value();
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_ForceParallelRecompile) {
+ if (!V8::UseCrankshaft()) return isolate->heap()->undefined_value();
+ HandleScope handle_scope(isolate);
+ ASSERT(FLAG_parallel_recompilation && FLAG_manual_parallel_recompilation);
+ if (!isolate->optimizing_compiler_thread()->IsQueueAvailable()) {
+ return isolate->Throw(*isolate->factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("Recompile queue is full.")));
+ }
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, fun, 0);
+ fun->ReplaceCode(isolate->builtins()->builtin(Builtins::kParallelRecompile));
+ Compiler::RecompileParallel(fun);
+ return isolate->heap()->undefined_value();
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_InstallRecompiledCode) {
+ if (!V8::UseCrankshaft()) return isolate->heap()->undefined_value();
+ HandleScope handle_scope(isolate);
+ ASSERT(FLAG_parallel_recompilation && FLAG_manual_parallel_recompilation);
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, fun, 0);
+ OptimizingCompilerThread* opt_thread = isolate->optimizing_compiler_thread();
+ Handle<SharedFunctionInfo> shared(fun->shared());
+ while (*opt_thread->InstallNextOptimizedFunction() != *shared) { }
+ return isolate->heap()->undefined_value();
}
@@ -8058,6 +7811,17 @@ class ActivationsFinder : public ThreadVisitor {
};
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NotifyStubFailure) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 0);
+ Deoptimizer* deoptimizer = Deoptimizer::Grab(isolate);
+ ASSERT(isolate->heap()->IsAllocationAllowed());
+ ASSERT(deoptimizer->compiled_code_kind() == Code::COMPILED_STUB);
+ delete deoptimizer;
+ return isolate->heap()->undefined_value();
+}
+
+
RUNTIME_FUNCTION(MaybeObject*, Runtime_NotifyDeoptimized) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
@@ -8066,9 +7830,11 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NotifyDeoptimized) {
static_cast<Deoptimizer::BailoutType>(args.smi_at(0));
Deoptimizer* deoptimizer = Deoptimizer::Grab(isolate);
ASSERT(isolate->heap()->IsAllocationAllowed());
- JavaScriptFrameIterator it(isolate);
+
+ ASSERT(deoptimizer->compiled_code_kind() != Code::COMPILED_STUB);
// Make sure to materialize objects before causing any allocation.
+ JavaScriptFrameIterator it(isolate);
deoptimizer->MaterializeHeapObjects(&it);
delete deoptimizer;
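
Both notification runtimes follow the same protocol: Deoptimizer::Grab hands the per-thread deoptimizer to the caller, which must delete it after use. A hypothetical RAII wrapper (not part of this patch) that would make that ownership explicit:

// Sketch only: scoped ownership for Deoptimizer::Grab (hypothetical).
class ScopedDeoptimizer {
 public:
  explicit ScopedDeoptimizer(Isolate* isolate)
      : deoptimizer_(Deoptimizer::Grab(isolate)) {}
  ~ScopedDeoptimizer() { delete deoptimizer_; }
  Deoptimizer* operator->() const { return deoptimizer_; }
 private:
  Deoptimizer* deoptimizer_;
  DISALLOW_COPY_AND_ASSIGN(ScopedDeoptimizer);
};
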
@@ -8172,7 +7938,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_OptimizeFunctionOnNextCall) {
if (args.length() == 2 &&
unoptimized->kind() == Code::FUNCTION) {
CONVERT_ARG_HANDLE_CHECKED(String, type, 1);
- CHECK(type->IsEqualTo(CStrVector("osr")));
+ CHECK(type->IsOneByteEqualTo(STATIC_ASCII_VECTOR("osr")));
isolate->runtime_profiler()->AttemptOnStackReplacement(*function);
unoptimized->set_allow_osr_at_loop_nesting_level(
Code::kMaxLoopNestingMarker);
@@ -8304,15 +8070,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CompileForOnStackReplacement) {
function->PrintName();
PrintF("]\n");
}
- Handle<Code> check_code;
- if (FLAG_count_based_interrupts) {
- InterruptStub interrupt_stub;
- check_code = interrupt_stub.GetCode();
- } else // NOLINT
- { // NOLINT
- StackCheckStub check_stub;
- check_code = check_stub.GetCode();
- }
+ InterruptStub interrupt_stub;
+ Handle<Code> check_code = interrupt_stub.GetCode(isolate);
Handle<Code> replacement_code = isolate->builtins()->OnStackReplacement();
Deoptimizer::RevertStackCheckCode(*unoptimized,
*check_code,
@@ -8370,12 +8129,12 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Call) {
MaybeObject* maybe = args[1 + i];
Object* object;
if (!maybe->To<Object>(&object)) return maybe;
- argv[i] = Handle<Object>(object);
+ argv[i] = Handle<Object>(object, isolate);
}
bool threw;
Handle<JSReceiver> hfun(fun);
- Handle<Object> hreceiver(receiver);
+ Handle<Object> hreceiver(receiver, isolate);
Handle<Object> result =
Execution::Call(hfun, hreceiver, argc, argv, &threw, true);
@@ -8436,7 +8195,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetConstructorDelegate) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_NewGlobalContext) {
- NoHandleAllocation ha;
+ NoHandleAllocation ha(isolate);
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(JSFunction, function, 0);
@@ -8456,12 +8215,12 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NewGlobalContext) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_NewFunctionContext) {
- NoHandleAllocation ha;
+ NoHandleAllocation ha(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSFunction, function, 0);
SharedFunctionInfo* shared = function->shared();
- // TODO: The QML mode should be checked in the ContextLength function.
+ // TODO(pvarga): The QML mode should be checked in the ContextLength function.
int length = shared->scope_info()->ContextLength(shared->qml_mode());
Context* result;
MaybeObject* maybe_result =
@@ -8475,7 +8234,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NewFunctionContext) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_PushWithContext) {
- NoHandleAllocation ha;
+ NoHandleAllocation ha(isolate);
ASSERT(args.length() == 2);
JSObject* extension_object;
if (args[0]->IsJSObject()) {
@@ -8519,7 +8278,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_PushWithContext) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_PushCatchContext) {
- NoHandleAllocation ha;
+ NoHandleAllocation ha(isolate);
ASSERT(args.length() == 3);
String* name = String::cast(args[0]);
Object* thrown_object = args[1];
@@ -8545,7 +8304,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_PushCatchContext) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_PushBlockContext) {
- NoHandleAllocation ha;
+ NoHandleAllocation ha(isolate);
ASSERT(args.length() == 2);
ScopeInfo* scope_info = ScopeInfo::cast(args[0]);
JSFunction* function;
@@ -8576,20 +8335,89 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_IsJSModule) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_PushModuleContext) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(JSModule, instance, 0);
+ ASSERT(args.length() == 2);
+ CONVERT_SMI_ARG_CHECKED(index, 0);
+
+ if (!args[1]->IsScopeInfo()) {
+ // Module already initialized. Find hosting context and retrieve context.
+ Context* host = Context::cast(isolate->context())->global_context();
+ Context* context = Context::cast(host->get(index));
+ ASSERT(context->previous() == isolate->context());
+ isolate->set_context(context);
+ return context;
+ }
+
+ CONVERT_ARG_HANDLE_CHECKED(ScopeInfo, scope_info, 1);
- Context* context = Context::cast(instance->context());
+ // Allocate module context.
+ HandleScope scope(isolate);
+ Factory* factory = isolate->factory();
+ Handle<Context> context = factory->NewModuleContext(scope_info);
+ Handle<JSModule> module = factory->NewJSModule(context, scope_info);
+ context->set_module(*module);
Context* previous = isolate->context();
- ASSERT(context->IsModuleContext());
- // Initialize the context links.
context->set_previous(previous);
context->set_closure(previous->closure());
context->set_global_object(previous->global_object());
- isolate->set_context(context);
+ isolate->set_context(*context);
- return context;
+ // Find hosting scope and initialize internal variable holding module there.
+ previous->global_context()->set(index, *context);
+
+ return *context;
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareModules) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(FixedArray, descriptions, 0);
+ Context* host_context = isolate->context();
+
+ for (int i = 0; i < descriptions->length(); ++i) {
+ Handle<ModuleInfo> description(ModuleInfo::cast(descriptions->get(i)));
+ int host_index = description->host_index();
+ Handle<Context> context(Context::cast(host_context->get(host_index)));
+ Handle<JSModule> module(context->module());
+
+ for (int j = 0; j < description->length(); ++j) {
+ Handle<String> name(description->name(j));
+ VariableMode mode = description->mode(j);
+ int index = description->index(j);
+ switch (mode) {
+ case VAR:
+ case LET:
+ case CONST:
+ case CONST_HARMONY: {
+ PropertyAttributes attr =
+ IsImmutableVariableMode(mode) ? FROZEN : SEALED;
+ Handle<AccessorInfo> info =
+ Accessors::MakeModuleExport(name, index, attr);
+ Handle<Object> result = SetAccessor(module, info);
+ ASSERT(!(result.is_null() || result->IsUndefined()));
+ USE(result);
+ break;
+ }
+ case MODULE: {
+ Object* referenced_context = Context::cast(host_context)->get(index);
+ Handle<JSModule> value(Context::cast(referenced_context)->module());
+ JSReceiver::SetProperty(module, name, value, FROZEN, kStrictMode);
+ break;
+ }
+ case INTERNAL:
+ case TEMPORARY:
+ case DYNAMIC:
+ case DYNAMIC_GLOBAL:
+ case DYNAMIC_LOCAL:
+ UNREACHABLE();
+ }
+ }
+
+ JSObject::PreventExtensions(module);
+ }
+
+ ASSERT(!isolate->has_pending_exception());
+ return isolate->heap()->undefined_value();
}
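
The export loop above treats value bindings and nested modules differently: value bindings are installed as accessor-backed properties whose attributes depend on mutability, while nested modules become frozen data properties. A condensed sketch of the attribute choice, using only names that appear in the hunk (ExportAttributes itself is hypothetical):

// Sketch only: attribute selection for module exports as used above.
static PropertyAttributes ExportAttributes(VariableMode mode) {
  // Immutable bindings (CONST, CONST_HARMONY) are frozen; mutable ones
  // (VAR, LET) stay writable but sealed.
  return IsImmutableVariableMode(mode) ? FROZEN : SEALED;
}
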
@@ -8751,9 +8579,11 @@ static ObjectPair LoadContextSlotHelper(Arguments args,
Handle<JSObject> object = Handle<JSObject>::cast(holder);
ASSERT(object->HasProperty(*name));
// GetProperty below can cause GC.
- Handle<Object> receiver_handle(object->IsGlobalObject()
- ? GlobalObject::cast(*object)->global_receiver()
- : ComputeReceiverForNonGlobal(isolate, *object));
+ Handle<Object> receiver_handle(
+ object->IsGlobalObject()
+ ? GlobalObject::cast(*object)->global_receiver()
+ : ComputeReceiverForNonGlobal(isolate, *object),
+ isolate);
// No need to unhole the value here. This is taken care of by the
// GetProperty function.
@@ -8919,7 +8749,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StackGuard) {
// First check if this is a real stack overflow.
if (isolate->stack_guard()->IsStackOverflow()) {
- NoHandleAllocation na;
+ NoHandleAllocation na(isolate);
return isolate->StackOverflow();
}
@@ -8933,17 +8763,17 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Interrupt) {
}
-static int StackSize() {
+static int StackSize(Isolate* isolate) {
int n = 0;
- for (JavaScriptFrameIterator it; !it.done(); it.Advance()) n++;
+ for (JavaScriptFrameIterator it(isolate); !it.done(); it.Advance()) n++;
return n;
}
-static void PrintTransition(Object* result) {
+static void PrintTransition(Isolate* isolate, Object* result) {
// indentation
{ const int nmax = 80;
- int n = StackSize();
+ int n = StackSize(isolate);
if (n <= nmax)
PrintF("%4d:%*s", n, n, "");
else
@@ -8951,7 +8781,7 @@ static void PrintTransition(Object* result) {
}
if (result == NULL) {
- JavaScriptFrame::PrintTop(stdout, true, false);
+ JavaScriptFrame::PrintTop(isolate, stdout, true, false);
PrintF(" {\n");
} else {
// function result
@@ -8964,21 +8794,21 @@ static void PrintTransition(Object* result) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_TraceEnter) {
ASSERT(args.length() == 0);
- NoHandleAllocation ha;
- PrintTransition(NULL);
+ NoHandleAllocation ha(isolate);
+ PrintTransition(isolate, NULL);
return isolate->heap()->undefined_value();
}
RUNTIME_FUNCTION(MaybeObject*, Runtime_TraceExit) {
- NoHandleAllocation ha;
- PrintTransition(args[0]);
+ NoHandleAllocation ha(isolate);
+ PrintTransition(isolate, args[0]);
return args[0]; // return TOS
}
RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugPrint) {
- NoHandleAllocation ha;
+ NoHandleAllocation ha(isolate);
ASSERT(args.length() == 1);
#ifdef DEBUG
@@ -9010,14 +8840,14 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugPrint) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugTrace) {
ASSERT(args.length() == 0);
- NoHandleAllocation ha;
+ NoHandleAllocation ha(isolate);
isolate->PrintStack();
return isolate->heap()->undefined_value();
}
RUNTIME_FUNCTION(MaybeObject*, Runtime_DateCurrentTime) {
- NoHandleAllocation ha;
+ NoHandleAllocation ha(isolate);
ASSERT(args.length() == 0);
// According to ECMA-262, section 15.9.1, page 117, the precision of
@@ -9050,7 +8880,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DateParseString) {
bool result;
String::FlatContent str_content = str->GetFlatContent();
if (str_content.IsAscii()) {
- result = DateParser::Parse(str_content.ToAsciiVector(),
+ result = DateParser::Parse(str_content.ToOneByteVector(),
output_array,
isolate->unicode_cache());
} else {
@@ -9069,7 +8899,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DateParseString) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_DateLocalTimezone) {
- NoHandleAllocation ha;
+ NoHandleAllocation ha(isolate);
ASSERT(args.length() == 1);
CONVERT_DOUBLE_ARG_CHECKED(x, 0);
@@ -9080,7 +8910,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DateLocalTimezone) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_DateToUTC) {
- NoHandleAllocation ha;
+ NoHandleAllocation ha(isolate);
ASSERT(args.length() == 1);
CONVERT_DOUBLE_ARG_CHECKED(x, 0);
@@ -9107,7 +8937,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ParseJson) {
source = Handle<String>(source->TryFlattenGetString());
// Optimized fast case where we only have ASCII characters.
Handle<Object> result;
- if (source->IsSeqAsciiString()) {
+ if (source->IsSeqOneByteString()) {
result = JsonParser<true>::Parse(source, zone);
} else {
result = JsonParser<false>::Parse(source, zone);
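
The JsonParser template flag picks the sequential one-byte fast path; a condensed sketch of the dispatch as rewritten above (treating the flag as meaning "source is a SeqOneByteString" is an assumption consistent with the branch, and ParseJsonDispatch is a hypothetical name):

// Sketch only: representation-based dispatch as done above.
static Handle<Object> ParseJsonDispatch(Handle<String> source, Zone* zone) {
  source = Handle<String>(source->TryFlattenGetString());
  return source->IsSeqOneByteString()
      ? JsonParser<true>::Parse(source, zone)    // one-byte fast path
      : JsonParser<false>::Parse(source, zone);  // general path
}
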
@@ -9349,7 +9179,7 @@ class ArrayConcatVisitor {
clear_storage();
set_storage(*result);
}
-}
+ }
void increase_index_offset(uint32_t delta) {
if (JSObject::kMaxElementCount - index_offset_ < delta) {
@@ -9387,8 +9217,8 @@ class ArrayConcatVisitor {
current_storage->length()));
uint32_t current_length = static_cast<uint32_t>(current_storage->length());
for (uint32_t i = 0; i < current_length; i++) {
- HandleScope loop_scope;
- Handle<Object> element(current_storage->get(i));
+ HandleScope loop_scope(isolate_);
+ Handle<Object> element(current_storage->get(i), isolate_);
if (!element->IsTheHole()) {
Handle<SeededNumberDictionary> new_storage =
isolate_->factory()->DictionaryAtNumberPut(slow_storage, i, element);
@@ -9440,16 +9270,28 @@ static uint32_t EstimateElementCount(Handle<JSArray> array) {
break;
}
case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- // TODO(1810): Decide if it's worthwhile to implement this.
- UNREACHABLE();
+ case FAST_HOLEY_DOUBLE_ELEMENTS: {
+ // The length of fast elements is always representable by a 32-bit
+ // signed integer.
+ ASSERT(static_cast<int32_t>(FixedDoubleArray::kMaxLength) >= 0);
+ int fast_length = static_cast<int>(length);
+ if (array->elements()->IsFixedArray()) {
+ ASSERT(FixedArray::cast(array->elements())->length() == 0);
+ break;
+ }
+ Handle<FixedDoubleArray> elements(
+ FixedDoubleArray::cast(array->elements()));
+ for (int i = 0; i < fast_length; i++) {
+ if (!elements->is_the_hole(i)) element_count++;
+ }
break;
+ }
case DICTIONARY_ELEMENTS: {
Handle<SeededNumberDictionary> dictionary(
SeededNumberDictionary::cast(array->elements()));
int capacity = dictionary->Capacity();
for (int i = 0; i < capacity; i++) {
- Handle<Object> key(dictionary->KeyAt(i));
+ Handle<Object> key(dictionary->KeyAt(i), array->GetIsolate());
if (dictionary->IsKey(*key)) {
element_count++;
}
@@ -9490,16 +9332,17 @@ static void IterateExternalArrayElements(Isolate* isolate,
if (elements_are_ints) {
if (elements_are_guaranteed_smis) {
for (uint32_t j = 0; j < len; j++) {
- HandleScope loop_scope;
- Handle<Smi> e(Smi::FromInt(static_cast<int>(array->get_scalar(j))));
+ HandleScope loop_scope(isolate);
+ Handle<Smi> e(Smi::FromInt(static_cast<int>(array->get_scalar(j))),
+ isolate);
visitor->visit(j, e);
}
} else {
for (uint32_t j = 0; j < len; j++) {
- HandleScope loop_scope;
+ HandleScope loop_scope(isolate);
int64_t val = static_cast<int64_t>(array->get_scalar(j));
if (Smi::IsValid(static_cast<intptr_t>(val))) {
- Handle<Smi> e(Smi::FromInt(static_cast<int>(val)));
+ Handle<Smi> e(Smi::FromInt(static_cast<int>(val)), isolate);
visitor->visit(j, e);
} else {
Handle<Object> e =
@@ -9529,6 +9372,7 @@ static int compareUInt32(const uint32_t* ap, const uint32_t* bp) {
static void CollectElementIndices(Handle<JSObject> object,
uint32_t range,
List<uint32_t>* indices) {
+ Isolate* isolate = object->GetIsolate();
ElementsKind kind = object->GetElementsKind();
switch (kind) {
case FAST_SMI_ELEMENTS:
@@ -9556,8 +9400,8 @@ static void CollectElementIndices(Handle<JSObject> object,
SeededNumberDictionary::cast(object->elements()));
uint32_t capacity = dict->Capacity();
for (uint32_t j = 0; j < capacity; j++) {
- HandleScope loop_scope;
- Handle<Object> k(dict->KeyAt(j));
+ HandleScope loop_scope(isolate);
+ Handle<Object> k(dict->KeyAt(j), isolate);
if (dict->IsKey(*k)) {
ASSERT(k->IsNumber());
uint32_t index = static_cast<uint32_t>(k->Number());
@@ -9636,7 +9480,7 @@ static void CollectElementIndices(Handle<JSObject> object,
}
}
- Handle<Object> prototype(object->GetPrototype());
+ Handle<Object> prototype(object->GetPrototype(), isolate);
if (prototype->IsJSObject()) {
// The prototype will usually have no inherited element indices,
// but we have to check.
@@ -9686,8 +9530,27 @@ static bool IterateElements(Isolate* isolate,
}
case FAST_HOLEY_DOUBLE_ELEMENTS:
case FAST_DOUBLE_ELEMENTS: {
- // TODO(1810): Decide if it's worthwhile to implement this.
- UNREACHABLE();
+ // Run through the FixedDoubleArray backing store and use HasElement and
+ // GetElement to check the prototype chain for elements missing from it.
+ Handle<FixedDoubleArray> elements(
+ FixedDoubleArray::cast(receiver->elements()));
+ int fast_length = static_cast<int>(length);
+ ASSERT(fast_length <= elements->length());
+ for (int j = 0; j < fast_length; j++) {
+ HandleScope loop_scope(isolate);
+ if (!elements->is_the_hole(j)) {
+ double double_value = elements->get_scalar(j);
+ Handle<Object> element_value =
+ isolate->factory()->NewNumber(double_value);
+ visitor->visit(j, element_value);
+ } else if (receiver->HasElement(j)) {
+ // Call GetElement on receiver, not its prototype, or getters won't
+ // have the correct receiver.
+ Handle<Object> element_value = Object::GetElement(receiver, j);
+ RETURN_IF_EMPTY_HANDLE_VALUE(isolate, element_value, false);
+ visitor->visit(j, element_value);
+ }
+ }
break;
}
case DICTIONARY_ELEMENTS: {
@@ -9700,7 +9563,7 @@ static bool IterateElements(Isolate* isolate,
int j = 0;
int n = indices.length();
while (j < n) {
- HandleScope loop_scope;
+ HandleScope loop_scope(isolate);
uint32_t index = indices[j];
Handle<Object> element = Object::GetElement(receiver, index);
RETURN_IF_EMPTY_HANDLE_VALUE(isolate, element, false);
@@ -9716,7 +9579,7 @@ static bool IterateElements(Isolate* isolate,
Handle<ExternalPixelArray> pixels(ExternalPixelArray::cast(
receiver->elements()));
for (uint32_t j = 0; j < length; j++) {
- Handle<Smi> e(Smi::FromInt(pixels->get_scalar(j)));
+ Handle<Smi> e(Smi::FromInt(pixels->get_scalar(j)), isolate);
visitor->visit(j, e);
}
break;
@@ -9790,48 +9653,51 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ArrayConcat) {
// that mutate other arguments (but will otherwise be precise).
// The number of elements is precise if there are no inherited elements.
+ ElementsKind kind = FAST_SMI_ELEMENTS;
+
uint32_t estimate_result_length = 0;
uint32_t estimate_nof_elements = 0;
- {
- for (int i = 0; i < argument_count; i++) {
- HandleScope loop_scope;
- Handle<Object> obj(elements->get(i));
- uint32_t length_estimate;
- uint32_t element_estimate;
- if (obj->IsJSArray()) {
- Handle<JSArray> array(Handle<JSArray>::cast(obj));
- // TODO(1810): Find out if it's worthwhile to properly support
- // arbitrary ElementsKinds. For now, pessimistically transition to
- // FAST_*_ELEMENTS.
- if (array->HasFastDoubleElements()) {
- ElementsKind to_kind = FAST_ELEMENTS;
- if (array->HasFastHoleyElements()) {
- to_kind = FAST_HOLEY_ELEMENTS;
- }
- array = Handle<JSArray>::cast(
- JSObject::TransitionElementsKind(array, to_kind));
+ for (int i = 0; i < argument_count; i++) {
+ HandleScope loop_scope(isolate);
+ Handle<Object> obj(elements->get(i), isolate);
+ uint32_t length_estimate;
+ uint32_t element_estimate;
+ if (obj->IsJSArray()) {
+ Handle<JSArray> array(Handle<JSArray>::cast(obj));
+ length_estimate = static_cast<uint32_t>(array->length()->Number());
+ if (length_estimate != 0) {
+ ElementsKind array_kind =
+ GetPackedElementsKind(array->map()->elements_kind());
+ if (IsMoreGeneralElementsKindTransition(kind, array_kind)) {
+ kind = array_kind;
}
- length_estimate =
- static_cast<uint32_t>(array->length()->Number());
- element_estimate =
- EstimateElementCount(array);
- } else {
- length_estimate = 1;
- element_estimate = 1;
- }
- // Avoid overflows by capping at kMaxElementCount.
- if (JSObject::kMaxElementCount - estimate_result_length <
- length_estimate) {
- estimate_result_length = JSObject::kMaxElementCount;
- } else {
- estimate_result_length += length_estimate;
}
- if (JSObject::kMaxElementCount - estimate_nof_elements <
- element_estimate) {
- estimate_nof_elements = JSObject::kMaxElementCount;
- } else {
- estimate_nof_elements += element_estimate;
+ element_estimate = EstimateElementCount(array);
+ } else {
+ if (obj->IsHeapObject()) {
+ if (obj->IsNumber()) {
+ if (IsMoreGeneralElementsKindTransition(kind, FAST_DOUBLE_ELEMENTS)) {
+ kind = FAST_DOUBLE_ELEMENTS;
+ }
+ } else if (IsMoreGeneralElementsKindTransition(kind, FAST_ELEMENTS)) {
+ kind = FAST_ELEMENTS;
+ }
}
+ length_estimate = 1;
+ element_estimate = 1;
+ }
+ // Avoid overflows by capping at kMaxElementCount.
+ if (JSObject::kMaxElementCount - estimate_result_length <
+ length_estimate) {
+ estimate_result_length = JSObject::kMaxElementCount;
+ } else {
+ estimate_result_length += length_estimate;
+ }
+ if (JSObject::kMaxElementCount - estimate_nof_elements <
+ element_estimate) {
+ estimate_nof_elements = JSObject::kMaxElementCount;
+ } else {
+ estimate_nof_elements += element_estimate;
}
}
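
The rewritten loop above also tracks the most general ElementsKind seen across the arguments, walking from FAST_SMI_ELEMENTS through FAST_DOUBLE_ELEMENTS up to FAST_ELEMENTS; a schematic of that walk, assuming only the transition predicate the loop itself uses (Generalize is a hypothetical name):

// Sketch only: fold one observed kind into the running most-general kind.
static ElementsKind Generalize(ElementsKind current, ElementsKind seen) {
  return IsMoreGeneralElementsKindTransition(current, seen) ? seen : current;
}
// Concatenating [1, 2] with [3.5] generalizes FAST_SMI_ELEMENTS to
// FAST_DOUBLE_ELEMENTS; any non-number argument then yields FAST_ELEMENTS.
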
@@ -9842,8 +9708,76 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ArrayConcat) {
Handle<FixedArray> storage;
if (fast_case) {
- // The backing storage array must have non-existing elements to
- // preserve holes across concat operations.
+ if (kind == FAST_DOUBLE_ELEMENTS) {
+ Handle<FixedDoubleArray> double_storage =
+ isolate->factory()->NewFixedDoubleArray(estimate_result_length);
+ int j = 0;
+ bool failure = false;
+ for (int i = 0; i < argument_count; i++) {
+ Handle<Object> obj(elements->get(i), isolate);
+ if (obj->IsSmi()) {
+ double_storage->set(j, Smi::cast(*obj)->value());
+ j++;
+ } else if (obj->IsNumber()) {
+ double_storage->set(j, obj->Number());
+ j++;
+ } else {
+ JSArray* array = JSArray::cast(*obj);
+ uint32_t length = static_cast<uint32_t>(array->length()->Number());
+ switch (array->map()->elements_kind()) {
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
+ case FAST_DOUBLE_ELEMENTS: {
+ // Empty fixed array indicates that there are no elements.
+ if (array->elements()->IsFixedArray()) break;
+ FixedDoubleArray* elements =
+ FixedDoubleArray::cast(array->elements());
+ for (uint32_t i = 0; i < length; i++) {
+ if (elements->is_the_hole(i)) {
+ failure = true;
+ break;
+ }
+ double double_value = elements->get_scalar(i);
+ double_storage->set(j, double_value);
+ j++;
+ }
+ break;
+ }
+ case FAST_HOLEY_SMI_ELEMENTS:
+ case FAST_SMI_ELEMENTS: {
+ FixedArray* elements(FixedArray::cast(array->elements()));
+ for (uint32_t i = 0; i < length; i++) {
+ Object* element = elements->get(i);
+ if (element->IsTheHole()) {
+ failure = true;
+ break;
+ }
+ int32_t int_value = Smi::cast(element)->value();
+ double_storage->set(j, int_value);
+ j++;
+ }
+ break;
+ }
+ case FAST_HOLEY_ELEMENTS:
+ ASSERT_EQ(0, length);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+ if (failure) break;
+ }
+ Handle<JSArray> array = isolate->factory()->NewJSArray(0);
+ Smi* length = Smi::FromInt(j);
+ Handle<Map> map;
+ map = isolate->factory()->GetElementsTransitionMap(array, kind);
+ array->set_map(*map);
+ array->set_length(length);
+ array->set_elements(*double_storage);
+ return *array;
+ }
+ // The backing storage array must have non-existing elements to preserve
+ // holes across concat operations.
storage = isolate->factory()->NewFixedArrayWithHoles(
estimate_result_length);
} else {
@@ -9857,7 +9791,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ArrayConcat) {
ArrayConcatVisitor visitor(isolate, storage, fast_case);
for (int i = 0; i < argument_count; i++) {
- Handle<Object> obj(elements->get(i));
+ Handle<Object> obj(elements->get(i), isolate);
if (obj->IsJSArray()) {
Handle<JSArray> array = Handle<JSArray>::cast(obj);
if (!IterateElements(isolate, array, &visitor)) {
@@ -9876,13 +9810,14 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ArrayConcat) {
// This will not allocate (flatten the string), but it may run
// very slowly for very deeply nested ConsStrings. For debugging use only.
RUNTIME_FUNCTION(MaybeObject*, Runtime_GlobalPrint) {
- NoHandleAllocation ha;
+ NoHandleAllocation ha(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(String, string, 0);
- StringInputBuffer buffer(string);
- while (buffer.has_more()) {
- uint16_t character = buffer.GetNext();
+ ConsStringIteratorOp op;
+ StringCharacterStream stream(string, &op);
+ while (stream.HasMore()) {
+ uint16_t character = stream.GetNext();
PrintF("%c", character);
}
return string;
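
StringInputBuffer gives way here (and throughout the patch) to the ConsStringIteratorOp/StringCharacterStream pair, which walks a possibly-cons string without flattening it; a minimal sketch of the new idiom, mirroring the added lines (CountCodeUnits is a hypothetical name):

// Sketch only: count a string's code units with the new stream API.
static int CountCodeUnits(String* string) {
  ConsStringIteratorOp op;
  StringCharacterStream stream(string, &op);
  int count = 0;
  while (stream.HasMore()) {
    stream.GetNext();  // Consumes one uint16_t code unit.
    count++;
  }
  return count;
}
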
@@ -10057,8 +9992,8 @@ static MaybeObject* DebugLookupResultValue(Heap* heap,
return value;
case FIELD:
value =
- JSObject::cast(
- result->holder())->FastPropertyAt(result->GetFieldIndex());
+ JSObject::cast(result->holder())->FastPropertyAt(
+ result->GetFieldIndex().field_index());
if (value->IsTheHole()) {
return heap->undefined_value();
}
@@ -10289,7 +10224,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CheckExecutionState) {
if (isolate->debug()->break_id() == 0 ||
break_id != isolate->debug()->break_id()) {
return isolate->Throw(
- isolate->heap()->illegal_execution_state_symbol());
+ isolate->heap()->illegal_execution_state_string());
}
return isolate->heap()->true_value();
@@ -10493,7 +10428,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFrameDetails) {
Handle<JSFunction> function(JSFunction::cast(frame_inspector.GetFunction()));
Handle<SharedFunctionInfo> shared(function->shared());
Handle<ScopeInfo> scope_info(shared->scope_info());
- ASSERT(*scope_info != ScopeInfo::Empty());
+ ASSERT(*scope_info != ScopeInfo::Empty(isolate));
// Get the locals names and values into a temporary array.
//
@@ -10682,33 +10617,6 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFrameDetails) {
}
-// Copy all the context locals into an object used to materialize a scope.
-static bool CopyContextLocalsToScopeObject(
- Isolate* isolate,
- Handle<ScopeInfo> scope_info,
- Handle<Context> context,
- Handle<JSObject> scope_object) {
- // Fill all context locals to the context extension.
- for (int i = 0; i < scope_info->ContextLocalCount(); i++) {
- VariableMode mode;
- InitializationFlag init_flag;
- int context_index = scope_info->ContextSlotIndex(
- scope_info->ContextLocalName(i), &mode, &init_flag);
-
- RETURN_IF_EMPTY_HANDLE_VALUE(
- isolate,
- SetProperty(scope_object,
- Handle<String>(scope_info->ContextLocalName(i)),
- Handle<Object>(context->get(context_index), isolate),
- NONE,
- kNonStrictMode),
- false);
- }
-
- return true;
-}
-
-
// Create a plain JSObject which materializes the local scope for the specified
// frame.
static Handle<JSObject> MaterializeLocalScopeWithFrameInspector(
@@ -10726,13 +10634,15 @@ static Handle<JSObject> MaterializeLocalScopeWithFrameInspector(
// First fill all parameters.
for (int i = 0; i < scope_info->ParameterCount(); ++i) {
- Handle<Object> value(
- i < frame_inspector->GetParametersCount() ?
- frame_inspector->GetParameter(i) : isolate->heap()->undefined_value());
+ Handle<Object> value(i < frame_inspector->GetParametersCount()
+ ? frame_inspector->GetParameter(i)
+ : isolate->heap()->undefined_value(),
+ isolate);
RETURN_IF_EMPTY_HANDLE_VALUE(
isolate,
- SetProperty(local_scope,
+ SetProperty(isolate,
+ local_scope,
Handle<String>(scope_info->ParameterName(i)),
value,
NONE,
@@ -10744,9 +10654,10 @@ static Handle<JSObject> MaterializeLocalScopeWithFrameInspector(
for (int i = 0; i < scope_info->StackLocalCount(); ++i) {
RETURN_IF_EMPTY_HANDLE_VALUE(
isolate,
- SetProperty(local_scope,
+ SetProperty(isolate,
+ local_scope,
Handle<String>(scope_info->StackLocalName(i)),
- Handle<Object>(frame_inspector->GetExpression(i)),
+ Handle<Object>(frame_inspector->GetExpression(i), isolate),
NONE,
kNonStrictMode),
Handle<JSObject>());
@@ -10756,8 +10667,8 @@ static Handle<JSObject> MaterializeLocalScopeWithFrameInspector(
// Third fill all context locals.
Handle<Context> frame_context(Context::cast(frame->context()));
Handle<Context> function_context(frame_context->declaration_context());
- if (!CopyContextLocalsToScopeObject(
- isolate, scope_info, function_context, local_scope)) {
+ if (!scope_info->CopyContextLocalsToScopeObject(
+ isolate, function_context, local_scope)) {
return Handle<JSObject>();
}
@@ -10778,9 +10689,10 @@ static Handle<JSObject> MaterializeLocalScopeWithFrameInspector(
Handle<String> key(String::cast(keys->get(i)));
RETURN_IF_EMPTY_HANDLE_VALUE(
isolate,
- SetProperty(local_scope,
+ SetProperty(isolate,
+ local_scope,
key,
- GetProperty(ext, key),
+ GetProperty(isolate, ext, key),
NONE,
kNonStrictMode),
Handle<JSObject>());
@@ -10804,6 +10716,95 @@ static Handle<JSObject> MaterializeLocalScope(
}
+// Set the context local variable value.
+static bool SetContextLocalValue(Isolate* isolate,
+ Handle<ScopeInfo> scope_info,
+ Handle<Context> context,
+ Handle<String> variable_name,
+ Handle<Object> new_value) {
+ for (int i = 0; i < scope_info->ContextLocalCount(); i++) {
+ Handle<String> next_name(scope_info->ContextLocalName(i));
+ if (variable_name->Equals(*next_name)) {
+ VariableMode mode;
+ InitializationFlag init_flag;
+ int context_index =
+ scope_info->ContextSlotIndex(*next_name, &mode, &init_flag);
+ context->set(context_index, *new_value);
+ return true;
+ }
+ }
+
+ return false;
+}
+
+
+static bool SetLocalVariableValue(Isolate* isolate,
+ JavaScriptFrame* frame,
+ int inlined_jsframe_index,
+ Handle<String> variable_name,
+ Handle<Object> new_value) {
+ if (inlined_jsframe_index != 0 || frame->is_optimized()) {
+ // Optimized frames are not supported.
+ return false;
+ }
+
+ Handle<JSFunction> function(JSFunction::cast(frame->function()));
+ Handle<SharedFunctionInfo> shared(function->shared());
+ Handle<ScopeInfo> scope_info(shared->scope_info());
+
+ bool default_result = false;
+
+ // Parameters.
+ for (int i = 0; i < scope_info->ParameterCount(); ++i) {
+ if (scope_info->ParameterName(i)->Equals(*variable_name)) {
+ frame->SetParameterValue(i, *new_value);
+ // Argument might be shadowed in heap context, don't stop here.
+ default_result = true;
+ }
+ }
+
+ // Stack locals.
+ for (int i = 0; i < scope_info->StackLocalCount(); ++i) {
+ if (scope_info->StackLocalName(i)->Equals(*variable_name)) {
+ frame->SetExpression(i, *new_value);
+ return true;
+ }
+ }
+
+ if (scope_info->HasContext()) {
+ // Context locals.
+ Handle<Context> frame_context(Context::cast(frame->context()));
+ Handle<Context> function_context(frame_context->declaration_context());
+ if (SetContextLocalValue(
+ isolate, scope_info, function_context, variable_name, new_value)) {
+ return true;
+ }
+
+ // Function context extension. These are variables introduced by eval.
+ if (function_context->closure() == *function) {
+ if (function_context->has_extension() &&
+ !function_context->IsNativeContext()) {
+ Handle<JSObject> ext(JSObject::cast(function_context->extension()));
+
+ if (ext->HasProperty(*variable_name)) {
+ // We don't expect this to do anything except replace the
+ // property value.
+ SetProperty(isolate,
+ ext,
+ variable_name,
+ new_value,
+ NONE,
+ kNonStrictMode);
+ return true;
+ }
+ }
+ }
+ }
+
+ return default_result;
+}
+
+
// Create a plain JSObject which materializes the closure content for the
// context.
static Handle<JSObject> MaterializeClosure(Isolate* isolate,
@@ -10819,8 +10820,8 @@ static Handle<JSObject> MaterializeClosure(Isolate* isolate,
isolate->factory()->NewJSObject(isolate->object_function());
// Fill all context locals to the context extension.
- if (!CopyContextLocalsToScopeObject(
- isolate, scope_info, context, closure_scope)) {
+ if (!scope_info->CopyContextLocalsToScopeObject(
+ isolate, context, closure_scope)) {
return Handle<JSObject>();
}
@@ -10839,9 +10840,10 @@ static Handle<JSObject> MaterializeClosure(Isolate* isolate,
Handle<String> key(String::cast(keys->get(i)));
RETURN_IF_EMPTY_HANDLE_VALUE(
isolate,
- SetProperty(closure_scope,
+ SetProperty(isolate,
+ closure_scope,
key,
- GetProperty(ext, key),
+ GetProperty(isolate, ext, key),
NONE,
kNonStrictMode),
Handle<JSObject>());
@@ -10852,23 +10854,79 @@ static Handle<JSObject> MaterializeClosure(Isolate* isolate,
}
+// This method mirrors the structure of the MaterializeClosure method above.
+static bool SetClosureVariableValue(Isolate* isolate,
+ Handle<Context> context,
+ Handle<String> variable_name,
+ Handle<Object> new_value) {
+ ASSERT(context->IsFunctionContext());
+
+ Handle<SharedFunctionInfo> shared(context->closure()->shared());
+ Handle<ScopeInfo> scope_info(shared->scope_info());
+
+ // Context locals to the context extension.
+ if (SetContextLocalValue(
+ isolate, scope_info, context, variable_name, new_value)) {
+ return true;
+ }
+
+ // Properties from the function context extension. This will
+ // be variables introduced by eval.
+ if (context->has_extension()) {
+ Handle<JSObject> ext(JSObject::cast(context->extension()));
+ if (ext->HasProperty(*variable_name)) {
+ // We don't expect this to do anything except replace the property value.
+ SetProperty(isolate,
+ ext,
+ variable_name,
+ new_value,
+ NONE,
+ kNonStrictMode);
+ return true;
+ }
+ }
+
+ return false;
+}
+
+
// Create a plain JSObject which materializes the scope for the specified
// catch context.
static Handle<JSObject> MaterializeCatchScope(Isolate* isolate,
Handle<Context> context) {
ASSERT(context->IsCatchContext());
Handle<String> name(String::cast(context->extension()));
- Handle<Object> thrown_object(context->get(Context::THROWN_OBJECT_INDEX));
+ Handle<Object> thrown_object(context->get(Context::THROWN_OBJECT_INDEX),
+ isolate);
Handle<JSObject> catch_scope =
isolate->factory()->NewJSObject(isolate->object_function());
RETURN_IF_EMPTY_HANDLE_VALUE(
isolate,
- SetProperty(catch_scope, name, thrown_object, NONE, kNonStrictMode),
+ SetProperty(isolate,
+ catch_scope,
+ name,
+ thrown_object,
+ NONE,
+ kNonStrictMode),
Handle<JSObject>());
return catch_scope;
}
+static bool SetCatchVariableValue(Isolate* isolate,
+ Handle<Context> context,
+ Handle<String> variable_name,
+ Handle<Object> new_value) {
+ ASSERT(context->IsCatchContext());
+ Handle<String> name(String::cast(context->extension()));
+ if (!name->Equals(*variable_name)) {
+ return false;
+ }
+ context->set(Context::THROWN_OBJECT_INDEX, *new_value);
+ return true;
+}
+
+
// Create a plain JSObject which materializes the block scope for the specified
// block context.
static Handle<JSObject> MaterializeBlockScope(
@@ -10883,8 +10941,8 @@ static Handle<JSObject> MaterializeBlockScope(
isolate->factory()->NewJSObject(isolate->object_function());
// Fill all context locals.
- if (!CopyContextLocalsToScopeObject(
- isolate, scope_info, context, block_scope)) {
+ if (!scope_info->CopyContextLocalsToScopeObject(
+ isolate, context, block_scope)) {
return Handle<JSObject>();
}
@@ -10906,8 +10964,8 @@ static Handle<JSObject> MaterializeModuleScope(
isolate->factory()->NewJSObject(isolate->object_function());
// Fill all context locals.
- if (!CopyContextLocalsToScopeObject(
- isolate, scope_info, context, module_scope)) {
+ if (!scope_info->CopyContextLocalsToScopeObject(
+ isolate, context, module_scope)) {
return Handle<JSObject>();
}
@@ -11127,6 +11185,33 @@ class ScopeIterator {
return Handle<JSObject>();
}
+ bool SetVariableValue(Handle<String> variable_name,
+ Handle<Object> new_value) {
+ ASSERT(!failed_);
+ switch (Type()) {
+ case ScopeIterator::ScopeTypeGlobal:
+ break;
+ case ScopeIterator::ScopeTypeLocal:
+ return SetLocalVariableValue(isolate_, frame_, inlined_jsframe_index_,
+ variable_name, new_value);
+ case ScopeIterator::ScopeTypeWith:
+ break;
+ case ScopeIterator::ScopeTypeCatch:
+ return SetCatchVariableValue(isolate_, CurrentContext(),
+ variable_name, new_value);
+ case ScopeIterator::ScopeTypeClosure:
+ return SetClosureVariableValue(isolate_, CurrentContext(),
+ variable_name, new_value);
+ case ScopeIterator::ScopeTypeBlock:
+ // TODO(2399): should we implement it?
+ break;
+ case ScopeIterator::ScopeTypeModule:
+ // TODO(2399): should we implement it?
+ break;
+ }
+ return false;
+ }
+
Handle<ScopeInfo> CurrentScopeInfo() {
ASSERT(!failed_);
if (!nested_scope_chain_.is_empty()) {
@@ -11169,7 +11254,7 @@ class ScopeIterator {
if (!CurrentContext().is_null()) {
CurrentContext()->Print();
if (CurrentContext()->has_extension()) {
- Handle<Object> extension(CurrentContext()->extension());
+ Handle<Object> extension(CurrentContext()->extension(), isolate_);
if (extension->IsJSContextExtensionObject()) {
extension->Print();
}
@@ -11193,7 +11278,7 @@ class ScopeIterator {
PrintF("Closure:\n");
CurrentContext()->Print();
if (CurrentContext()->has_extension()) {
- Handle<Object> extension(CurrentContext()->extension());
+ Handle<Object> extension(CurrentContext()->extension(), isolate_);
if (extension->IsJSContextExtensionObject()) {
extension->Print();
}
@@ -11366,13 +11451,71 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFunctionScopeDetails) {
}
+static bool SetScopeVariableValue(ScopeIterator* it, int index,
+ Handle<String> variable_name,
+ Handle<Object> new_value) {
+ for (int n = 0; !it->Done() && n < index; it->Next()) {
+ n++;
+ }
+ if (it->Done()) {
+ return false;
+ }
+ return it->SetVariableValue(variable_name, new_value);
+}
+
+
+// Changes the value of a variable in a closure or local scope.
+// args[0]: number or JSFunction: break id or function
+// args[1]: number: frame index (when args[0] is a break id)
+// args[2]: number: inlined frame index (when args[0] is a break id)
+// args[3]: number: scope index
+// args[4]: string: variable name
+// args[5]: object: new value
+//
+// Returns true on success and false otherwise.
+RUNTIME_FUNCTION(MaybeObject*, Runtime_SetScopeVariableValue) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 6);
+
+ // Check arguments.
+ CONVERT_NUMBER_CHECKED(int, index, Int32, args[3]);
+ CONVERT_ARG_HANDLE_CHECKED(String, variable_name, 4);
+ Handle<Object> new_value = args.at<Object>(5);
+
+ bool res;
+ if (args[0]->IsNumber()) {
+ Object* check;
+ { MaybeObject* maybe_check = Runtime_CheckExecutionState(
+ RUNTIME_ARGUMENTS(isolate, args));
+ if (!maybe_check->ToObject(&check)) return maybe_check;
+ }
+ CONVERT_SMI_ARG_CHECKED(wrapped_id, 1);
+ CONVERT_NUMBER_CHECKED(int, inlined_jsframe_index, Int32, args[2]);
+
+ // Get the frame where the debugging is performed.
+ StackFrame::Id id = UnwrapFrameId(wrapped_id);
+ JavaScriptFrameIterator frame_it(isolate, id);
+ JavaScriptFrame* frame = frame_it.frame();
+
+ ScopeIterator it(isolate, frame, inlined_jsframe_index);
+ res = SetScopeVariableValue(&it, index, variable_name, new_value);
+ } else {
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, fun, 0);
+ ScopeIterator it(isolate, fun);
+ res = SetScopeVariableValue(&it, index, variable_name, new_value);
+ }
+
+ return isolate->heap()->ToBoolean(res);
+}
+
+
RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugPrintScopes) {
HandleScope scope(isolate);
ASSERT(args.length() == 0);
#ifdef DEBUG
// Print the scopes for the top frame.
- StackFrameLocator locator;
+ StackFrameLocator locator(isolate);
JavaScriptFrame* frame = locator.FindJavaScriptFrame(0);
for (ScopeIterator it(isolate, frame, 0);
!it.Done();
@@ -11610,7 +11753,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_PrepareStep) {
if (!maybe_check->ToObject(&check)) return maybe_check;
}
if (!args[1]->IsNumber() || !args[2]->IsNumber()) {
- return isolate->Throw(isolate->heap()->illegal_argument_symbol());
+ return isolate->Throw(isolate->heap()->illegal_argument_string());
}
// Get the step action and check validity.
@@ -11620,13 +11763,13 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_PrepareStep) {
step_action != StepOut &&
step_action != StepInMin &&
step_action != StepMin) {
- return isolate->Throw(isolate->heap()->illegal_argument_symbol());
+ return isolate->Throw(isolate->heap()->illegal_argument_string());
}
// Get the number of steps.
int step_count = NumberToInt32(args[2]);
if (step_count < 1) {
- return isolate->Throw(isolate->heap()->illegal_argument_symbol());
+ return isolate->Throw(isolate->heap()->illegal_argument_string());
}
// Clear all current stepping setup.
@@ -11680,7 +11823,8 @@ static Handle<Context> CopyNestedScopeContextChain(Isolate* isolate,
if (scope_info->Type() == CATCH_SCOPE) {
Handle<String> name(String::cast(current->extension()));
- Handle<Object> thrown_object(current->get(Context::THROWN_OBJECT_INDEX));
+ Handle<Object> thrown_object(current->get(Context::THROWN_OBJECT_INDEX),
+ isolate);
context =
isolate->factory()->NewCatchContext(function,
context,
@@ -11724,7 +11868,7 @@ static Handle<Object> GetArgumentsObject(Isolate* isolate,
// does not support eval) then create an 'arguments' object.
int index;
if (scope_info->StackLocalCount() > 0) {
- index = scope_info->StackSlotIndex(isolate->heap()->arguments_symbol());
+ index = scope_info->StackSlotIndex(isolate->heap()->arguments_string());
if (index != -1) {
return Handle<Object>(frame->GetExpression(index), isolate);
}
@@ -11734,7 +11878,7 @@ static Handle<Object> GetArgumentsObject(Isolate* isolate,
VariableMode mode;
InitializationFlag init_flag;
index = scope_info->ContextSlotIndex(
- isolate->heap()->arguments_symbol(), &mode, &init_flag);
+ isolate->heap()->arguments_string(), &mode, &init_flag);
if (index != -1) {
return Handle<Object>(function_context->get(index), isolate);
}
@@ -11788,7 +11932,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugEvaluate) {
CONVERT_NUMBER_CHECKED(int, inlined_jsframe_index, Int32, args[2]);
CONVERT_ARG_HANDLE_CHECKED(String, source, 3);
CONVERT_BOOLEAN_ARG_CHECKED(disable_break, 4);
- Handle<Object> additional_context(args[5]);
+ Handle<Object> additional_context(args[5], isolate);
// Handle the processing of break.
DisableBreak disable_break_save(disable_break);
@@ -11894,7 +12038,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugEvaluate) {
Handle<Object> evaluation_function =
Execution::Call(compiled_function, receiver, 0, NULL,
&has_pending_exception, false,
- Handle<Object>(function->context()->qml_global_object()));
+ Handle<Object>(function->context()->qml_global_object(), isolate));
if (has_pending_exception) return Failure::Exception();
Handle<Object> arguments = GetArgumentsObject(isolate,
@@ -11929,7 +12073,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugEvaluate) {
// Skip the global proxy as it has no properties and always delegates to the
// real global object.
if (result->IsJSGlobalProxy()) {
- result = Handle<JSObject>(JSObject::cast(result->GetPrototype()));
+ result = Handle<JSObject>(JSObject::cast(result->GetPrototype(isolate)));
}
return *result;
@@ -11951,7 +12095,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugEvaluateGlobal) {
}
CONVERT_ARG_HANDLE_CHECKED(String, source, 1);
CONVERT_BOOLEAN_ARG_CHECKED(disable_break, 2);
- Handle<Object> additional_context(args[3]);
+ Handle<Object> additional_context(args[3], isolate);
// Handle the processing of break.
DisableBreak disable_break_save(disable_break);
@@ -12044,7 +12188,8 @@ static int DebugReferencedBy(HeapIterator* iterator,
Object* instance_filter, int max_references,
FixedArray* instances, int instances_size,
JSFunction* arguments_function) {
- NoHandleAllocation ha;
+ Isolate* isolate = target->GetIsolate();
+ NoHandleAllocation ha(isolate);
AssertNoAllocation no_alloc;
// Iterate the heap.
@@ -12070,7 +12215,7 @@ static int DebugReferencedBy(HeapIterator* iterator,
if (!instance_filter->IsUndefined()) {
Object* V = obj;
while (true) {
- Object* prototype = V->GetPrototype();
+ Object* prototype = V->GetPrototype(isolate);
if (prototype->IsNull()) {
break;
}
@@ -12140,29 +12285,30 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugReferencedBy) {
// Get the number of referencing objects.
int count;
- HeapIterator heap_iterator;
+ Heap* heap = isolate->heap();
+ HeapIterator heap_iterator(heap);
count = DebugReferencedBy(&heap_iterator,
target, instance_filter, max_references,
NULL, 0, arguments_function);
// Allocate an array to hold the result.
Object* object;
- { MaybeObject* maybe_object = isolate->heap()->AllocateFixedArray(count);
+ { MaybeObject* maybe_object = heap->AllocateFixedArray(count);
if (!maybe_object->ToObject(&object)) return maybe_object;
}
FixedArray* instances = FixedArray::cast(object);
// Fill the referencing objects.
// AllocateFixedArray above does not make the heap non-iterable.
- ASSERT(HEAP->IsHeapIterable());
- HeapIterator heap_iterator2;
+ ASSERT(heap->IsHeapIterable());
+ HeapIterator heap_iterator2(heap);
count = DebugReferencedBy(&heap_iterator2,
target, instance_filter, max_references,
instances, count, arguments_function);
// Return result as JS array.
Object* result;
- MaybeObject* maybe_result = isolate->heap()->AllocateJSObject(
+ MaybeObject* maybe_result = heap->AllocateJSObject(
isolate->context()->native_context()->array_function());
if (!maybe_result->ToObject(&result)) return maybe_result;
return JSArray::cast(result)->SetContent(instances);
@@ -12208,8 +12354,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugConstructedBy) {
ASSERT(args.length() == 2);
// First perform a full GC in order to avoid dead objects.
- isolate->heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask,
- "%DebugConstructedBy");
+ Heap* heap = isolate->heap();
+ heap->CollectAllGarbage(Heap::kMakeHeapIterableMask, "%DebugConstructedBy");
// Check parameters.
CONVERT_ARG_CHECKED(JSFunction, constructor, 0);
@@ -12218,7 +12364,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugConstructedBy) {
// Get the number of referencing objects.
int count;
- HeapIterator heap_iterator;
+ HeapIterator heap_iterator(heap);
count = DebugConstructedBy(&heap_iterator,
constructor,
max_references,
@@ -12227,14 +12373,14 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugConstructedBy) {
// Allocate an array to hold the result.
Object* object;
- { MaybeObject* maybe_object = isolate->heap()->AllocateFixedArray(count);
+ { MaybeObject* maybe_object = heap->AllocateFixedArray(count);
if (!maybe_object->ToObject(&object)) return maybe_object;
}
FixedArray* instances = FixedArray::cast(object);
ASSERT(HEAP->IsHeapIterable());
// Fill the referencing objects.
- HeapIterator heap_iterator2;
+ HeapIterator heap_iterator2(heap);
count = DebugConstructedBy(&heap_iterator2,
constructor,
max_references,
@@ -12244,7 +12390,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugConstructedBy) {
// Return result as JS array.
Object* result;
{ MaybeObject* maybe_result = isolate->heap()->AllocateJSObject(
- isolate->context()->native_context()->array_function());
+ isolate->context()->native_context()->array_function());
if (!maybe_result->ToObject(&result)) return maybe_result;
}
return JSArray::cast(result)->SetContent(instances);
@@ -12320,7 +12466,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugDisassembleConstructor) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionGetInferredName) {
- NoHandleAllocation ha;
+ NoHandleAllocation ha(isolate);
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSFunction, f, 0);
@@ -12371,19 +12517,20 @@ RUNTIME_FUNCTION(MaybeObject*,
Handle<FixedArray> array;
array = isolate->factory()->NewFixedArray(kBufferSize);
int number;
+ Heap* heap = isolate->heap();
{
- isolate->heap()->EnsureHeapIsIterable();
+ heap->EnsureHeapIsIterable();
AssertNoAllocation no_allocations;
- HeapIterator heap_iterator;
+ HeapIterator heap_iterator(heap);
Script* scr = *script;
FixedArray* arr = *array;
number = FindSharedFunctionInfosForScript(&heap_iterator, scr, arr);
}
if (number > kBufferSize) {
array = isolate->factory()->NewFixedArray(number);
- isolate->heap()->EnsureHeapIsIterable();
+ heap->EnsureHeapIsIterable();
AssertNoAllocation no_allocations;
- HeapIterator heap_iterator;
+ HeapIterator heap_iterator(heap);
Script* scr = *script;
FixedArray* arr = *array;
FindSharedFunctionInfosForScript(&heap_iterator, scr, arr);
@@ -12593,7 +12740,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditRestartFrame) {
const char* error_message =
LiveEdit::RestartFrame(it.frame(), isolate->runtime_zone());
if (error_message) {
- return *(isolate->factory()->LookupAsciiSymbol(error_message));
+ return *(isolate->factory()->InternalizeUtf8String(error_message));
}
return heap->true_value();
}
@@ -12692,218 +12839,18 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetHeapUsage) {
return Smi::FromInt(usage);
}
-
-// Captures a live object list from the present heap.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_HasLOLEnabled) {
-#ifdef LIVE_OBJECT_LIST
- return isolate->heap()->true_value();
-#else
- return isolate->heap()->false_value();
-#endif
-}
-
-
-// Captures a live object list from the present heap.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_CaptureLOL) {
-#ifdef LIVE_OBJECT_LIST
- return LiveObjectList::Capture();
-#else
- return isolate->heap()->undefined_value();
-#endif
-}
-
-
-// Deletes the specified live object list.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DeleteLOL) {
-#ifdef LIVE_OBJECT_LIST
- CONVERT_SMI_ARG_CHECKED(id, 0);
- bool success = LiveObjectList::Delete(id);
- return isolate->heap()->ToBoolean(success);
-#else
- return isolate->heap()->undefined_value();
-#endif
-}
-
-
-// Generates the response to a debugger request for a dump of the objects
-// contained in the difference between the captured live object lists
-// specified by id1 and id2.
-// If id1 is 0 (i.e. not a valid lol), then the whole of lol id2 will be
-// dumped.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DumpLOL) {
-#ifdef LIVE_OBJECT_LIST
- HandleScope scope;
- CONVERT_SMI_ARG_CHECKED(id1, 0);
- CONVERT_SMI_ARG_CHECKED(id2, 1);
- CONVERT_SMI_ARG_CHECKED(start, 2);
- CONVERT_SMI_ARG_CHECKED(count, 3);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, filter_obj, 4);
- EnterDebugger enter_debugger;
- return LiveObjectList::Dump(id1, id2, start, count, filter_obj);
-#else
- return isolate->heap()->undefined_value();
-#endif
-}
-
-
-// Gets the specified object as requested by the debugger.
-// This is only used for obj ids shown in live object lists.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetLOLObj) {
-#ifdef LIVE_OBJECT_LIST
- CONVERT_SMI_ARG_CHECKED(obj_id, 0);
- Object* result = LiveObjectList::GetObj(obj_id);
- return result;
-#else
- return isolate->heap()->undefined_value();
-#endif
-}
-
-
-// Gets the obj id for the specified address if valid.
-// This is only used for obj ids shown in live object lists.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetLOLObjId) {
-#ifdef LIVE_OBJECT_LIST
- HandleScope scope;
- CONVERT_ARG_HANDLE_CHECKED(String, address, 0);
- Object* result = LiveObjectList::GetObjId(address);
- return result;
-#else
- return isolate->heap()->undefined_value();
-#endif
-}
-
-
-// Gets the retainers that references the specified object alive.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetLOLObjRetainers) {
-#ifdef LIVE_OBJECT_LIST
- HandleScope scope;
- CONVERT_SMI_ARG_CHECKED(obj_id, 0);
- RUNTIME_ASSERT(args[1]->IsUndefined() || args[1]->IsJSObject());
- RUNTIME_ASSERT(args[2]->IsUndefined() || args[2]->IsBoolean());
- RUNTIME_ASSERT(args[3]->IsUndefined() || args[3]->IsSmi());
- RUNTIME_ASSERT(args[4]->IsUndefined() || args[4]->IsSmi());
- CONVERT_ARG_HANDLE_CHECKED(JSObject, filter_obj, 5);
-
- Handle<JSObject> instance_filter;
- if (args[1]->IsJSObject()) {
- instance_filter = args.at<JSObject>(1);
- }
- bool verbose = false;
- if (args[2]->IsBoolean()) {
- verbose = args[2]->IsTrue();
- }
- int start = 0;
- if (args[3]->IsSmi()) {
- start = args.smi_at(3);
- }
- int limit = Smi::kMaxValue;
- if (args[4]->IsSmi()) {
- limit = args.smi_at(4);
- }
-
- return LiveObjectList::GetObjRetainers(obj_id,
- instance_filter,
- verbose,
- start,
- limit,
- filter_obj);
-#else
- return isolate->heap()->undefined_value();
-#endif
-}
-
-
-// Gets the reference path between 2 objects.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetLOLPath) {
-#ifdef LIVE_OBJECT_LIST
- HandleScope scope;
- CONVERT_SMI_ARG_CHECKED(obj_id1, 0);
- CONVERT_SMI_ARG_CHECKED(obj_id2, 1);
- RUNTIME_ASSERT(args[2]->IsUndefined() || args[2]->IsJSObject());
-
- Handle<JSObject> instance_filter;
- if (args[2]->IsJSObject()) {
- instance_filter = args.at<JSObject>(2);
- }
-
- Object* result =
- LiveObjectList::GetPath(obj_id1, obj_id2, instance_filter);
- return result;
-#else
- return isolate->heap()->undefined_value();
-#endif
-}
-
-
-// Generates the response to a debugger request for a list of all
-// previously captured live object lists.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_InfoLOL) {
-#ifdef LIVE_OBJECT_LIST
- CONVERT_SMI_ARG_CHECKED(start, 0);
- CONVERT_SMI_ARG_CHECKED(count, 1);
- return LiveObjectList::Info(start, count);
-#else
- return isolate->heap()->undefined_value();
-#endif
-}
-
-
-// Gets a dump of the specified object as requested by the debugger.
-// This is only used for obj ids shown in live object lists.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_PrintLOLObj) {
-#ifdef LIVE_OBJECT_LIST
- HandleScope scope;
- CONVERT_SMI_ARG_CHECKED(obj_id, 0);
- Object* result = LiveObjectList::PrintObj(obj_id);
- return result;
-#else
- return isolate->heap()->undefined_value();
-#endif
-}
-
-
-// Resets and releases all previously captured live object lists.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_ResetLOL) {
-#ifdef LIVE_OBJECT_LIST
- LiveObjectList::Reset();
- return isolate->heap()->undefined_value();
-#else
- return isolate->heap()->undefined_value();
-#endif
-}
-
-
-// Generates the response to a debugger request for a summary of the types
-// of objects in the difference between the captured live object lists
-// specified by id1 and id2.
-// If id1 is 0 (i.e. not a valid lol), then the whole of lol id2 will be
-// summarized.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_SummarizeLOL) {
-#ifdef LIVE_OBJECT_LIST
- HandleScope scope;
- CONVERT_SMI_ARG_CHECKED(id1, 0);
- CONVERT_SMI_ARG_CHECKED(id2, 1);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, filter_obj, 2);
-
- EnterDebugger enter_debugger;
- return LiveObjectList::Summarize(id1, id2, filter_obj);
-#else
- return isolate->heap()->undefined_value();
-#endif
-}
-
#endif // ENABLE_DEBUGGER_SUPPORT
RUNTIME_FUNCTION(MaybeObject*, Runtime_ProfilerResume) {
- NoHandleAllocation ha;
+ NoHandleAllocation ha(isolate);
v8::V8::ResumeProfiler();
return isolate->heap()->undefined_value();
}
RUNTIME_FUNCTION(MaybeObject*, Runtime_ProfilerPause) {
- NoHandleAllocation ha;
+ NoHandleAllocation ha(isolate);
v8::V8::PauseProfiler();
return isolate->heap()->undefined_value();
}
@@ -12920,9 +12867,10 @@ static Handle<Object> Runtime_GetScriptFromScriptName(
// Scan the heap for Script objects to find the script with the requested
// script data.
Handle<Script> script;
- script_name->GetHeap()->EnsureHeapIsIterable();
+ Heap* heap = script_name->GetHeap();
+ heap->EnsureHeapIsIterable();
AssertNoAllocation no_allocation_during_heap_iteration;
- HeapIterator iterator;
+ HeapIterator iterator(heap);
HeapObject* obj = NULL;
while (script.is_null() && ((obj = iterator.next()) != NULL)) {
// If a script is found check if it has the script data requested.
@@ -12960,47 +12908,6 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetScript) {
}
-// Determines whether the given stack frame should be displayed in
-// a stack trace. The caller is the error constructor that asked
-// for the stack trace to be collected. The first time a construct
-// call to this function is encountered it is skipped. The seen_caller
-// in/out parameter is used to remember if the caller has been seen
-// yet.
-static bool ShowFrameInStackTrace(StackFrame* raw_frame,
- Object* caller,
- bool* seen_caller) {
- // Only display JS frames.
- if (!raw_frame->is_java_script()) {
- return false;
- }
- JavaScriptFrame* frame = JavaScriptFrame::cast(raw_frame);
- Object* raw_fun = frame->function();
- // Not sure when this can happen but skip it just in case.
- if (!raw_fun->IsJSFunction()) {
- return false;
- }
- if ((raw_fun == caller) && !(*seen_caller)) {
- *seen_caller = true;
- return false;
- }
- // Skip all frames until we've seen the caller.
- if (!(*seen_caller)) return false;
- // Also, skip non-visible built-in functions and any call with the builtins
- // object as receiver, so as to not reveal either the builtins object or
- // an internal function.
- // The --builtins-in-stack-traces command line flag allows including
- // internal call sites in the stack trace for debugging purposes.
- if (!FLAG_builtins_in_stack_traces) {
- JSFunction* fun = JSFunction::cast(raw_fun);
- if (frame->receiver()->IsJSBuiltinsObject() ||
- (fun->IsBuiltin() && !fun->shared()->native())) {
- return false;
- }
- }
- return true;
-}
-
-
// Collect the raw data for a stack trace. Returns an array of four-element
// segments, each containing a receiver, function, code, and native code
// offset.
@@ -13011,57 +12918,53 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CollectStackTrace) {
CONVERT_NUMBER_CHECKED(int32_t, limit, Int32, args[2]);
HandleScope scope(isolate);
- Factory* factory = isolate->factory();
+ // Optionally capture a more detailed stack trace for the message.
+ isolate->CaptureAndSetDetailedStackTrace(error_object);
+ // Capture a simple stack trace for the stack property.
+ return *isolate->CaptureSimpleStackTrace(error_object, caller, limit);
+}
- limit = Max(limit, 0); // Ensure that limit is not negative.
- int initial_size = Min(limit, 10);
- Handle<FixedArray> elements =
- factory->NewFixedArrayWithHoles(initial_size * 4);
- StackFrameIterator iter(isolate);
- // If the caller parameter is a function we skip frames until we're
- // under it before starting to collect.
- bool seen_caller = !caller->IsJSFunction();
- int cursor = 0;
- int frames_seen = 0;
- while (!iter.done() && frames_seen < limit) {
- StackFrame* raw_frame = iter.frame();
- if (ShowFrameInStackTrace(raw_frame, *caller, &seen_caller)) {
- frames_seen++;
- JavaScriptFrame* frame = JavaScriptFrame::cast(raw_frame);
- // Set initial size to the maximum inlining level + 1 for the outermost
- // function.
- List<FrameSummary> frames(Compiler::kMaxInliningLevels + 1);
- frame->Summarize(&frames);
- for (int i = frames.length() - 1; i >= 0; i--) {
- if (cursor + 4 > elements->length()) {
- int new_capacity = JSObject::NewElementsCapacity(elements->length());
- Handle<FixedArray> new_elements =
- factory->NewFixedArrayWithHoles(new_capacity);
- for (int i = 0; i < cursor; i++) {
- new_elements->set(i, elements->get(i));
- }
- elements = new_elements;
- }
- ASSERT(cursor + 4 <= elements->length());
-
- Handle<Object> recv = frames[i].receiver();
- Handle<JSFunction> fun = frames[i].function();
- Handle<Code> code = frames[i].code();
- Handle<Smi> offset(Smi::FromInt(frames[i].offset()));
- elements->set(cursor++, *recv);
- elements->set(cursor++, *fun);
- elements->set(cursor++, *code);
- elements->set(cursor++, *offset);
- }
- }
- iter.Advance();
+// Mark a function so that it can be recognized when called after GC to format
+// the stack trace.
+RUNTIME_FUNCTION(MaybeObject*, Runtime_MarkOneShotGetter) {
+ ASSERT_EQ(args.length(), 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, fun, 0);
+ HandleScope scope(isolate);
+ Handle<String> key = isolate->factory()->hidden_stack_trace_string();
+ JSObject::SetHiddenProperty(fun, key, key);
+ return *fun;
+}
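The marker is simply the hidden-stack-trace key stored as its own value on the function, so detection reduces to a single hidden-property lookup. A minimal sketch of the matching check (the helper name is hypothetical, not part of the patch):

    // Sketch: detect a function marked by Runtime_MarkOneShotGetter.
    static bool IsMarkedOneShotGetter(Isolate* isolate, JSFunction* fun) {
      String* key = isolate->heap()->hidden_stack_trace_string();
      // The marker stores the key as its own value, so compare against it.
      return fun->GetHiddenProperty(key) == key;
    }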
+
+
+// Retrieve the stack trace. This could be the raw stack trace collected
+// on stack overflow or the already formatted stack trace string.
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetOverflowedStackTrace) {
+ HandleScope scope(isolate);
+ ASSERT_EQ(args.length(), 1);
+ CONVERT_ARG_CHECKED(JSObject, error_object, 0);
+ String* key = isolate->heap()->hidden_stack_trace_string();
+ Object* result = error_object->GetHiddenProperty(key);
+ RUNTIME_ASSERT(result->IsJSArray() ||
+ result->IsString() ||
+ result->IsUndefined());
+ return result;
+}
+
+
+// Set or clear the stack trace attached to a stack overflow error object.
+RUNTIME_FUNCTION(MaybeObject*, Runtime_SetOverflowedStackTrace) {
+ HandleScope scope(isolate);
+ ASSERT_EQ(args.length(), 2);
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, error_object, 0);
+ CONVERT_ARG_HANDLE_CHECKED(HeapObject, value, 1);
+ Handle<String> key = isolate->factory()->hidden_stack_trace_string();
+ if (value->IsUndefined()) {
+ error_object->DeleteHiddenProperty(*key);
+ } else {
+ RUNTIME_ASSERT(value->IsString());
+ JSObject::SetHiddenProperty(error_object, key, value);
}
- Handle<JSArray> result = factory->NewJSArrayWithElements(elements);
- // Capture and attach a more detailed stack trace if necessary.
- isolate->CaptureAndSetCurrentStackTraceFor(error_object);
- result->set_length(Smi::FromInt(cursor));
- return *result;
+ return *error_object;
}
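Together, these entries stash the overflow trace on the error object through a hidden property: Set attaches or clears it, Get reads it back. A minimal sketch of the round trip, using only the calls visible above (the trace argument is assumed):

    // Sketch: attach, read back, and clear a hidden stack trace.
    static void HiddenTraceRoundTrip(Isolate* isolate,
                                     Handle<JSObject> error_object,
                                     Handle<String> trace) {
      Handle<String> key = isolate->factory()->hidden_stack_trace_string();
      JSObject::SetHiddenProperty(error_object, key, trace);    // attach
      Object* stored = error_object->GetHiddenProperty(*key);   // read back
      ASSERT(stored->IsString());
      USE(stored);
      error_object->DeleteHiddenProperty(*key);                 // clear
    }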
@@ -13069,11 +12972,11 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CollectStackTrace) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_GetV8Version) {
ASSERT_EQ(args.length(), 0);
- NoHandleAllocation ha;
+ NoHandleAllocation ha(isolate);
const char* version_string = v8::V8::GetVersion();
- return isolate->heap()->AllocateStringFromAscii(CStrVector(version_string),
+ return isolate->heap()->AllocateStringFromOneByte(CStrVector(version_string),
NOT_TENURED);
}
@@ -13089,6 +12992,15 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Abort) {
}
+RUNTIME_FUNCTION(MaybeObject*, Runtime_FlattenString) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(String, str, 0);
+ FlattenString(str);
+ return isolate->heap()->undefined_value();
+}
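Flattening folds a cons-string tree into a sequential representation so that subsequent character reads are constant time; the runtime wrapper lets generated code request this eagerly. A minimal sketch of the usual C++-side pattern:

    // Sketch: flatten once, then index characters cheaply.
    static int CountSpaces(Handle<String> str) {
      FlattenString(str);  // no-op if the string is already flat
      int spaces = 0;
      for (int i = 0; i < str->length(); i++) {
        if (str->Get(i) == ' ') spaces++;  // constant time after flattening
      }
      return spaces;
    }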
+
+
RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFromCache) {
// This is only called from codegen, so checks might be more lax.
CONVERT_ARG_CHECKED(JSFunctionResultCache, cache, 0);
@@ -13126,13 +13038,14 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFromCache) {
HandleScope scope(isolate);
Handle<JSFunctionResultCache> cache_handle(cache);
- Handle<Object> key_handle(key);
+ Handle<Object> key_handle(key, isolate);
Handle<Object> value;
{
Handle<JSFunction> factory(JSFunction::cast(
cache_handle->get(JSFunctionResultCache::kFactoryIndex)));
// TODO(antonm): consider passing a receiver when constructing a cache.
- Handle<Object> receiver(isolate->native_context()->global_object());
+ Handle<Object> receiver(isolate->native_context()->global_object(),
+ isolate);
// This handle is neither shared nor used later, so it's safe.
Handle<Object> argv[] = { key_handle };
bool pending_exception;
@@ -13202,7 +13115,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_MessageGetScript) {
// Exclude the code in release mode.
RUNTIME_FUNCTION(MaybeObject*, Runtime_ListNatives) {
ASSERT(args.length() == 0);
- HandleScope scope;
+ HandleScope scope(isolate);
#define COUNT_ENTRY(Name, argc, ressize) + 1
int entry_count = 0
RUNTIME_FUNCTION_LIST(COUNT_ENTRY)
@@ -13215,7 +13128,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ListNatives) {
bool inline_runtime_functions = false;
#define ADD_ENTRY(Name, argc, ressize) \
{ \
- HandleScope inner; \
+ HandleScope inner(isolate); \
Handle<String> name; \
/* Inline runtime functions have an underscore in front of the name. */ \
if (inline_runtime_functions) { \
@@ -13250,8 +13163,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Log) {
CONVERT_ARG_CHECKED(JSArray, elms, 1);
String::FlatContent format_content = format->GetFlatContent();
RUNTIME_ASSERT(format_content.IsAscii());
- Vector<const char> chars = format_content.ToAsciiVector();
- LOGGER->LogRuntime(chars, elms);
+ Vector<const uint8_t> chars = format_content.ToOneByteVector();
+ LOGGER->LogRuntime(isolate, Vector<const char>::cast(chars), elms);
return isolate->heap()->undefined_value();
}
@@ -13301,6 +13214,12 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_HaveSameMap) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_IsObserved) {
ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSReceiver, obj, 0);
+ if (obj->IsJSGlobalProxy()) {
+ Object* proto = obj->GetPrototype();
+ if (proto->IsNull()) return isolate->heap()->false_value();
+ ASSERT(proto->IsJSGlobalObject());
+ obj = JSReceiver::cast(proto);
+ }
return isolate->heap()->ToBoolean(obj->map()->is_observed());
}
@@ -13309,7 +13228,22 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetIsObserved) {
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(JSReceiver, obj, 0);
CONVERT_BOOLEAN_ARG_CHECKED(is_observed, 1);
+ if (obj->IsJSGlobalProxy()) {
+ Object* proto = obj->GetPrototype();
+ if (proto->IsNull()) return isolate->heap()->undefined_value();
+ ASSERT(proto->IsJSGlobalObject());
+ obj = JSReceiver::cast(proto);
+ }
+ ASSERT(!(obj->map()->is_observed() && obj->IsJSObject() &&
+ JSObject::cast(obj)->HasFastElements()));
if (obj->map()->is_observed() != is_observed) {
+ if (is_observed && obj->IsJSObject() &&
+ !JSObject::cast(obj)->HasExternalArrayElements()) {
+ // Go to dictionary mode, so that we don't skip map checks.
+ MaybeObject* maybe = JSObject::cast(obj)->NormalizeElements();
+ if (maybe->IsFailure()) return maybe;
+ ASSERT(!JSObject::cast(obj)->HasFastElements());
+ }
MaybeObject* maybe = obj->map()->Copy();
Map* map;
if (!maybe->To(&map)) return maybe;
@@ -13333,39 +13267,28 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetObservationState) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateObjectHashTable) {
- ASSERT(args.length() == 0);
- return ObjectHashTable::Allocate(0);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_ObjectHashTableGet) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 2);
- CONVERT_ARG_CHECKED(ObjectHashTable, table, 0);
- Object* key = args[1];
- Object* lookup = table->Lookup(key);
- return lookup->IsTheHole() ? isolate->heap()->undefined_value() : lookup;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_ObjectHashTableSet) {
+RUNTIME_FUNCTION(MaybeObject*, Runtime_ObservationWeakMapCreate) {
HandleScope scope(isolate);
- ASSERT(args.length() == 3);
- CONVERT_ARG_HANDLE_CHECKED(ObjectHashTable, table, 0);
- Handle<Object> key = args.at<Object>(1);
- Handle<Object> value = args.at<Object>(2);
- return *PutIntoObjectHashTable(table, key, value);
+ ASSERT(args.length() == 0);
+ // TODO(adamk): Currently this runtime function is only called three times per
+ // isolate. If it's called more often, the map should be moved into the
+ // strong root list.
+ Handle<Map> map =
+ isolate->factory()->NewMap(JS_WEAK_MAP_TYPE, JSWeakMap::kSize);
+ Handle<JSWeakMap> weakmap =
+ Handle<JSWeakMap>::cast(isolate->factory()->NewJSObjectFromMap(map));
+ return WeakMapInitialize(isolate, weakmap);
}
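The recipe is the same one any caller would follow for a bare weak map: allocate a map, make an object from it, then let WeakMapInitialize install the backing hash table. As a reusable helper (hypothetical, not part of the patch):

    // Sketch: the map/object/initialize recipe above, factored out.
    static MaybeObject* NewBareWeakMap(Isolate* isolate) {
      Handle<Map> map =
          isolate->factory()->NewMap(JS_WEAK_MAP_TYPE, JSWeakMap::kSize);
      Handle<JSWeakMap> weakmap =
          Handle<JSWeakMap>::cast(isolate->factory()->NewJSObjectFromMap(map));
      return WeakMapInitialize(isolate, weakmap);  // *weakmap on success
    }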
-RUNTIME_FUNCTION(MaybeObject*, Runtime_ObjectHashTableHas) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 2);
- CONVERT_ARG_CHECKED(ObjectHashTable, table, 0);
- Object* key = args[1];
- Object* lookup = table->Lookup(key);
- return isolate->heap()->ToBoolean(!lookup->IsTheHole());
+RUNTIME_FUNCTION(MaybeObject*, Runtime_UnwrapGlobalProxy) {
+ ASSERT(args.length() == 1);
+ Object* object = args[0];
+ if (object->IsJSGlobalProxy()) {
+ object = object->GetPrototype(isolate);
+ if (object->IsNull()) return isolate->heap()->undefined_value();
+ }
+ return object;
}
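IsObserved, SetIsObserved, and UnwrapGlobalProxy all use the same unwrap idiom: follow a global proxy to the hidden global object behind it, bailing out when the proxy has been detached (null prototype). Factored out, the idiom looks like this (the helper name is the editor's, not part of the patch):

    // Sketch: unwrap a global proxy; NULL means the proxy is detached.
    static JSReceiver* UnwrapIfGlobalProxy(JSReceiver* obj) {
      if (!obj->IsJSGlobalProxy()) return obj;
      Object* proto = obj->GetPrototype();
      if (proto->IsNull()) return NULL;  // detached proxy
      ASSERT(proto->IsJSGlobalObject());
      return JSReceiver::cast(proto);
    }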
@@ -13394,14 +13317,14 @@ MaybeObject* Runtime::InitializeIntrinsicFunctionNames(Heap* heap,
ASSERT(dictionary != NULL);
ASSERT(StringDictionary::cast(dictionary)->NumberOfElements() == 0);
for (int i = 0; i < kNumFunctions; ++i) {
- Object* name_symbol;
- { MaybeObject* maybe_name_symbol =
- heap->LookupAsciiSymbol(kIntrinsicFunctions[i].name);
- if (!maybe_name_symbol->ToObject(&name_symbol)) return maybe_name_symbol;
+ Object* name_string;
+ { MaybeObject* maybe_name_string =
+ heap->InternalizeUtf8String(kIntrinsicFunctions[i].name);
+ if (!maybe_name_string->ToObject(&name_string)) return maybe_name_string;
}
StringDictionary* string_dictionary = StringDictionary::cast(dictionary);
{ MaybeObject* maybe_dictionary = string_dictionary->Add(
- String::cast(name_symbol),
+ String::cast(name_string),
Smi::FromInt(i),
PropertyDetails(NONE, NORMAL));
if (!maybe_dictionary->ToObject(&dictionary)) {
@@ -13415,7 +13338,7 @@ MaybeObject* Runtime::InitializeIntrinsicFunctionNames(Heap* heap,
}
-const Runtime::Function* Runtime::FunctionForSymbol(Handle<String> name) {
+const Runtime::Function* Runtime::FunctionForName(Handle<String> name) {
Heap* heap = name->GetHeap();
int entry = heap->intrinsic_function_names()->FindEntry(*name);
if (entry != kNotFound) {
diff --git a/src/3rdparty/v8/src/runtime.h b/src/3rdparty/v8/src/runtime.h
index 6428f89..74cc2d8 100644
--- a/src/3rdparty/v8/src/runtime.h
+++ b/src/3rdparty/v8/src/runtime.h
@@ -85,8 +85,11 @@ namespace internal {
F(NewStrictArgumentsFast, 3, 1) \
F(LazyCompile, 1, 1) \
F(LazyRecompile, 1, 1) \
- F(ParallelRecompile, 1, 1) \
+ F(ParallelRecompile, 1, 1) \
+ F(ForceParallelRecompile, 1, 1) \
+ F(InstallRecompiledCode, 1, 1) \
F(NotifyDeoptimized, 1, 1) \
+ F(NotifyStubFailure, 0, 1) \
F(NotifyOSR, 0, 1) \
F(DeoptimizeFunction, 1, 1) \
F(ClearFunctionTypeFeedback, 1, 1) \
@@ -101,6 +104,7 @@ namespace internal {
F(StoreArrayLiteralElement, 5, 1) \
F(DebugCallbackSupportsStepping, 1, 1) \
F(DebugPrepareStepInIfStepping, 1, 1) \
+ F(FlattenString, 1, 1) \
\
/* Array join support */ \
F(PushIfAbsent, 2, 1) \
@@ -111,7 +115,6 @@ namespace internal {
F(Typeof, 1, 1) \
\
F(StringToNumber, 1, 1) \
- F(StringFromCharCodeArray, 1, 1) \
F(StringParseInt, 2, 1) \
F(StringParseFloat, 1, 1) \
F(StringToLowerCase, 1, 1) \
@@ -120,10 +123,6 @@ namespace internal {
F(CharFromCode, 1, 1) \
F(URIEscape, 1, 1) \
F(URIUnescape, 1, 1) \
- F(BasicJSONStringify, 1, 1) \
- F(QuoteJSONString, 1, 1) \
- F(QuoteJSONStringComma, 1, 1) \
- F(QuoteJSONStringArray, 1, 1) \
\
F(NumberToString, 1, 1) \
F(NumberToStringSkipCache, 1, 1) \
@@ -193,6 +192,10 @@ namespace internal {
\
/* JSON */ \
F(ParseJson, 1, 1) \
+ F(BasicJSONStringify, 1, 1) \
+ F(QuoteJSONString, 1, 1) \
+ F(QuoteJSONStringComma, 1, 1) \
+ F(QuoteJSONStringArray, 1, 1) \
\
/* Strings */ \
F(StringCharCodeAt, 2, 1) \
@@ -200,12 +203,14 @@ namespace internal {
F(StringLastIndexOf, 3, 1) \
F(StringLocaleCompare, 2, 1) \
F(SubString, 3, 1) \
- F(StringReplaceRegExpWithString, 4, 1) \
+ F(StringReplaceGlobalRegExpWithString, 4, 1) \
F(StringReplaceOneCharWithString, 3, 1) \
F(StringMatch, 3, 1) \
F(StringTrim, 3, 1) \
F(StringToArray, 2, 1) \
F(NewStringWrapper, 1, 1) \
+ F(NewString, 2, 1) \
+ F(TruncateString, 2, 1) \
\
/* Numbers */ \
F(NumberToRadixString, 2, 1) \
@@ -234,6 +239,9 @@ namespace internal {
F(FunctionIsBuiltin, 1, 1) \
F(GetScript, 1, 1) \
F(CollectStackTrace, 3, 1) \
+ F(MarkOneShotGetter, 1, 1) \
+ F(GetOverflowedStackTrace, 1, 1) \
+ F(SetOverflowedStackTrace, 2, 1) \
F(GetV8Version, 0, 1) \
\
F(ClassOf, 1, 1) \
@@ -289,6 +297,9 @@ namespace internal {
/* Harmony modules */ \
F(IsJSModule, 1, 1) \
\
+ /* Harmony symbols */ \
+ F(CreateSymbol, 0, 1) \
+ \
/* Harmony proxies */ \
F(CreateJSProxy, 2, 1) \
F(CreateJSFunctionProxy, 4, 1) \
@@ -326,10 +337,8 @@ namespace internal {
F(SetIsObserved, 2, 1) \
F(SetObserverDeliveryPending, 0, 1) \
F(GetObservationState, 0, 1) \
- F(CreateObjectHashTable, 0, 1) \
- F(ObjectHashTableGet, 2, 1) \
- F(ObjectHashTableSet, 3, 1) \
- F(ObjectHashTableHas, 2, 1) \
+ F(ObservationWeakMapCreate, 0, 1) \
+ F(UnwrapGlobalProxy, 1, 1) \
\
/* Statements */ \
F(NewClosure, 3, 1) \
@@ -350,7 +359,7 @@ namespace internal {
F(PushWithContext, 2, 1) \
F(PushCatchContext, 3, 1) \
F(PushBlockContext, 2, 1) \
- F(PushModuleContext, 1, 1) \
+ F(PushModuleContext, 2, 1) \
F(DeleteContextSlot, 2, 1) \
F(LoadContextSlot, 2, 2) \
F(LoadContextSlotNoReferenceError, 2, 2) \
@@ -358,6 +367,7 @@ namespace internal {
\
/* Declarations and initialization */ \
F(DeclareGlobals, 3, 1) \
+ F(DeclareModules, 1, 1) \
F(DeclareContextSlot, 4, 1) \
F(InitializeVarGlobal, -1 /* 3 or 4 */, 1) \
F(InitializeConstGlobal, 3, 1) \
@@ -402,6 +412,7 @@ namespace internal {
F(HasExternalFloatElements, 1, 1) \
F(HasExternalDoubleElements, 1, 1) \
F(HasFastProperties, 1, 1) \
+ F(TransitionElementsKind, 2, 1) \
F(TransitionElementsSmiToDouble, 1, 1) \
F(TransitionElementsDoubleToObject, 1, 1) \
F(HaveSameMap, 2, 1) \
@@ -430,6 +441,7 @@ namespace internal {
F(GetScopeDetails, 4, 1) \
F(GetFunctionScopeCount, 1, 1) \
F(GetFunctionScopeDetails, 2, 1) \
+ F(SetScopeVariableValue, 6, 1) \
F(DebugPrintScopes, 0, 1) \
F(GetThreadCount, 1, 1) \
F(GetThreadDetails, 2, 1) \
@@ -471,20 +483,6 @@ namespace internal {
F(SetFlags, 1, 1) \
F(CollectGarbage, 1, 1) \
F(GetHeapUsage, 0, 1) \
- \
- /* LiveObjectList support*/ \
- F(HasLOLEnabled, 0, 1) \
- F(CaptureLOL, 0, 1) \
- F(DeleteLOL, 1, 1) \
- F(DumpLOL, 5, 1) \
- F(GetLOLObj, 1, 1) \
- F(GetLOLObjId, 1, 1) \
- F(GetLOLObjRetainers, 6, 1) \
- F(GetLOLPath, 3, 1) \
- F(InfoLOL, 2, 1) \
- F(PrintLOLObj, 1, 1) \
- F(ResetLOL, 0, 1) \
- F(SummarizeLOL, 3, 1)
#else
#define RUNTIME_FUNCTION_LIST_DEBUGGER_SUPPORT(F)
@@ -516,6 +514,7 @@ namespace internal {
#define INLINE_FUNCTION_LIST(F) \
F(IsSmi, 1, 1) \
F(IsNonNegativeSmi, 1, 1) \
+ F(IsSymbol, 1, 1) \
F(IsArray, 1, 1) \
F(IsRegExp, 1, 1) \
F(IsConstructCall, 0, 1) \
@@ -527,6 +526,8 @@ namespace internal {
F(DateField, 2 /* date object, field index */, 1) \
F(StringCharFromCode, 1, 1) \
F(StringCharAt, 2, 1) \
+ F(OneByteSeqStringSetChar, 3, 1) \
+ F(TwoByteSeqStringSetChar, 3, 1) \
F(ObjectEquals, 2, 1) \
F(RandomHeapNumber, 0, 1) \
F(IsObject, 1, 1) \
@@ -547,7 +548,7 @@ namespace internal {
// ----------------------------------------------------------------------------
-// INLINE_AND_RUNTIME_FUNCTION_LIST defines all inlined functions accessed
+// INLINE_RUNTIME_FUNCTION_LIST defines all inlined functions accessed
// with a native call of the form %_name from within JS code that also have
// a corresponding runtime function, that is called for slow cases.
// Entries have the form F(name, number of arguments, number of return values).
@@ -569,8 +570,8 @@ namespace internal {
class RuntimeState {
public:
- StaticResource<StringInputBuffer>* string_input_buffer() {
- return &string_input_buffer_;
+ StaticResource<ConsStringIteratorOp>* string_iterator() {
+ return &string_iterator_;
}
unibrow::Mapping<unibrow::ToUppercase, 128>* to_upper_mapping() {
return &to_upper_mapping_;
@@ -578,29 +579,29 @@ class RuntimeState {
unibrow::Mapping<unibrow::ToLowercase, 128>* to_lower_mapping() {
return &to_lower_mapping_;
}
- StringInputBuffer* string_input_buffer_compare_bufx() {
- return &string_input_buffer_compare_bufx_;
+ ConsStringIteratorOp* string_iterator_compare_x() {
+ return &string_iterator_compare_x_;
}
- StringInputBuffer* string_input_buffer_compare_bufy() {
- return &string_input_buffer_compare_bufy_;
+ ConsStringIteratorOp* string_iterator_compare_y() {
+ return &string_iterator_compare_y_;
}
- StringInputBuffer* string_locale_compare_buf1() {
- return &string_locale_compare_buf1_;
+ ConsStringIteratorOp* string_locale_compare_it1() {
+ return &string_locale_compare_it1_;
}
- StringInputBuffer* string_locale_compare_buf2() {
- return &string_locale_compare_buf2_;
+ ConsStringIteratorOp* string_locale_compare_it2() {
+ return &string_locale_compare_it2_;
}
private:
RuntimeState() {}
// Non-reentrant string iterator for efficient general use in the runtime.
- StaticResource<StringInputBuffer> string_input_buffer_;
+ StaticResource<ConsStringIteratorOp> string_iterator_;
unibrow::Mapping<unibrow::ToUppercase, 128> to_upper_mapping_;
unibrow::Mapping<unibrow::ToLowercase, 128> to_lower_mapping_;
- StringInputBuffer string_input_buffer_compare_bufx_;
- StringInputBuffer string_input_buffer_compare_bufy_;
- StringInputBuffer string_locale_compare_buf1_;
- StringInputBuffer string_locale_compare_buf2_;
+ ConsStringIteratorOp string_iterator_compare_x_;
+ ConsStringIteratorOp string_iterator_compare_y_;
+ ConsStringIteratorOp string_locale_compare_it1_;
+ ConsStringIteratorOp string_locale_compare_it2_;
friend class Isolate;
friend class Runtime;
@@ -647,15 +648,16 @@ class Runtime : public AllStatic {
static const int kNotFound = -1;
- // Add symbols for all the intrinsic function names to a StringDictionary.
+ // Add internalized strings for all the intrinsic function names to a
+ // StringDictionary.
// Returns failure if an allocation fails. In this case, it must be
// retried with a new, empty StringDictionary, not with the same one.
// Alternatively, heap initialization can be completely restarted.
MUST_USE_RESULT static MaybeObject* InitializeIntrinsicFunctionNames(
Heap* heap, Object* dictionary);
- // Get the intrinsic function with the given name, which must be a symbol.
- static const Function* FunctionForSymbol(Handle<String> name);
+ // Get the intrinsic function with the given name, which must be internalized.
+ static const Function* FunctionForName(Handle<String> name);
// Get the intrinsic function with the given FunctionId.
static const Function* FunctionForId(FunctionId id);
diff --git a/src/3rdparty/v8/src/safepoint-table.cc b/src/3rdparty/v8/src/safepoint-table.cc
index 714e5c3..9e42304 100644
--- a/src/3rdparty/v8/src/safepoint-table.cc
+++ b/src/3rdparty/v8/src/safepoint-table.cc
@@ -59,7 +59,8 @@ bool SafepointEntry::HasRegisterAt(int reg_index) const {
SafepointTable::SafepointTable(Code* code) {
- ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION);
+ ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION ||
+ code->kind() == Code::COMPILED_STUB);
code_ = code;
Address header = code->instruction_start() + code->safepoint_table_offset();
length_ = Memory::uint32_at(header + kLengthOffset);
@@ -158,14 +159,6 @@ unsigned SafepointTableBuilder::GetCodeOffset() const {
void SafepointTableBuilder::Emit(Assembler* assembler, int bits_per_entry) {
- // For lazy deoptimization we need space to patch a call after every call.
- // Ensure there is always space for such patching, even if the code ends
- // in a call.
- int target_offset = assembler->pc_offset() + Deoptimizer::patch_size();
- while (assembler->pc_offset() < target_offset) {
- assembler->nop();
- }
-
// Make sure the safepoint table is properly aligned. Pad with nops.
assembler->Align(kIntSize);
assembler->RecordComment(";;; Safepoint table.");
diff --git a/src/3rdparty/v8/src/scanner.h b/src/3rdparty/v8/src/scanner.h
index 4de413b..a454750 100644
--- a/src/3rdparty/v8/src/scanner.h
+++ b/src/3rdparty/v8/src/scanner.h
@@ -145,7 +145,7 @@ class UnicodeCache {
// Caching predicates used by scanners.
public:
UnicodeCache() {}
- typedef unibrow::Utf8InputBuffer<1024> Utf8Decoder;
+ typedef unibrow::Utf8Decoder<512> Utf8Decoder;
StaticResource<Utf8Decoder>* utf8_decoder() {
return &utf8_decoder_;
@@ -183,9 +183,9 @@ class LiteralBuffer {
INLINE(void AddChar(uint32_t code_unit)) {
if (position_ >= backing_store_.length()) ExpandBuffer();
if (is_ascii_) {
- if (code_unit < kMaxAsciiCharCodeU) {
+ if (code_unit <= unibrow::Latin1::kMaxChar) {
backing_store_[position_] = static_cast<byte>(code_unit);
- position_ += kASCIISize;
+ position_ += kOneByteSize;
return;
}
ConvertToUtf16();
@@ -250,7 +250,7 @@ class LiteralBuffer {
} else {
new_store = backing_store_;
}
- char* src = reinterpret_cast<char*>(backing_store_.start());
+ uint8_t* src = backing_store_.start();
uc16* dst = reinterpret_cast<uc16*>(new_store.start());
for (int i = position_ - 1; i >= 0; i--) {
dst[i] = src[i];
@@ -315,8 +315,6 @@ class Scanner {
// -1 is outside of the range of any real source code.
static const int kNoOctalLocation = -1;
- typedef unibrow::Utf8InputBuffer<1024> Utf8Decoder;
-
explicit Scanner(UnicodeCache* scanner_constants);
void Initialize(Utf16CharacterStream* source);
@@ -432,10 +430,6 @@ class Scanner {
// be empty).
bool ScanRegExpFlags();
- // Tells whether the buffer contains an identifier (no escapes).
- // Used for checking if a property name is an identifier.
- static bool IsIdentifier(unibrow::CharacterStream* buffer);
-
private:
// The current and look-ahead tokens.
struct TokenDesc {
diff --git a/src/3rdparty/v8/src/scopeinfo.cc b/src/3rdparty/v8/src/scopeinfo.cc
index 66e2013..6d55e86 100644
--- a/src/3rdparty/v8/src/scopeinfo.cc
+++ b/src/3rdparty/v8/src/scopeinfo.cc
@@ -150,8 +150,8 @@ Handle<ScopeInfo> ScopeInfo::Create(Scope* scope, Zone* zone) {
}
-ScopeInfo* ScopeInfo::Empty() {
- return reinterpret_cast<ScopeInfo*>(HEAP->empty_fixed_array());
+ScopeInfo* ScopeInfo::Empty(Isolate* isolate) {
+ return reinterpret_cast<ScopeInfo*>(isolate->heap()->empty_fixed_array());
}
@@ -201,7 +201,8 @@ int ScopeInfo::ContextLength(bool qml_function) {
Type() == WITH_SCOPE ||
(Type() == FUNCTION_SCOPE && CallsEval()) ||
Type() == MODULE_SCOPE;
- // TODO: The QML mode should be checked in the has_context expression.
+ // TODO(pvarga): The QML mode should be checked in the
+ // has_context expression.
if (has_context || qml_function) {
return Context::MIN_CONTEXT_SLOTS + context_locals +
(function_name_context_slot ? 1 : 0);
@@ -287,7 +288,7 @@ InitializationFlag ScopeInfo::ContextLocalInitFlag(int var) {
int ScopeInfo::StackSlotIndex(String* name) {
- ASSERT(name->IsSymbol());
+ ASSERT(name->IsInternalizedString());
if (length() > 0) {
int start = StackLocalEntriesIndex();
int end = StackLocalEntriesIndex() + StackLocalCount();
@@ -304,7 +305,7 @@ int ScopeInfo::StackSlotIndex(String* name) {
int ScopeInfo::ContextSlotIndex(String* name,
VariableMode* mode,
InitializationFlag* init_flag) {
- ASSERT(name->IsSymbol());
+ ASSERT(name->IsInternalizedString());
ASSERT(mode != NULL);
ASSERT(init_flag != NULL);
if (length() > 0) {
@@ -328,6 +329,7 @@ int ScopeInfo::ContextSlotIndex(String* name,
return result;
}
}
+ // Cache as not found. Mode and init flag don't matter.
context_slot_cache->Update(this, name, INTERNAL, kNeedsInitialization, -1);
}
return -1;
@@ -335,7 +337,7 @@ int ScopeInfo::ContextSlotIndex(String* name,
int ScopeInfo::ParameterIndex(String* name) {
- ASSERT(name->IsSymbol());
+ ASSERT(name->IsInternalizedString());
if (length() > 0) {
// We must read parameters from the end since for
// multiply declared parameters the value of the
@@ -355,7 +357,7 @@ int ScopeInfo::ParameterIndex(String* name) {
int ScopeInfo::FunctionContextSlotIndex(String* name, VariableMode* mode) {
- ASSERT(name->IsSymbol());
+ ASSERT(name->IsInternalizedString());
ASSERT(mode != NULL);
if (length() > 0) {
if (FunctionVariableField::decode(Flags()) == CONTEXT &&
@@ -368,6 +370,31 @@ int ScopeInfo::FunctionContextSlotIndex(String* name, VariableMode* mode) {
}
+bool ScopeInfo::CopyContextLocalsToScopeObject(
+ Isolate* isolate,
+ Handle<Context> context,
+ Handle<JSObject> scope_object) {
+ int local_count = ContextLocalCount();
+ if (local_count == 0) return true;
+ // Fill all context locals to the context extension.
+ int start = ContextLocalNameEntriesIndex();
+ int end = start + local_count;
+ for (int i = start; i < end; ++i) {
+ int context_index = Context::MIN_CONTEXT_SLOTS + i - start;
+ RETURN_IF_EMPTY_HANDLE_VALUE(
+ isolate,
+ SetProperty(isolate,
+ scope_object,
+ Handle<String>(String::cast(get(i))),
+ Handle<Object>(context->get(context_index), isolate),
+ ::NONE,
+ kNonStrictMode),
+ false);
+ }
+ return true;
+}
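The helper exists for debugger-style scope materialization: every context-allocated local is copied onto a plain object that can then be inspected. A sketch of the expected call pattern (the setup around it is assumed, not taken from this patch):

    // Sketch: materialize a context's locals for inspection (setup assumed).
    static Handle<JSObject> MaterializeLocals(Isolate* isolate,
                                              Handle<Context> context,
                                              Handle<ScopeInfo> scope_info) {
      Handle<JSObject> scope_object =
          isolate->factory()->NewJSObject(isolate->object_function());
      if (!scope_info->CopyContextLocalsToScopeObject(isolate, context,
                                                      scope_object)) {
        return Handle<JSObject>();  // exception pending from SetProperty
      }
      return scope_object;
    }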
+
+
int ScopeInfo::ParameterEntriesIndex() {
ASSERT(length() > 0);
return kVariablePartIndex;
@@ -423,13 +450,13 @@ void ContextSlotCache::Update(Object* data,
VariableMode mode,
InitializationFlag init_flag,
int slot_index) {
- String* symbol;
+ String* internalized_name;
ASSERT(slot_index > kNotFound);
- if (HEAP->LookupSymbolIfExists(name, &symbol)) {
- int index = Hash(data, symbol);
+ if (HEAP->InternalizeStringIfExists(name, &internalized_name)) {
+ int index = Hash(data, internalized_name);
Key& key = keys_[index];
key.data = data;
- key.name = symbol;
+ key.name = internalized_name;
// Note that Value only takes a uint as the index.
values_[index] = Value(mode, init_flag, slot_index - kNotFound).raw();
#ifdef DEBUG
@@ -451,8 +478,8 @@ void ContextSlotCache::ValidateEntry(Object* data,
VariableMode mode,
InitializationFlag init_flag,
int slot_index) {
- String* symbol;
- if (HEAP->LookupSymbolIfExists(name, &symbol)) {
+ String* internalized_name;
+ if (HEAP->InternalizeStringIfExists(name, &internalized_name)) {
int index = Hash(data, name);
Key& key = keys_[index];
ASSERT(key.data == data);
@@ -511,4 +538,32 @@ void ScopeInfo::Print() {
}
#endif // DEBUG
+
+//---------------------------------------------------------------------------
+// ModuleInfo.
+
+Handle<ModuleInfo> ModuleInfo::Create(
+ Isolate* isolate, Interface* interface, Scope* scope) {
+ Handle<ModuleInfo> info = Allocate(isolate, interface->Length());
+ info->set_host_index(interface->Index());
+ int i = 0;
+ for (Interface::Iterator it = interface->iterator();
+ !it.done(); it.Advance(), ++i) {
+ Variable* var = scope->LocalLookup(it.name());
+ info->set_name(i, *it.name());
+ info->set_mode(i, var->mode());
+ ASSERT((var->mode() == MODULE) == (it.interface()->IsModule()));
+ if (var->mode() == MODULE) {
+ ASSERT(it.interface()->IsFrozen());
+ ASSERT(it.interface()->Index() >= 0);
+ info->set_index(i, it.interface()->Index());
+ } else {
+ ASSERT(var->index() >= 0);
+ info->set_index(i, var->index());
+ }
+ }
+ ASSERT(i == info->length());
+ return info;
+}
+
} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/scopeinfo.h b/src/3rdparty/v8/src/scopeinfo.h
index 93734f5..a884b3b 100644
--- a/src/3rdparty/v8/src/scopeinfo.h
+++ b/src/3rdparty/v8/src/scopeinfo.h
@@ -114,9 +114,9 @@ class ContextSlotCache {
// Bit fields in value_ (type, shift, size). Must be public so the
// constants can be embedded in generated code.
- class ModeField: public BitField<VariableMode, 0, 3> {};
- class InitField: public BitField<InitializationFlag, 3, 1> {};
- class IndexField: public BitField<int, 4, 32-4> {};
+ class ModeField: public BitField<VariableMode, 0, 4> {};
+ class InitField: public BitField<InitializationFlag, 4, 1> {};
+ class IndexField: public BitField<int, 5, 32-5> {};
private:
uint32_t value_;
@@ -130,6 +130,67 @@ class ContextSlotCache {
};
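Widening ModeField from 3 to 4 bits makes room for the larger set of VariableModes this patch uses (notably INTERNAL), which pushes the init flag to bit 4 and leaves 27 bits of slot index. The BitField members support the usual encode/decode pattern; a sketch, assuming the fields are visible at the call site:

    // Sketch: pack and unpack a cache value with the widened fields.
    static uint32_t Pack(VariableMode mode, InitializationFlag init, int index) {
      return ModeField::encode(mode) |
             InitField::encode(init) |
             IndexField::encode(index);
    }
    static VariableMode UnpackMode(uint32_t value) {
      return ModeField::decode(value);
    }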
+
+
+//---------------------------------------------------------------------------
+// Auxiliary class used for the description of module instances.
+// Used by Runtime_DeclareModules.
+
+class ModuleInfo: public FixedArray {
+ public:
+ static ModuleInfo* cast(Object* description) {
+ return static_cast<ModuleInfo*>(FixedArray::cast(description));
+ }
+
+ static Handle<ModuleInfo> Create(
+ Isolate* isolate, Interface* interface, Scope* scope);
+
+ // Index of module's context in host context.
+ int host_index() { return Smi::cast(get(HOST_OFFSET))->value(); }
+
+ // Name, mode, and index of the i-th export, respectively.
+ // For value exports, the index is the slot of the value in the module
+ // context, for exported modules it is the slot index of the
+ // referred module's context in the host context.
+ // TODO(rossberg): This format cannot yet handle exports of modules declared
+ // in earlier scripts.
+ String* name(int i) { return String::cast(get(name_offset(i))); }
+ VariableMode mode(int i) {
+ return static_cast<VariableMode>(Smi::cast(get(mode_offset(i)))->value());
+ }
+ int index(int i) { return Smi::cast(get(index_offset(i)))->value(); }
+
+ int length() { return (FixedArray::length() - HEADER_SIZE) / ITEM_SIZE; }
+
+ private:
+ // The internal format is: Index, (Name, VariableMode, Index)*
+ enum {
+ HOST_OFFSET,
+ NAME_OFFSET,
+ MODE_OFFSET,
+ INDEX_OFFSET,
+ HEADER_SIZE = NAME_OFFSET,
+ ITEM_SIZE = INDEX_OFFSET - NAME_OFFSET + 1
+ };
+ inline int name_offset(int i) { return NAME_OFFSET + i * ITEM_SIZE; }
+ inline int mode_offset(int i) { return MODE_OFFSET + i * ITEM_SIZE; }
+ inline int index_offset(int i) { return INDEX_OFFSET + i * ITEM_SIZE; }
+
+ static Handle<ModuleInfo> Allocate(Isolate* isolate, int length) {
+ return Handle<ModuleInfo>::cast(
+ isolate->factory()->NewFixedArray(HEADER_SIZE + ITEM_SIZE * length));
+ }
+ void set_host_index(int index) { set(HOST_OFFSET, Smi::FromInt(index)); }
+ void set_name(int i, String* name) { set(name_offset(i), name); }
+ void set_mode(int i, VariableMode mode) {
+ set(mode_offset(i), Smi::FromInt(mode));
+ }
+ void set_index(int i, int index) {
+ set(index_offset(i), Smi::FromInt(index));
+ }
+};
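Given the flattened layout (a host-index header followed by name/mode/index triples), walking a ModuleInfo needs only the public accessors; for example:

    // Sketch: dump a ModuleInfo using the accessors defined above.
    static void DumpModuleInfo(ModuleInfo* info) {
      PrintF("host index: %d\n", info->host_index());
      for (int i = 0; i < info->length(); ++i) {
        PrintF("export %d: mode %d, index %d\n",
               i, static_cast<int>(info->mode(i)), info->index(i));
      }
    }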
+
+
} } // namespace v8::internal
#endif // V8_SCOPEINFO_H_
diff --git a/src/3rdparty/v8/src/scopes.cc b/src/3rdparty/v8/src/scopes.cc
index d2a919a..76d3ed4 100644
--- a/src/3rdparty/v8/src/scopes.cc
+++ b/src/3rdparty/v8/src/scopes.cc
@@ -54,8 +54,8 @@ namespace internal {
static bool Match(void* key1, void* key2) {
String* name1 = *reinterpret_cast<String**>(key1);
String* name2 = *reinterpret_cast<String**>(key2);
- ASSERT(name1->IsSymbol());
- ASSERT(name2->IsSymbol());
+ ASSERT(name1->IsInternalizedString());
+ ASSERT(name2->IsInternalizedString());
return name1 == name2;
}
@@ -107,9 +107,10 @@ Variable* VariableMap::Lookup(Handle<String> name) {
// Implementation of Scope
Scope::Scope(Scope* outer_scope, ScopeType type, Zone* zone)
- : isolate_(Isolate::Current()),
+ : isolate_(zone->isolate()),
inner_scopes_(4, zone),
variables_(zone),
+ internals_(4, zone),
temps_(4, zone),
params_(4, zone),
unresolved_(16, zone),
@@ -133,6 +134,7 @@ Scope::Scope(Scope* inner_scope,
: isolate_(Isolate::Current()),
inner_scopes_(4, zone),
variables_(zone),
+ internals_(4, zone),
temps_(4, zone),
params_(4, zone),
unresolved_(16, zone),
@@ -155,6 +157,7 @@ Scope::Scope(Scope* inner_scope, Handle<String> catch_variable_name, Zone* zone)
: isolate_(Isolate::Current()),
inner_scopes_(1, zone),
variables_(zone),
+ internals_(0, zone),
temps_(0, zone),
params_(0, zone),
unresolved_(0, zone),
@@ -181,7 +184,7 @@ void Scope::SetDefaults(ScopeType type,
Handle<ScopeInfo> scope_info) {
outer_scope_ = outer_scope;
type_ = type;
- scope_name_ = isolate_->factory()->empty_symbol();
+ scope_name_ = isolate_->factory()->empty_string();
dynamics_ = NULL;
receiver_ = NULL;
function_ = NULL;
@@ -201,6 +204,8 @@ void Scope::SetDefaults(ScopeType type,
num_var_or_const_ = 0;
num_stack_slots_ = 0;
num_heap_slots_ = 0;
+ num_modules_ = 0;
+ module_var_ = NULL;
scope_info_ = scope_info;
start_position_ = RelocInfo::kNoPosition;
end_position_ = RelocInfo::kNoPosition;
@@ -307,23 +312,6 @@ bool Scope::Analyze(CompilationInfo* info) {
}
#endif
- if (FLAG_harmony_scoping) {
- VariableProxy* proxy = scope->CheckAssignmentToConst();
- if (proxy != NULL) {
- // Found an assignment to const. Throw a syntax error.
- MessageLocation location(info->script(),
- proxy->position(),
- proxy->position());
- Isolate* isolate = info->isolate();
- Factory* factory = isolate->factory();
- Handle<JSArray> array = factory->NewJSArray(0);
- Handle<Object> result =
- factory->NewSyntaxError("harmony_const_assign", array);
- isolate->Throw(*result, &location);
- return false;
- }
- }
-
info->SetScope(scope);
return true;
}
@@ -351,7 +339,7 @@ void Scope::Initialize() {
if (is_declaration_scope()) {
Variable* var =
variables_.Declare(this,
- isolate_->factory()->this_symbol(),
+ isolate_->factory()->this_string(),
VAR,
false,
Variable::THIS,
@@ -368,7 +356,7 @@ void Scope::Initialize() {
// Note that it might never be accessed, in which case it won't be
// allocated during variable allocation.
variables_.Declare(this,
- isolate_->factory()->arguments_symbol(),
+ isolate_->factory()->arguments_string(),
VAR,
true,
Variable::ARGUMENTS,
@@ -379,6 +367,7 @@ void Scope::Initialize() {
Scope* Scope::FinalizeBlockScope() {
ASSERT(is_block_scope());
+ ASSERT(internals_.is_empty());
ASSERT(temps_.is_empty());
ASSERT(params_.is_empty());
@@ -519,6 +508,19 @@ void Scope::RemoveUnresolved(VariableProxy* var) {
}
+Variable* Scope::NewInternal(Handle<String> name) {
+ ASSERT(!already_resolved());
+ Variable* var = new(zone()) Variable(this,
+ name,
+ INTERNAL,
+ false,
+ Variable::NORMAL,
+ kCreatedInitialized);
+ internals_.Add(var, zone());
+ return var;
+}
+
+
Variable* Scope::NewTemporary(Handle<String> name) {
ASSERT(!already_resolved());
Variable* var = new(zone()) Variable(this,
@@ -576,29 +578,6 @@ Declaration* Scope::CheckConflictingVarDeclarations() {
}
-VariableProxy* Scope::CheckAssignmentToConst() {
- // Check this scope.
- if (is_extended_mode()) {
- for (int i = 0; i < unresolved_.length(); i++) {
- ASSERT(unresolved_[i]->var() != NULL);
- if (unresolved_[i]->var()->is_const_mode() &&
- unresolved_[i]->IsLValue()) {
- return unresolved_[i];
- }
- }
- }
-
- // Check inner scopes.
- for (int i = 0; i < inner_scopes_.length(); i++) {
- VariableProxy* proxy = inner_scopes_[i]->CheckAssignmentToConst();
- if (proxy != NULL) return proxy;
- }
-
- // No assignments to const found.
- return NULL;
-}
-
-
class VarAndOrder {
public:
VarAndOrder(Variable* var, int order) : var_(var), order_(order) { }
@@ -619,6 +598,15 @@ void Scope::CollectStackAndContextLocals(ZoneList<Variable*>* stack_locals,
ASSERT(stack_locals != NULL);
ASSERT(context_locals != NULL);
+ // Collect internals which are always allocated on the heap.
+ for (int i = 0; i < internals_.length(); i++) {
+ Variable* var = internals_[i];
+ if (var->is_used()) {
+ ASSERT(var->IsContextSlot());
+ context_locals->Add(var, zone());
+ }
+ }
+
// Collect temporaries which are always allocated on the stack.
for (int i = 0; i < temps_.length(); i++) {
Variable* var = temps_[i];
@@ -628,9 +616,8 @@ void Scope::CollectStackAndContextLocals(ZoneList<Variable*>* stack_locals,
}
}
- ZoneList<VarAndOrder> vars(variables_.occupancy(), zone());
-
// Collect declared local variables.
+ ZoneList<VarAndOrder> vars(variables_.occupancy(), zone());
for (VariableMap::Entry* p = variables_.Start();
p != NULL;
p = variables_.Next(p)) {
@@ -663,18 +650,18 @@ bool Scope::AllocateVariables(CompilationInfo* info,
}
PropagateScopeInfo(outer_scope_calls_non_strict_eval);
- // 2) Resolve variables.
+ // 2) Allocate module instances.
+ if (FLAG_harmony_modules && (is_global_scope() || is_module_scope())) {
+ ASSERT(num_modules_ == 0);
+ AllocateModulesRecursively(this);
+ }
+
+ // 3) Resolve variables.
if (!ResolveVariablesRecursively(info, factory)) return false;
- // 3) Allocate variables.
+ // 4) Allocate variables.
AllocateVariablesRecursively();
- // 4) Allocate and link module instance objects.
- if (FLAG_harmony_modules && (is_global_scope() || is_module_scope())) {
- AllocateModules(info);
- LinkModules(info);
- }
-
return true;
}
@@ -706,17 +693,12 @@ bool Scope::HasTrivialOuterContext() const {
bool Scope::HasLazyCompilableOuterContext() const {
Scope* outer = outer_scope_;
if (outer == NULL) return true;
- // There are several reasons that prevent lazy compilation:
- // - This scope is inside a with scope and all declaration scopes between
- // them have empty contexts. Such declaration scopes become invisible
- // during scope info deserialization.
- // - This scope is inside a strict eval scope with variables that are
- // potentially context allocated in an artificial function scope that
- // is not deserialized correctly.
+ // We have to prevent lazy compilation if this scope is inside a with scope
+ // and all declaration scopes between them have empty contexts. Such
+ // declaration scopes may become invisible during scope info deserialization.
outer = outer->DeclarationScope();
bool found_non_trivial_declarations = false;
for (const Scope* scope = outer; scope != NULL; scope = scope->outer_scope_) {
- if (scope->is_eval_scope()) return false;
if (scope->is_with_scope() && !found_non_trivial_declarations) return false;
if (scope->is_declaration_scope() && scope->num_heap_slots() > 0) {
found_non_trivial_declarations = true;
@@ -746,6 +728,15 @@ int Scope::ContextChainLength(Scope* scope) {
}
+Scope* Scope::GlobalScope() {
+ Scope* scope = this;
+ while (!scope->is_global_scope()) {
+ scope = scope->outer_scope();
+ }
+ return scope;
+}
+
+
Scope* Scope::DeclarationScope() {
Scope* scope = this;
while (!scope->is_declaration_scope()) {
@@ -919,6 +910,11 @@ void Scope::Print(int n) {
PrintVar(n1, temps_[i]);
}
+ Indent(n1, "// internal vars\n");
+ for (int i = 0; i < internals_.length(); i++) {
+ PrintVar(n1, internals_[i]);
+ }
+
Indent(n1, "// local vars\n");
PrintMap(n1, &variables_);
@@ -1040,8 +1036,10 @@ bool Scope::ResolveVariable(CompilationInfo* info,
if (isolate_->debug()->IsLoaded() && isolate_->debug()->InDebugger()) {
// Get the context before the debugger was entered.
SaveContext *save = isolate_->save_context();
- while (save != NULL && *save->context() == *isolate_->debug()->debug_context())
+ while (save != NULL &&
+ *save->context() == *isolate_->debug()->debug_context()) {
save = save->prev();
+ }
global = Handle<GlobalObject>(save->context()->global_object());
}
@@ -1065,11 +1063,14 @@ bool Scope::ResolveVariable(CompilationInfo* info,
Handle<GlobalObject> global = isolate_->global_object();
#ifdef ENABLE_DEBUGGER_SUPPORT
- if (isolate_->debug()->IsLoaded() && isolate_->debug()->InDebugger()) {
+ if (isolate_->debug()->IsLoaded() &&
+ isolate_->debug()->InDebugger()) {
// Get the context before the debugger was entered.
SaveContext *save = isolate_->save_context();
- while (save != NULL && *save->context() == *isolate_->debug()->debug_context())
+ while (save != NULL &&
+ *save->context() == *isolate_->debug()->debug_context()) {
save = save->prev();
+ }
global = Handle<GlobalObject>(save->context()->global_object());
}
@@ -1099,8 +1100,10 @@ bool Scope::ResolveVariable(CompilationInfo* info,
if (isolate_->debug()->IsLoaded() && isolate_->debug()->InDebugger()) {
// Get the context before the debugger was entered.
SaveContext *save = isolate_->save_context();
- while (save != NULL && *save->context() == *isolate_->debug()->debug_context())
+ while (save != NULL &&
+ *save->context() == *isolate_->debug()->debug_context()) {
save = save->prev();
+ }
global = Handle<GlobalObject>(save->context()->global_object());
}
@@ -1125,8 +1128,10 @@ bool Scope::ResolveVariable(CompilationInfo* info,
if (isolate_->debug()->IsLoaded() && isolate_->debug()->InDebugger()) {
// Get the context before the debugger was entered.
SaveContext *save = isolate_->save_context();
- while (save != NULL && *save->context() == *isolate_->debug()->debug_context())
+ while (save != NULL &&
+ *save->context() == *isolate_->debug()->debug_context()) {
save = save->prev();
+ }
global = Handle<GlobalObject>(save->context()->global_object());
}
@@ -1146,7 +1151,20 @@ bool Scope::ResolveVariable(CompilationInfo* info,
}
ASSERT(var != NULL);
- proxy->BindTo(var);
+
+ if (FLAG_harmony_scoping && is_extended_mode() &&
+ var->is_const_mode() && proxy->IsLValue()) {
+ // Assignment to const. Throw a syntax error.
+ MessageLocation location(
+ info->script(), proxy->position(), proxy->position());
+ Isolate* isolate = Isolate::Current();
+ Factory* factory = isolate->factory();
+ Handle<JSArray> array = factory->NewJSArray(0);
+ Handle<Object> result =
+ factory->NewSyntaxError("harmony_const_assign", array);
+ isolate->Throw(*result, &location);
+ return false;
+ }
if (FLAG_harmony_modules) {
bool ok;
@@ -1168,9 +1186,8 @@ bool Scope::ResolveVariable(CompilationInfo* info,
// Inconsistent use of module. Throw a syntax error.
// TODO(rossberg): generate more helpful error message.
- MessageLocation location(info->script(),
- proxy->position(),
- proxy->position());
+ MessageLocation location(
+ info->script(), proxy->position(), proxy->position());
Isolate* isolate = Isolate::Current();
Factory* factory = isolate->factory();
Handle<JSArray> array = factory->NewJSArray(1);
@@ -1182,6 +1199,8 @@ bool Scope::ResolveVariable(CompilationInfo* info,
}
}
+ proxy->BindTo(var);
+
return true;
}
@@ -1256,6 +1275,7 @@ bool Scope::MustAllocateInContext(Variable* var) {
// Exceptions: temporary variables are never allocated in a context;
// catch-bound variables are always allocated in a context.
if (var->mode() == TEMPORARY) return false;
+ if (var->mode() == INTERNAL) return true;
if (is_catch_scope() || is_block_scope() || is_module_scope()) return true;
if (is_global_scope() && IsLexicalVariableMode(var->mode())) return true;
return var->has_forced_context_allocation() ||
@@ -1268,7 +1288,7 @@ bool Scope::MustAllocateInContext(Variable* var) {
bool Scope::HasArgumentsParameter() {
for (int i = 0; i < params_.length(); i++) {
if (params_[i]->name().is_identical_to(
- isolate_->factory()->arguments_symbol())) {
+ isolate_->factory()->arguments_string())) {
return true;
}
}
@@ -1288,7 +1308,7 @@ void Scope::AllocateHeapSlot(Variable* var) {
void Scope::AllocateParameterLocals() {
ASSERT(is_function_scope());
- Variable* arguments = LocalLookup(isolate_->factory()->arguments_symbol());
+ Variable* arguments = LocalLookup(isolate_->factory()->arguments_string());
ASSERT(arguments != NULL); // functions have 'arguments' declared implicitly
bool uses_nonstrict_arguments = false;
@@ -1344,7 +1364,7 @@ void Scope::AllocateParameterLocals() {
void Scope::AllocateNonParameterLocal(Variable* var) {
ASSERT(var->scope() == this);
- ASSERT(!var->IsVariable(isolate_->factory()->result_symbol()) ||
+ ASSERT(!var->IsVariable(isolate_->factory()->result_string()) ||
!var->IsStackLocal());
if (var->IsUnallocated() && MustAllocate(var)) {
if (MustAllocateInContext(var)) {
@@ -1362,15 +1382,17 @@ void Scope::AllocateNonParameterLocals() {
AllocateNonParameterLocal(temps_[i]);
}
- ZoneList<VarAndOrder> vars(variables_.occupancy(), zone());
+ for (int i = 0; i < internals_.length(); i++) {
+ AllocateNonParameterLocal(internals_[i]);
+ }
+ ZoneList<VarAndOrder> vars(variables_.occupancy(), zone());
for (VariableMap::Entry* p = variables_.Start();
p != NULL;
p = variables_.Next(p)) {
Variable* var = reinterpret_cast<Variable*>(p->value);
vars.Add(VarAndOrder(var, p->order), zone());
}
-
vars.Sort(VarAndOrder::Compare);
int var_count = vars.length();
for (int i = 0; i < var_count; i++) {
@@ -1423,89 +1445,34 @@ void Scope::AllocateVariablesRecursively() {
}
-int Scope::StackLocalCount() const {
- return num_stack_slots() -
- (function_ != NULL && function_->proxy()->var()->IsStackLocal() ? 1 : 0);
-}
-
-
-int Scope::ContextLocalCount() const {
- if (num_heap_slots() == 0) return 0;
- return num_heap_slots() - Context::MIN_CONTEXT_SLOTS -
- (function_ != NULL && function_->proxy()->var()->IsContextSlot() ? 1 : 0);
-}
-
-
-void Scope::AllocateModules(CompilationInfo* info) {
- ASSERT(is_global_scope() || is_module_scope());
-
+void Scope::AllocateModulesRecursively(Scope* host_scope) {
+ if (already_resolved()) return;
if (is_module_scope()) {
ASSERT(interface_->IsFrozen());
- ASSERT(scope_info_.is_null());
-
- // TODO(rossberg): This has to be the initial compilation of this code.
- // We currently do not allow recompiling any module definitions.
- Handle<ScopeInfo> scope_info = GetScopeInfo();
- Factory* factory = info->isolate()->factory();
- Handle<Context> context = factory->NewModuleContext(scope_info);
- Handle<JSModule> instance = factory->NewJSModule(context, scope_info);
- context->set_module(*instance);
-
- bool ok;
- interface_->MakeSingleton(instance, &ok);
- ASSERT(ok);
+ Handle<String> name = isolate_->factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR(".module"));
+ ASSERT(module_var_ == NULL);
+ module_var_ = host_scope->NewInternal(name);
+ ++host_scope->num_modules_;
}
- // Allocate nested modules.
for (int i = 0; i < inner_scopes_.length(); i++) {
Scope* inner_scope = inner_scopes_.at(i);
- if (inner_scope->is_module_scope()) {
- inner_scope->AllocateModules(info);
- }
+ inner_scope->AllocateModulesRecursively(host_scope);
}
}
-void Scope::LinkModules(CompilationInfo* info) {
- ASSERT(is_global_scope() || is_module_scope());
+int Scope::StackLocalCount() const {
+ return num_stack_slots() -
+ (function_ != NULL && function_->proxy()->var()->IsStackLocal() ? 1 : 0);
+}
- if (is_module_scope()) {
- Handle<JSModule> instance = interface_->Instance();
-
- // Populate the module instance object.
- const PropertyAttributes ro_attr =
- static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE | DONT_ENUM);
- const PropertyAttributes rw_attr =
- static_cast<PropertyAttributes>(DONT_DELETE | DONT_ENUM);
- for (Interface::Iterator it = interface_->iterator();
- !it.done(); it.Advance()) {
- if (it.interface()->IsModule()) {
- Handle<Object> value = it.interface()->Instance();
- ASSERT(!value.is_null());
- JSReceiver::SetProperty(
- instance, it.name(), value, ro_attr, kStrictMode);
- } else {
- Variable* var = LocalLookup(it.name());
- ASSERT(var != NULL && var->IsContextSlot());
- PropertyAttributes attr = var->is_const_mode() ? ro_attr : rw_attr;
- Handle<AccessorInfo> info =
- Accessors::MakeModuleExport(it.name(), var->index(), attr);
- Handle<Object> result = SetAccessor(instance, info);
- ASSERT(!(result.is_null() || result->IsUndefined()));
- USE(result);
- }
- }
- USE(JSObject::PreventExtensions(instance));
- }
- // Link nested modules.
- for (int i = 0; i < inner_scopes_.length(); i++) {
- Scope* inner_scope = inner_scopes_.at(i);
- if (inner_scope->is_module_scope()) {
- inner_scope->LinkModules(info);
- }
- }
+int Scope::ContextLocalCount() const {
+ if (num_heap_slots() == 0) return 0;
+ return num_heap_slots() - Context::MIN_CONTEXT_SLOTS -
+ (function_ != NULL && function_->proxy()->var()->IsContextSlot() ? 1 : 0);
}
-
} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/scopes.h b/src/3rdparty/v8/src/scopes.h
index e9425f0..e2e7cd1 100644
--- a/src/3rdparty/v8/src/scopes.h
+++ b/src/3rdparty/v8/src/scopes.h
@@ -186,6 +186,12 @@ class Scope: public ZoneObject {
// such a variable again if it was added; otherwise this is a no-op.
void RemoveUnresolved(VariableProxy* var);
+ // Creates a new internal variable in this scope. The name is only used
+ // for printing and cannot be used to find the variable. In particular,
+ // the only way to get hold of the variable is by keeping the Variable*
+ // around.
+ Variable* NewInternal(Handle<String> name);
+
// Creates a new temporary variable in this scope. The name is only used
// for printing and cannot be used to find the variable. In particular,
// the only way to get hold of the temporary is by keeping the Variable*
@@ -218,11 +224,6 @@ class Scope: public ZoneObject {
// scope over a let binding of the same name.
Declaration* CheckConflictingVarDeclarations();
- // For harmony block scoping mode: Check if the scope has variable proxies
- // that are used as lvalues and point to const variables. Assumes that scopes
- // have been analyzed and variables been resolved.
- VariableProxy* CheckAssignmentToConst();
-
// ---------------------------------------------------------------------------
// Scope-specific info.
@@ -378,6 +379,12 @@ class Scope: public ZoneObject {
int StackLocalCount() const;
int ContextLocalCount() const;
+ // For global scopes, the number of module literals (including nested ones).
+ int num_modules() const { return num_modules_; }
+
+ // For module scopes, the host scope's internal variable binding this module.
+ Variable* module_var() const { return module_var_; }
+
// Make sure this scope and all outer scopes are eagerly compiled.
void ForceEagerCompilation() { force_eager_compilation_ = true; }
@@ -396,6 +403,9 @@ class Scope: public ZoneObject {
// The number of contexts between this and scope; zero if this == scope.
int ContextChainLength(Scope* scope);
+ // Find the innermost global scope.
+ Scope* GlobalScope();
+
// Find the first function, global, or eval scope. This is the scope
// where var declarations will be hoisted to in the implementation.
Scope* DeclarationScope();
@@ -450,6 +460,8 @@ class Scope: public ZoneObject {
// variables may be implicitly 'declared' by being used (possibly in
// an inner scope) with no intervening with statements or eval calls.
VariableMap variables_;
+ // Compiler-allocated (user-invisible) internals.
+ ZoneList<Variable*> internals_;
// Compiler-allocated (user-invisible) temporaries.
ZoneList<Variable*> temps_;
// Parameter list in source order.
@@ -505,6 +517,12 @@ class Scope: public ZoneObject {
int num_stack_slots_;
int num_heap_slots_;
+ // The number of modules (including nested ones).
+ int num_modules_;
+
+ // For module scopes, the host scope's internal variable binding this module.
+ Variable* module_var_;
+
// Serialized scope info support.
Handle<ScopeInfo> scope_info_;
bool already_resolved() { return already_resolved_; }
@@ -589,6 +607,7 @@ class Scope: public ZoneObject {
void AllocateNonParameterLocal(Variable* var);
void AllocateNonParameterLocals();
void AllocateVariablesRecursively();
+ void AllocateModulesRecursively(Scope* host_scope);
// Resolve and fill in the allocation information for all variables
// in this scope. Must be called *after* all scopes have been
@@ -602,13 +621,6 @@ class Scope: public ZoneObject {
bool AllocateVariables(CompilationInfo* info,
AstNodeFactory<AstNullVisitor>* factory);
- // Instance objects have to be created ahead of time (before code generation)
- // because of potentially cyclic references between them.
- // Linking also has to be a separate stage, since populating one object may
- // potentially require (forward) references to others.
- void AllocateModules(CompilationInfo* info);
- void LinkModules(CompilationInfo* info);
-
private:
// Construct a scope based on the scope info.
Scope(Scope* inner_scope, ScopeType type, Handle<ScopeInfo> scope_info,
diff --git a/src/3rdparty/v8/src/serialize.cc b/src/3rdparty/v8/src/serialize.cc
index dfc5574..e587dfa 100644
--- a/src/3rdparty/v8/src/serialize.cc
+++ b/src/3rdparty/v8/src/serialize.cc
@@ -30,6 +30,7 @@
#include "accessors.h"
#include "api.h"
#include "bootstrapper.h"
+#include "deoptimizer.h"
#include "execution.h"
#include "global-handles.h"
#include "ic-inl.h"
@@ -443,15 +444,15 @@ void ExternalReferenceTable::PopulateTable(Isolate* isolate) {
UNCLASSIFIED,
30,
"TranscendentalCache::caches()");
- Add(ExternalReference::handle_scope_next_address().address(),
+ Add(ExternalReference::handle_scope_next_address(isolate).address(),
UNCLASSIFIED,
31,
"HandleScope::next");
- Add(ExternalReference::handle_scope_limit_address().address(),
+ Add(ExternalReference::handle_scope_limit_address(isolate).address(),
UNCLASSIFIED,
32,
"HandleScope::limit");
- Add(ExternalReference::handle_scope_level_address().address(),
+ Add(ExternalReference::handle_scope_level_address(isolate).address(),
UNCLASSIFIED,
33,
"HandleScope::level");
@@ -527,6 +528,34 @@ void ExternalReferenceTable::PopulateTable(Isolate* isolate) {
UNCLASSIFIED,
51,
"Code::MakeCodeYoung");
+ Add(ExternalReference::cpu_features().address(),
+ UNCLASSIFIED,
+ 52,
+ "cpu_features");
+ Add(ExternalReference::new_space_allocation_top_address(isolate).address(),
+ UNCLASSIFIED,
+ 53,
+ "Heap::NewSpaceAllocationTopAddress");
+ Add(ExternalReference::new_space_allocation_limit_address(isolate).address(),
+ UNCLASSIFIED,
+ 54,
+ "Heap::NewSpaceAllocationLimitAddress");
+ Add(ExternalReference(Runtime::kAllocateInNewSpace, isolate).address(),
+ UNCLASSIFIED,
+ 55,
+ "Runtime::AllocateInNewSpace");
+
+ // Add a small set of deopt entry addresses to the encoder without generating the
+ // deopt table code, which isn't possible at deserialization time.
+ HandleScope scope(isolate);
+ for (int entry = 0; entry < kDeoptTableSerializeEntryCount; ++entry) {
+ Address address = Deoptimizer::GetDeoptimizationEntry(
+ isolate,
+ entry,
+ Deoptimizer::LAZY,
+ Deoptimizer::CALCULATE_ENTRY_ADDRESS);
+ Add(address, LAZY_DEOPTIMIZATION, 56 + entry, "lazy_deopt");
+ }
}
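These entries are looked up through the 32-bit external-reference encoding whose constants appear in serialize.h below (kReferenceIdBits == 16, kReferenceTypeShift == 16). As a hedged sketch of the packing those constants suggest, not a quote of the encoder itself:

// Sketch only: pack a TypeCode and a 16-bit id the way the serialize.h
// constants imply (type in the high bits, id in the low 16 bits).
static uint32_t EncodeExternal(TypeCode type, uint16_t id) {
  return (static_cast<uint32_t>(type) << kReferenceTypeShift) | id;
}
// Under that assumption the loop above yields encodings
//   EncodeExternal(LAZY_DEOPTIMIZATION, 56 + entry)
// for entry in [0, kDeoptTableSerializeEntryCount).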
@@ -1327,9 +1356,9 @@ void PartialSerializer::SerializeObject(
// should go through the root array or through the partial snapshot cache.
// If this is not the case you may have to add something to the root array.
ASSERT(!startup_serializer_->address_mapper()->IsMapped(heap_object));
- // All the symbols that the partial snapshot needs should be either in the
- // root table or in the partial snapshot cache.
- ASSERT(!heap_object->IsSymbol());
+ // All the internalized strings that the partial snapshot needs should be
+ // either in the root table or in the partial snapshot cache.
+ ASSERT(!heap_object->IsInternalizedString());
if (address_mapper_.IsMapped(heap_object)) {
int space = SpaceOfObject(heap_object);
diff --git a/src/3rdparty/v8/src/serialize.h b/src/3rdparty/v8/src/serialize.h
index 2041792..e0bcf4e 100644
--- a/src/3rdparty/v8/src/serialize.h
+++ b/src/3rdparty/v8/src/serialize.h
@@ -47,10 +47,11 @@ enum TypeCode {
EXTENSION,
ACCESSOR,
RUNTIME_ENTRY,
- STUB_CACHE_TABLE
+ STUB_CACHE_TABLE,
+ LAZY_DEOPTIMIZATION
};
-const int kTypeCodeCount = STUB_CACHE_TABLE + 1;
+const int kTypeCodeCount = LAZY_DEOPTIMIZATION + 1;
const int kFirstTypeCode = UNCLASSIFIED;
const int kReferenceIdBits = 16;
@@ -59,6 +60,7 @@ const int kReferenceTypeShift = kReferenceIdBits;
const int kDebugRegisterBits = 4;
const int kDebugIdShift = kDebugRegisterBits;
+const int kDeoptTableSerializeEntryCount = 8;
// ExternalReferenceTable is a helper class that defines the relationship
// between external references and their encodings. It is used to build
@@ -636,7 +638,7 @@ class StartupSerializer : public Serializer {
// Serialize the current state of the heap. The order is:
// 1) Strong references.
// 2) Partial snapshot cache.
- // 3) Weak references (e.g. the symbol table).
+ // 3) Weak references (e.g. the string table).
virtual void SerializeStrongReferences();
virtual void SerializeObject(Object* o,
HowToCode how_to_code,
diff --git a/src/3rdparty/v8/src/smart-pointers.h b/src/3rdparty/v8/src/smart-pointers.h
index 345c4d4..02025bb 100644
--- a/src/3rdparty/v8/src/smart-pointers.h
+++ b/src/3rdparty/v8/src/smart-pointers.h
@@ -58,11 +58,16 @@ class SmartPointerBase {
// You can get the underlying pointer out with the * operator.
inline T* operator*() { return p_; }
- // You can use [n] to index as if it was a plain pointer
+ // You can use [n] to index as if it was a plain pointer.
inline T& operator[](size_t i) {
return p_[i];
}
+ // You can use [n] to index as if it was a plain pointer.
+ const inline T& operator[](size_t i) const {
+ return p_[i];
+ }
+
// We don't have implicit conversion to a T* since that hinders migration:
// You would not be able to change a method from returning a T* to
// returning a SmartArrayPointer<T> and then get errors wherever it is used.
@@ -77,6 +82,11 @@ class SmartPointerBase {
return temp;
}
+ inline void Reset(T* new_value) {
+ if (p_) Deallocator::Delete(p_);
+ p_ = new_value;
+ }
+
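A minimal usage sketch of the new Reset (NewArray is assumed from allocation.h; illustrative only):

// Reset frees the currently held array, if any, before adopting the new
// pointer, so the old buffer is not leaked.
SmartArrayPointer<char> buffer(NewArray<char>(16));
buffer.Reset(NewArray<char>(32));  // the 16-byte array is deallocated here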
// Assignment requires an empty (NULL) SmartArrayPointer as the receiver. Like
// the copy constructor it removes the pointer in the original to avoid
// double freeing.
diff --git a/src/3rdparty/v8/src/spaces.cc b/src/3rdparty/v8/src/spaces.cc
index 583b2ca..3adb2e3 100644
--- a/src/3rdparty/v8/src/spaces.cc
+++ b/src/3rdparty/v8/src/spaces.cc
@@ -27,7 +27,6 @@
#include "v8.h"
-#include "liveobjectlist-inl.h"
#include "macro-assembler.h"
#include "mark-compact.h"
#include "platform.h"
@@ -69,11 +68,11 @@ HeapObjectIterator::HeapObjectIterator(PagedSpace* space,
HeapObjectIterator::HeapObjectIterator(Page* page,
HeapObjectCallback size_func) {
Space* owner = page->owner();
- ASSERT(owner == HEAP->old_pointer_space() ||
- owner == HEAP->old_data_space() ||
- owner == HEAP->map_space() ||
- owner == HEAP->cell_space() ||
- owner == HEAP->code_space());
+ ASSERT(owner == page->heap()->old_pointer_space() ||
+ owner == page->heap()->old_data_space() ||
+ owner == page->heap()->map_space() ||
+ owner == page->heap()->cell_space() ||
+ owner == page->heap()->code_space());
Initialize(reinterpret_cast<PagedSpace*>(owner),
page->area_start(),
page->area_end(),
@@ -207,17 +206,18 @@ void CodeRange::GetNextAllocationBlock(size_t requested) {
}
-
-Address CodeRange::AllocateRawMemory(const size_t requested,
+Address CodeRange::AllocateRawMemory(const size_t requested_size,
+ const size_t commit_size,
size_t* allocated) {
+ ASSERT(commit_size <= requested_size);
ASSERT(current_allocation_block_index_ < allocation_list_.length());
- if (requested > allocation_list_[current_allocation_block_index_].size) {
+ if (requested_size > allocation_list_[current_allocation_block_index_].size) {
// Find an allocation block large enough. This function call may
// call V8::FatalProcessOutOfMemory if it cannot find a large enough block.
- GetNextAllocationBlock(requested);
+ GetNextAllocationBlock(requested_size);
}
// Commit the requested memory at the start of the current allocation block.
- size_t aligned_requested = RoundUp(requested, MemoryChunk::kAlignment);
+ size_t aligned_requested = RoundUp(requested_size, MemoryChunk::kAlignment);
FreeBlock current = allocation_list_[current_allocation_block_index_];
if (aligned_requested >= (current.size - Page::kPageSize)) {
// Don't leave a small free block, useless for a large object or chunk.
@@ -227,9 +227,10 @@ Address CodeRange::AllocateRawMemory(const size_t requested,
}
ASSERT(*allocated <= current.size);
ASSERT(IsAddressAligned(current.start, MemoryChunk::kAlignment));
- if (!MemoryAllocator::CommitCodePage(code_range_,
- current.start,
- *allocated)) {
+ if (!MemoryAllocator::CommitExecutableMemory(code_range_,
+ current.start,
+ commit_size,
+ *allocated)) {
*allocated = 0;
return NULL;
}
@@ -242,6 +243,16 @@ Address CodeRange::AllocateRawMemory(const size_t requested,
}
+bool CodeRange::CommitRawMemory(Address start, size_t length) {
+ return code_range_->Commit(start, length, true);
+}
+
+
+bool CodeRange::UncommitRawMemory(Address start, size_t length) {
+ return code_range_->Uncommit(start, length);
+}
+
+
void CodeRange::FreeRawMemory(Address address, size_t length) {
ASSERT(IsAddressAligned(address, MemoryChunk::kAlignment));
free_list_.Add(FreeBlock(address, length));
@@ -353,20 +364,25 @@ Address MemoryAllocator::ReserveAlignedMemory(size_t size,
}
-Address MemoryAllocator::AllocateAlignedMemory(size_t size,
+Address MemoryAllocator::AllocateAlignedMemory(size_t reserve_size,
+ size_t commit_size,
size_t alignment,
Executability executable,
VirtualMemory* controller) {
+ ASSERT(commit_size <= reserve_size);
VirtualMemory reservation;
- Address base = ReserveAlignedMemory(size, alignment, &reservation);
+ Address base = ReserveAlignedMemory(reserve_size, alignment, &reservation);
if (base == NULL) return NULL;
if (executable == EXECUTABLE) {
- if (!CommitCodePage(&reservation, base, size)) {
+ if (!CommitExecutableMemory(&reservation,
+ base,
+ commit_size,
+ reserve_size)) {
base = NULL;
}
} else {
- if (!reservation.Commit(base, size, false)) {
+ if (!reservation.Commit(base, commit_size, false)) {
base = NULL;
}
}
@@ -448,7 +464,9 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap,
chunk->slots_buffer_ = NULL;
chunk->skip_list_ = NULL;
chunk->write_barrier_counter_ = kWriteBarrierCounterGranularity;
+ chunk->progress_bar_ = 0;
chunk->high_water_mark_ = static_cast<int>(area_start - base);
+ chunk->parallel_sweeping_ = 0;
chunk->ResetLiveBytes();
Bitmap::Clear(chunk);
chunk->initialize_scan_on_scavenge(false);
@@ -469,6 +487,53 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap,
}
+// Commit MemoryChunk area to the requested size.
+bool MemoryChunk::CommitArea(size_t requested) {
+ size_t guard_size = IsFlagSet(IS_EXECUTABLE) ?
+ MemoryAllocator::CodePageGuardSize() : 0;
+ size_t header_size = area_start() - address() - guard_size;
+ size_t commit_size = RoundUp(header_size + requested, OS::CommitPageSize());
+ size_t committed_size = RoundUp(header_size + (area_end() - area_start()),
+ OS::CommitPageSize());
+
+ if (commit_size > committed_size) {
+ // Commit size should be less than or equal to the reserved size.
+ ASSERT(commit_size <= size() - 2 * guard_size);
+ // Append the committed area.
+ Address start = address() + committed_size + guard_size;
+ size_t length = commit_size - committed_size;
+ if (reservation_.IsReserved()) {
+ if (!reservation_.Commit(start, length, IsFlagSet(IS_EXECUTABLE))) {
+ return false;
+ }
+ } else {
+ CodeRange* code_range = heap_->isolate()->code_range();
+ ASSERT(code_range->exists() && IsFlagSet(IS_EXECUTABLE));
+ if (!code_range->CommitRawMemory(start, length)) return false;
+ }
+
+ if (Heap::ShouldZapGarbage()) {
+ heap_->isolate()->memory_allocator()->ZapBlock(start, length);
+ }
+ } else if (commit_size < committed_size) {
+ ASSERT(commit_size > 0);
+ // Shrink the committed area.
+ size_t length = committed_size - commit_size;
+ Address start = address() + committed_size + guard_size - length;
+ if (reservation_.IsReserved()) {
+ if (!reservation_.Uncommit(start, length)) return false;
+ } else {
+ CodeRange* code_range = heap_->isolate()->code_range();
+ ASSERT(code_range->exists() && IsFlagSet(IS_EXECUTABLE));
+ if (!code_range->UncommitRawMemory(start, length)) return false;
+ }
+ }
+
+ area_end_ = area_start_ + requested;
+ return true;
+}
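A worked example of the grow path above, with assumed numbers (non-executable chunk, so guard_size == 0; header_size == 256; OS::CommitPageSize() == 4096; 4 KB of area currently committed):

// Illustrative arithmetic only, mirroring the code above:
size_t committed_size = RoundUp(256 + 4096, 4096);   // == 8192
size_t commit_size    = RoundUp(256 + 10000, 4096);  // == 12288
// Grow path: commit one extra page of length 12288 - 8192 == 4096 starting
// at address() + committed_size, then set area_end_ = area_start_ + 10000.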
+
+
void MemoryChunk::InsertAfter(MemoryChunk* other) {
next_chunk_ = other->next_chunk_;
prev_chunk_ = other;
@@ -489,9 +554,12 @@ void MemoryChunk::Unlink() {
}
-MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size,
+MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
+ intptr_t commit_area_size,
Executability executable,
Space* owner) {
+ ASSERT(commit_area_size <= reserve_area_size);
+
size_t chunk_size;
Heap* heap = isolate_->heap();
Address base = NULL;
@@ -499,8 +567,38 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size,
Address area_start = NULL;
Address area_end = NULL;
+ //
+ // MemoryChunk layout:
+ //
+ // Executable
+ // +----------------------------+<- base aligned with MemoryChunk::kAlignment
+ // | Header |
+ // +----------------------------+<- base + CodePageGuardStartOffset
+ // | Guard |
+ // +----------------------------+<- area_start_
+ // | Area |
+ // +----------------------------+<- area_end_ (area_start + commit_area_size)
+ // | Committed but not used |
+ // +----------------------------+<- aligned at OS page boundary
+ // | Reserved but not committed |
+ // +----------------------------+<- aligned at OS page boundary
+ // | Guard |
+ // +----------------------------+<- base + chunk_size
+ //
+ // Non-executable
+ // +----------------------------+<- base aligned with MemoryChunk::kAlignment
+ // | Header |
+ // +----------------------------+<- area_start_ (base + kObjectStartOffset)
+ // | Area |
+ // +----------------------------+<- area_end_ (area_start + commit_area_size)
+ // | Committed but not used |
+ // +----------------------------+<- aligned at OS page boundary
+ // | Reserved but not committed |
+ // +----------------------------+<- base + chunk_size
+ //
+
if (executable == EXECUTABLE) {
- chunk_size = RoundUp(CodePageAreaStartOffset() + body_size,
+ chunk_size = RoundUp(CodePageAreaStartOffset() + reserve_area_size,
OS::CommitPageSize()) + CodePageGuardSize();
// Check executable memory limit.
@@ -511,10 +609,15 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size,
return NULL;
}
+ // Size of header (not executable) plus area (executable).
+ size_t commit_size = RoundUp(CodePageGuardStartOffset() + commit_area_size,
+ OS::CommitPageSize());
// Allocate executable memory either from code range or from the
// OS.
if (isolate_->code_range()->exists()) {
- base = isolate_->code_range()->AllocateRawMemory(chunk_size, &chunk_size);
+ base = isolate_->code_range()->AllocateRawMemory(chunk_size,
+ commit_size,
+ &chunk_size);
ASSERT(IsAligned(reinterpret_cast<intptr_t>(base),
MemoryChunk::kAlignment));
if (base == NULL) return NULL;
@@ -523,6 +626,7 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size,
size_executable_ += chunk_size;
} else {
base = AllocateAlignedMemory(chunk_size,
+ commit_size,
MemoryChunk::kAlignment,
executable,
&reservation);
@@ -533,14 +637,18 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size,
if (Heap::ShouldZapGarbage()) {
ZapBlock(base, CodePageGuardStartOffset());
- ZapBlock(base + CodePageAreaStartOffset(), body_size);
+ ZapBlock(base + CodePageAreaStartOffset(), commit_area_size);
}
area_start = base + CodePageAreaStartOffset();
- area_end = area_start + body_size;
+ area_end = area_start + commit_area_size;
} else {
- chunk_size = MemoryChunk::kObjectStartOffset + body_size;
+ chunk_size = RoundUp(MemoryChunk::kObjectStartOffset + reserve_area_size,
+ OS::CommitPageSize());
+ size_t commit_size = RoundUp(MemoryChunk::kObjectStartOffset +
+ commit_area_size, OS::CommitPageSize());
base = AllocateAlignedMemory(chunk_size,
+ commit_size,
MemoryChunk::kAlignment,
executable,
&reservation);
@@ -548,13 +656,15 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size,
if (base == NULL) return NULL;
if (Heap::ShouldZapGarbage()) {
- ZapBlock(base, chunk_size);
+ ZapBlock(base, Page::kObjectStartOffset + commit_area_size);
}
area_start = base + Page::kObjectStartOffset;
- area_end = base + chunk_size;
+ area_end = area_start + commit_area_size;
}
+ // Use chunk_size for statistics and callbacks because we assume that they
+ // treat reserved but not-yet-committed memory regions of chunks as allocated.
isolate_->counters()->memory_allocated()->
Increment(static_cast<int>(chunk_size));
@@ -579,7 +689,7 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size,
Page* MemoryAllocator::AllocatePage(intptr_t size,
PagedSpace* owner,
Executability executable) {
- MemoryChunk* chunk = AllocateChunk(size, executable, owner);
+ MemoryChunk* chunk = AllocateChunk(size, size, executable, owner);
if (chunk == NULL) return NULL;
@@ -590,7 +700,10 @@ Page* MemoryAllocator::AllocatePage(intptr_t size,
LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size,
Space* owner,
Executability executable) {
- MemoryChunk* chunk = AllocateChunk(object_size, executable, owner);
+ MemoryChunk* chunk = AllocateChunk(object_size,
+ object_size,
+ executable,
+ owner);
if (chunk == NULL) return NULL;
return LargePage::Initialize(isolate_->heap(), chunk);
}
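As the two callers above show, ordinary pages and large pages commit everything up front (reserve == commit). A hedged sketch of the lazy-commit case the new signature enables (allocator and owner are placeholders):

MemoryChunk* chunk = allocator->AllocateChunk(512 * KB,  // reserved area
                                              64 * KB,   // committed now
                                              NOT_EXECUTABLE,
                                              owner);
if (chunk != NULL && chunk->CommitArea(128 * KB)) {
  // area_end_ now covers 128 KB; the remainder stays reserved only.
}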
@@ -732,9 +845,10 @@ int MemoryAllocator::CodePageAreaEndOffset() {
}
-bool MemoryAllocator::CommitCodePage(VirtualMemory* vm,
- Address start,
- size_t size) {
+bool MemoryAllocator::CommitExecutableMemory(VirtualMemory* vm,
+ Address start,
+ size_t commit_size,
+ size_t reserved_size) {
// Commit page header (not executable).
if (!vm->Commit(start,
CodePageGuardStartOffset(),
@@ -748,15 +862,14 @@ bool MemoryAllocator::CommitCodePage(VirtualMemory* vm,
}
// Commit page body (executable).
- size_t area_size = size - CodePageAreaStartOffset() - CodePageGuardSize();
if (!vm->Commit(start + CodePageAreaStartOffset(),
- area_size,
+ commit_size - CodePageGuardStartOffset(),
true)) {
return false;
}
- // Create guard page after the allocatable area.
- if (!vm->Guard(start + CodePageAreaStartOffset() + area_size)) {
+ // Create guard page before the end.
+ if (!vm->Guard(start + reserved_size - CodePageGuardSize())) {
return false;
}
@@ -968,36 +1081,6 @@ void PagedSpace::ReleasePage(Page* page) {
}
-void PagedSpace::ReleaseAllUnusedPages() {
- PageIterator it(this);
- while (it.has_next()) {
- Page* page = it.next();
- if (!page->WasSwept()) {
- if (page->LiveBytes() == 0) ReleasePage(page);
- } else {
- HeapObject* obj = HeapObject::FromAddress(page->area_start());
- if (obj->IsFreeSpace() &&
- FreeSpace::cast(obj)->size() == AreaSize()) {
- // Sometimes we allocate memory from free list but don't
- // immediately initialize it (e.g. see PagedSpace::ReserveSpace
- // called from Heap::ReserveSpace that can cause GC before
- // reserved space is actually initialized).
- // Thus we can't simply assume that obj represents a valid
- // node still owned by a free list
- // Instead we should verify that the page is fully covered
- // by free list items.
- FreeList::SizeStats sizes;
- free_list_.CountFreeListItems(page, &sizes);
- if (sizes.Total() == AreaSize()) {
- ReleasePage(page);
- }
- }
- }
- }
- heap()->FreeQueuedChunks();
-}
-
-
#ifdef DEBUG
void PagedSpace::Print() { }
#endif
@@ -1679,6 +1762,7 @@ static void ReportCodeKindStatistics() {
CASE(FUNCTION);
CASE(OPTIMIZED_FUNCTION);
CASE(STUB);
+ CASE(COMPILED_STUB);
CASE(BUILTIN);
CASE(LOAD_IC);
CASE(KEYED_LOAD_IC);
@@ -1890,7 +1974,7 @@ void FreeListNode::set_size(Heap* heap, int size_in_bytes) {
FreeListNode* FreeListNode::next() {
ASSERT(IsFreeListNode(this));
- if (map() == HEAP->raw_unchecked_free_space_map()) {
+ if (map() == GetHeap()->raw_unchecked_free_space_map()) {
ASSERT(map() == NULL || Size() >= kNextOffset + kPointerSize);
return reinterpret_cast<FreeListNode*>(
Memory::Address_at(address() + kNextOffset));
@@ -1903,7 +1987,7 @@ FreeListNode* FreeListNode::next() {
FreeListNode** FreeListNode::next_address() {
ASSERT(IsFreeListNode(this));
- if (map() == HEAP->raw_unchecked_free_space_map()) {
+ if (map() == GetHeap()->raw_unchecked_free_space_map()) {
ASSERT(Size() >= kNextOffset + kPointerSize);
return reinterpret_cast<FreeListNode**>(address() + kNextOffset);
} else {
@@ -1917,7 +2001,7 @@ void FreeListNode::set_next(FreeListNode* next) {
// While we are booting the VM the free space map will actually be null. So
// we have to make sure that we don't try to use it for anything at that
// stage.
- if (map() == HEAP->raw_unchecked_free_space_map()) {
+ if (map() == GetHeap()->raw_unchecked_free_space_map()) {
ASSERT(map() == NULL || Size() >= kNextOffset + kPointerSize);
Memory::Address_at(address() + kNextOffset) =
reinterpret_cast<Address>(next);
@@ -1928,52 +2012,72 @@ void FreeListNode::set_next(FreeListNode* next) {
}
-FreeList::FreeList(PagedSpace* owner)
- : owner_(owner), heap_(owner->heap()) {
- Reset();
+intptr_t FreeListCategory::Concatenate(FreeListCategory* category) {
+ intptr_t free_bytes = 0;
+ if (category->top_ != NULL) {
+ ASSERT(category->end_ != NULL);
+ // This is safe (it cannot deadlock) because two Concatenate operations
+ // are never performed concurrently on the same pair of free lists with
+ // the lock order reversed.
+ ScopedLock lock_target(mutex_);
+ ScopedLock lock_source(category->mutex());
+ free_bytes = category->available();
+ if (end_ == NULL) {
+ end_ = category->end();
+ } else {
+ category->end()->set_next(top_);
+ }
+ top_ = category->top();
+ available_ += category->available();
+ category->Reset();
+ }
+ return free_bytes;
}
-void FreeList::Reset() {
+void FreeListCategory::Reset() {
+ top_ = NULL;
+ end_ = NULL;
available_ = 0;
- small_list_ = NULL;
- medium_list_ = NULL;
- large_list_ = NULL;
- huge_list_ = NULL;
}
-int FreeList::Free(Address start, int size_in_bytes) {
- if (size_in_bytes == 0) return 0;
- FreeListNode* node = FreeListNode::FromAddress(start);
- node->set_size(heap_, size_in_bytes);
+intptr_t FreeListCategory::CountFreeListItemsInList(Page* p) {
+ int sum = 0;
+ FreeListNode* n = top_;
+ while (n != NULL) {
+ if (Page::FromAddress(n->address()) == p) {
+ FreeSpace* free_space = reinterpret_cast<FreeSpace*>(n);
+ sum += free_space->Size();
+ }
+ n = n->next();
+ }
+ return sum;
+}
- // Early return to drop too-small blocks on the floor.
- if (size_in_bytes < kSmallListMin) return size_in_bytes;
- // Insert other blocks at the head of a free list of the appropriate
- // magnitude.
- if (size_in_bytes <= kSmallListMax) {
- node->set_next(small_list_);
- small_list_ = node;
- } else if (size_in_bytes <= kMediumListMax) {
- node->set_next(medium_list_);
- medium_list_ = node;
- } else if (size_in_bytes <= kLargeListMax) {
- node->set_next(large_list_);
- large_list_ = node;
- } else {
- node->set_next(huge_list_);
- huge_list_ = node;
+intptr_t FreeListCategory::EvictFreeListItemsInList(Page* p) {
+ int sum = 0;
+ FreeListNode** n = &top_;
+ while (*n != NULL) {
+ if (Page::FromAddress((*n)->address()) == p) {
+ FreeSpace* free_space = reinterpret_cast<FreeSpace*>(*n);
+ sum += free_space->Size();
+ *n = (*n)->next();
+ } else {
+ n = (*n)->next_address();
+ }
}
- available_ += size_in_bytes;
- ASSERT(IsVeryLong() || available_ == SumFreeLists());
- return 0;
+ if (top_ == NULL) {
+ end_ = NULL;
+ }
+ available_ -= sum;
+ return sum;
}
-FreeListNode* FreeList::PickNodeFromList(FreeListNode** list, int* node_size) {
- FreeListNode* node = *list;
+FreeListNode* FreeListCategory::PickNodeFromList(int* node_size) {
+ FreeListNode* node = top_;
if (node == NULL) return NULL;
@@ -1984,59 +2088,150 @@ FreeListNode* FreeList::PickNodeFromList(FreeListNode** list, int* node_size) {
}
if (node != NULL) {
+ set_top(node->next());
*node_size = node->Size();
- *list = node->next();
+ available_ -= *node_size;
} else {
- *list = NULL;
+ set_top(NULL);
+ }
+
+ if (top() == NULL) {
+ set_end(NULL);
}
return node;
}
+void FreeListCategory::Free(FreeListNode* node, int size_in_bytes) {
+ node->set_next(top_);
+ top_ = node;
+ if (end_ == NULL) {
+ end_ = node;
+ }
+ available_ += size_in_bytes;
+}
+
+
+void FreeListCategory::RepairFreeList(Heap* heap) {
+ FreeListNode* n = top_;
+ while (n != NULL) {
+ Map** map_location = reinterpret_cast<Map**>(n->address());
+ if (*map_location == NULL) {
+ *map_location = heap->free_space_map();
+ } else {
+ ASSERT(*map_location == heap->free_space_map());
+ }
+ n = n->next();
+ }
+}
+
+
+FreeList::FreeList(PagedSpace* owner)
+ : owner_(owner), heap_(owner->heap()) {
+ Reset();
+}
+
+
+intptr_t FreeList::Concatenate(FreeList* free_list) {
+ intptr_t free_bytes = 0;
+ free_bytes += small_list_.Concatenate(free_list->small_list());
+ free_bytes += medium_list_.Concatenate(free_list->medium_list());
+ free_bytes += large_list_.Concatenate(free_list->large_list());
+ free_bytes += huge_list_.Concatenate(free_list->huge_list());
+ return free_bytes;
+}
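A hedged sketch of the intended use (supported by the friend SweeperThread declaration and free_list() accessor added in spaces.h further down): a sweeper thread accumulates freed blocks in a private FreeList, and the main thread later splices them back in category by category.

FreeList* main_list = space->free_list();
intptr_t stolen_bytes = main_list->Concatenate(sweeper_private_list);
space->AddToAccountingStats(stolen_bytes);  // credit the reclaimed bytes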
+
+
+void FreeList::Reset() {
+ small_list_.Reset();
+ medium_list_.Reset();
+ large_list_.Reset();
+ huge_list_.Reset();
+}
+
+
+int FreeList::Free(Address start, int size_in_bytes) {
+ if (size_in_bytes == 0) return 0;
+
+ FreeListNode* node = FreeListNode::FromAddress(start);
+ node->set_size(heap_, size_in_bytes);
+
+ // Early return to drop too-small blocks on the floor.
+ if (size_in_bytes < kSmallListMin) return size_in_bytes;
+
+ // Insert other blocks at the head of a free list of the appropriate
+ // magnitude.
+ if (size_in_bytes <= kSmallListMax) {
+ small_list_.Free(node, size_in_bytes);
+ } else if (size_in_bytes <= kMediumListMax) {
+ medium_list_.Free(node, size_in_bytes);
+ } else if (size_in_bytes <= kLargeListMax) {
+ large_list_.Free(node, size_in_bytes);
+ } else {
+ huge_list_.Free(node, size_in_bytes);
+ }
+
+ ASSERT(IsVeryLong() || available() == SumFreeLists());
+ return 0;
+}
+
+
FreeListNode* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
FreeListNode* node = NULL;
if (size_in_bytes <= kSmallAllocationMax) {
- node = PickNodeFromList(&small_list_, node_size);
+ node = small_list_.PickNodeFromList(node_size);
if (node != NULL) return node;
}
if (size_in_bytes <= kMediumAllocationMax) {
- node = PickNodeFromList(&medium_list_, node_size);
+ node = medium_list_.PickNodeFromList(node_size);
if (node != NULL) return node;
}
if (size_in_bytes <= kLargeAllocationMax) {
- node = PickNodeFromList(&large_list_, node_size);
+ node = large_list_.PickNodeFromList(node_size);
if (node != NULL) return node;
}
- for (FreeListNode** cur = &huge_list_;
+ int huge_list_available = huge_list_.available();
+ for (FreeListNode** cur = huge_list_.GetTopAddress();
*cur != NULL;
cur = (*cur)->next_address()) {
FreeListNode* cur_node = *cur;
while (cur_node != NULL &&
Page::FromAddress(cur_node->address())->IsEvacuationCandidate()) {
- available_ -= reinterpret_cast<FreeSpace*>(cur_node)->Size();
+ huge_list_available -= reinterpret_cast<FreeSpace*>(cur_node)->Size();
cur_node = cur_node->next();
}
*cur = cur_node;
- if (cur_node == NULL) break;
+ if (cur_node == NULL) {
+ huge_list_.set_end(NULL);
+ break;
+ }
- ASSERT((*cur)->map() == HEAP->raw_unchecked_free_space_map());
+ ASSERT((*cur)->map() == heap_->raw_unchecked_free_space_map());
FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(*cur);
int size = cur_as_free_space->Size();
if (size >= size_in_bytes) {
// Large enough node found. Unlink it from the list.
node = *cur;
- *node_size = size;
*cur = node->next();
+ *node_size = size;
+ huge_list_available -= size;
break;
}
}
+ if (huge_list_.top() == NULL) {
+ huge_list_.set_end(NULL);
+ }
+
+ huge_list_.set_available(huge_list_available);
+ ASSERT(IsVeryLong() || available() == SumFreeLists());
+
return node;
}
@@ -2056,8 +2251,6 @@ HeapObject* FreeList::Allocate(int size_in_bytes) {
FreeListNode* new_node = FindNodeFor(size_in_bytes, &new_node_size);
if (new_node == NULL) return NULL;
- available_ -= new_node_size;
- ASSERT(IsVeryLong() || available_ == SumFreeLists());
int bytes_left = new_node_size - size_in_bytes;
ASSERT(bytes_left >= 0);
@@ -2115,25 +2308,12 @@ HeapObject* FreeList::Allocate(int size_in_bytes) {
}
-static intptr_t CountFreeListItemsInList(FreeListNode* n, Page* p) {
- intptr_t sum = 0;
- while (n != NULL) {
- if (Page::FromAddress(n->address()) == p) {
- FreeSpace* free_space = reinterpret_cast<FreeSpace*>(n);
- sum += free_space->Size();
- }
- n = n->next();
- }
- return sum;
-}
-
-
void FreeList::CountFreeListItems(Page* p, SizeStats* sizes) {
- sizes->huge_size_ = CountFreeListItemsInList(huge_list_, p);
+ sizes->huge_size_ = huge_list_.CountFreeListItemsInList(p);
if (sizes->huge_size_ < p->area_size()) {
- sizes->small_size_ = CountFreeListItemsInList(small_list_, p);
- sizes->medium_size_ = CountFreeListItemsInList(medium_list_, p);
- sizes->large_size_ = CountFreeListItemsInList(large_list_, p);
+ sizes->small_size_ = small_list_.CountFreeListItemsInList(p);
+ sizes->medium_size_ = medium_list_.CountFreeListItemsInList(p);
+ sizes->large_size_ = large_list_.CountFreeListItemsInList(p);
} else {
sizes->small_size_ = 0;
sizes->medium_size_ = 0;
@@ -2142,41 +2322,33 @@ void FreeList::CountFreeListItems(Page* p, SizeStats* sizes) {
}
-static intptr_t EvictFreeListItemsInList(FreeListNode** n, Page* p) {
- intptr_t sum = 0;
- while (*n != NULL) {
- if (Page::FromAddress((*n)->address()) == p) {
- FreeSpace* free_space = reinterpret_cast<FreeSpace*>(*n);
- sum += free_space->Size();
- *n = (*n)->next();
- } else {
- n = (*n)->next_address();
- }
- }
- return sum;
-}
-
-
intptr_t FreeList::EvictFreeListItems(Page* p) {
- intptr_t sum = EvictFreeListItemsInList(&huge_list_, p);
+ intptr_t sum = huge_list_.EvictFreeListItemsInList(p);
if (sum < p->area_size()) {
- sum += EvictFreeListItemsInList(&small_list_, p) +
- EvictFreeListItemsInList(&medium_list_, p) +
- EvictFreeListItemsInList(&large_list_, p);
+ sum += small_list_.EvictFreeListItemsInList(p) +
+ medium_list_.EvictFreeListItemsInList(p) +
+ large_list_.EvictFreeListItemsInList(p);
}
- available_ -= static_cast<int>(sum);
-
return sum;
}
+void FreeList::RepairLists(Heap* heap) {
+ small_list_.RepairFreeList(heap);
+ medium_list_.RepairFreeList(heap);
+ large_list_.RepairFreeList(heap);
+ huge_list_.RepairFreeList(heap);
+}
+
+
#ifdef DEBUG
-intptr_t FreeList::SumFreeList(FreeListNode* cur) {
+intptr_t FreeListCategory::SumFreeList() {
intptr_t sum = 0;
+ FreeListNode* cur = top_;
while (cur != NULL) {
- ASSERT(cur->map() == HEAP->raw_unchecked_free_space_map());
+ ASSERT(cur->map() == cur->GetHeap()->raw_unchecked_free_space_map());
FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(cur);
sum += cur_as_free_space->Size();
cur = cur->next();
@@ -2188,8 +2360,9 @@ intptr_t FreeList::SumFreeList(FreeListNode* cur) {
static const int kVeryLongFreeList = 500;
-int FreeList::FreeListLength(FreeListNode* cur) {
+int FreeListCategory::FreeListLength() {
int length = 0;
+ FreeListNode* cur = top_;
while (cur != NULL) {
length++;
cur = cur->next();
@@ -2200,10 +2373,10 @@ int FreeList::FreeListLength(FreeListNode* cur) {
bool FreeList::IsVeryLong() {
- if (FreeListLength(small_list_) == kVeryLongFreeList) return true;
- if (FreeListLength(medium_list_) == kVeryLongFreeList) return true;
- if (FreeListLength(large_list_) == kVeryLongFreeList) return true;
- if (FreeListLength(huge_list_) == kVeryLongFreeList) return true;
+ if (small_list_.FreeListLength() == kVeryLongFreeList) return true;
+ if (medium_list_.FreeListLength() == kVeryLongFreeList) return true;
+ if (large_list_.FreeListLength() == kVeryLongFreeList) return true;
+ if (huge_list_.FreeListLength() == kVeryLongFreeList) return true;
return false;
}
@@ -2212,10 +2385,10 @@ bool FreeList::IsVeryLong() {
// on the free list, so it should not be called if FreeListLength returns
// kVeryLongFreeList.
intptr_t FreeList::SumFreeLists() {
- intptr_t sum = SumFreeList(small_list_);
- sum += SumFreeList(medium_list_);
- sum += SumFreeList(large_list_);
- sum += SumFreeList(huge_list_);
+ intptr_t sum = small_list_.SumFreeList();
+ sum += medium_list_.SumFreeList();
+ sum += large_list_.SumFreeList();
+ sum += huge_list_.SumFreeList();
return sum;
}
#endif
@@ -2303,27 +2476,6 @@ bool PagedSpace::ReserveSpace(int size_in_bytes) {
}
-static void RepairFreeList(Heap* heap, FreeListNode* n) {
- while (n != NULL) {
- Map** map_location = reinterpret_cast<Map**>(n->address());
- if (*map_location == NULL) {
- *map_location = heap->free_space_map();
- } else {
- ASSERT(*map_location == heap->free_space_map());
- }
- n = n->next();
- }
-}
-
-
-void FreeList::RepairLists(Heap* heap) {
- RepairFreeList(heap, small_list_);
- RepairFreeList(heap, medium_list_);
- RepairFreeList(heap, large_list_);
- RepairFreeList(heap, huge_list_);
-}
-
-
// After we have booted, we have created a map which represents free space
// on the heap. If there was already a free list then the elements on it
// were created with the wrong FreeSpaceMap (normally NULL), so we need to
@@ -2343,7 +2495,7 @@ bool LargeObjectSpace::ReserveSpace(int bytes) {
bool PagedSpace::AdvanceSweeper(intptr_t bytes_to_sweep) {
- if (IsSweepingComplete()) return true;
+ if (IsLazySweepingComplete()) return true;
intptr_t freed_bytes = 0;
Page* p = first_unswept_page_;
@@ -2355,7 +2507,10 @@ bool PagedSpace::AdvanceSweeper(intptr_t bytes_to_sweep) {
reinterpret_cast<intptr_t>(p));
}
DecreaseUnsweptFreeBytes(p);
- freed_bytes += MarkCompactCollector::SweepConservatively(this, p);
+ freed_bytes +=
+ MarkCompactCollector::
+ SweepConservatively<MarkCompactCollector::SWEEP_SEQUENTIALLY>(
+ this, NULL, p);
}
p = next_page;
} while (p != anchor() && freed_bytes < bytes_to_sweep);
@@ -2368,7 +2523,7 @@ bool PagedSpace::AdvanceSweeper(intptr_t bytes_to_sweep) {
heap()->FreeQueuedChunks();
- return IsSweepingComplete();
+ return IsLazySweepingComplete();
}
@@ -2387,13 +2542,34 @@ void PagedSpace::EvictEvacuationCandidatesFromFreeLists() {
}
+bool PagedSpace::EnsureSweeperProgress(intptr_t size_in_bytes) {
+ MarkCompactCollector* collector = heap()->mark_compact_collector();
+ if (collector->AreSweeperThreadsActivated()) {
+ if (FLAG_concurrent_sweeping) {
+ if (collector->StealMemoryFromSweeperThreads(this) < size_in_bytes) {
+ collector->WaitUntilSweepingCompleted();
+ collector->FinalizeSweeping();
+ return true;
+ }
+ return false;
+ }
+ return true;
+ } else {
+ return AdvanceSweeper(size_in_bytes);
+ }
+}
+
+
HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
// Allocation in this space has failed.
- // If there are unswept pages advance lazy sweeper then sweep one page before
- // allocating a new page.
- if (first_unswept_page_->is_valid()) {
- AdvanceSweeper(size_in_bytes);
+ // If there are unswept pages, advance the lazy sweeper a bounded number of
+ // times until we find a contiguous piece of memory of size_in_bytes.
+ const int kMaxSweepingTries = 5;
+ bool sweeping_complete = false;
+
+ for (int i = 0; i < kMaxSweepingTries && !sweeping_complete; i++) {
+ sweeping_complete = EnsureSweeperProgress(size_in_bytes);
// Retry the free list allocation.
HeapObject* object = free_list_.Allocate(size_in_bytes);
@@ -2415,8 +2591,8 @@ HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
// Last ditch, sweep all the remaining pages to try to find space. This may
// cause a pause.
- if (!IsSweepingComplete()) {
- AdvanceSweeper(kMaxInt);
+ if (!IsLazySweepingComplete()) {
+ EnsureSweeperProgress(kMaxInt);
// Retry the free list allocation.
HeapObject* object = free_list_.Allocate(size_in_bytes);
@@ -2784,7 +2960,8 @@ void LargeObjectSpace::FreeUnmarkedObjects() {
MarkBit mark_bit = Marking::MarkBitFrom(object);
if (mark_bit.Get()) {
mark_bit.Clear();
- MemoryChunk::IncrementLiveBytesFromGC(object->address(), -object->Size());
+ Page::FromAddress(object->address())->ResetProgressBar();
+ Page::FromAddress(object->address())->ResetLiveBytes();
previous = current;
current = current->next_page();
} else {
diff --git a/src/3rdparty/v8/src/spaces.h b/src/3rdparty/v8/src/spaces.h
index 9121e9c..e7e11db 100644
--- a/src/3rdparty/v8/src/spaces.h
+++ b/src/3rdparty/v8/src/spaces.h
@@ -320,7 +320,8 @@ class MemoryChunk {
Space* owner() const {
if ((reinterpret_cast<intptr_t>(owner_) & kFailureTagMask) ==
kFailureTag) {
- return reinterpret_cast<Space*>(owner_ - kFailureTag);
+ return reinterpret_cast<Space*>(reinterpret_cast<intptr_t>(owner_) -
+ kFailureTag);
} else {
return NULL;
}
@@ -397,6 +398,12 @@ class MemoryChunk {
WAS_SWEPT_PRECISELY,
WAS_SWEPT_CONSERVATIVELY,
+ // Large objects can have a progress bar in their page header. These objects
+ // are scanned in increments and will be kept black while being scanned.
+ // Even if the mutator writes to them they will be kept black and a
+ // white-to-grey transition is performed on the value.
+ HAS_PROGRESS_BAR,
+
// Last flag, keep at bottom.
NUM_MEMORY_CHUNK_FLAGS
};
@@ -448,6 +455,18 @@ class MemoryChunk {
// Return all current flags.
intptr_t GetFlags() { return flags_; }
+ intptr_t parallel_sweeping() const {
+ return parallel_sweeping_;
+ }
+
+ void set_parallel_sweeping(intptr_t state) {
+ parallel_sweeping_ = state;
+ }
+
+ bool TryParallelSweeping() {
+ return NoBarrier_CompareAndSwap(&parallel_sweeping_, 1, 0) == 1;
+ }
+
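A hedged sketch of the claiming protocol this CAS enables: sweeper threads race to flip a page's state from 1 (sweeping pending) to 0 (claimed), so exactly one thread wins each page.

PageIterator it(space);
while (it.has_next()) {
  Page* p = it.next();
  if (p->TryParallelSweeping()) {
    // This thread now owns p and may sweep it without further locking.
  }
}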
// Manage live byte count (count of bytes known to be live,
// because they are marked black).
void ResetLiveBytes() {
@@ -480,6 +499,29 @@ class MemoryChunk {
write_barrier_counter_ = counter;
}
+ int progress_bar() {
+ ASSERT(IsFlagSet(HAS_PROGRESS_BAR));
+ return progress_bar_;
+ }
+
+ void set_progress_bar(int progress_bar) {
+ ASSERT(IsFlagSet(HAS_PROGRESS_BAR));
+ progress_bar_ = progress_bar;
+ }
+
+ void ResetProgressBar() {
+ if (IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
+ set_progress_bar(0);
+ ClearFlag(MemoryChunk::HAS_PROGRESS_BAR);
+ }
+ }
+
+ bool IsLeftOfProgressBar(Object** slot) {
+ Address slot_address = reinterpret_cast<Address>(slot);
+ ASSERT(slot_address > this->address());
+ return (slot_address - (this->address() + kObjectStartOffset)) <
+ progress_bar();
+ }
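A hedged illustration of how a write barrier might consult this predicate (MarkValueGrey is a hypothetical helper, not from the source):

// If the slot lies left of the progress bar, that part of the large object
// has already been scanned black, so the freshly written value must be
// greyed explicitly to preserve the tricolor invariant.
if (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR) &&
    chunk->IsLeftOfProgressBar(slot)) {
  MarkValueGrey(value);  // hypothetical helper
}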
static void IncrementLiveBytesFromGC(Address address, int by) {
MemoryChunk::FromAddress(address)->IncrementLiveBytes(by);
@@ -504,8 +546,8 @@ class MemoryChunk {
static const size_t kWriteBarrierCounterOffset =
kSlotsBufferOffset + kPointerSize + kPointerSize;
- static const size_t kHeaderSize =
- kWriteBarrierCounterOffset + kPointerSize + kPointerSize;
+ static const size_t kHeaderSize = kWriteBarrierCounterOffset + kPointerSize +
+ kIntSize + kIntSize + kPointerSize;
static const int kBodyOffset =
CODE_POINTER_ALIGN(kHeaderSize + Bitmap::kSize);
@@ -616,6 +658,7 @@ class MemoryChunk {
int area_size() {
return static_cast<int>(area_end() - area_start());
}
+ bool CommitArea(size_t requested);
// Approximate amount of physical memory committed for this chunk.
size_t CommittedPhysicalMemory() {
@@ -649,10 +692,15 @@ class MemoryChunk {
SlotsBuffer* slots_buffer_;
SkipList* skip_list_;
intptr_t write_barrier_counter_;
+ // Used by the incremental marker to keep track of the scanning progress in
+ // large objects that have a progress bar and are scanned in increments.
+ int progress_bar_;
// Assuming the initial allocation on a page is sequential,
// count highest number of bytes ever allocated on the page.
int high_water_mark_;
+ intptr_t parallel_sweeping_;
+
static MemoryChunk* Initialize(Heap* heap,
Address base,
size_t size,
@@ -855,8 +903,11 @@ class CodeRange {
// Allocates a chunk of memory from the large-object portion of
// the code range. On platforms with no separate code range, should
// not be called.
- MUST_USE_RESULT Address AllocateRawMemory(const size_t requested,
+ MUST_USE_RESULT Address AllocateRawMemory(const size_t requested_size,
+ const size_t commit_size,
size_t* allocated);
+ bool CommitRawMemory(Address start, size_t length);
+ bool UncommitRawMemory(Address start, size_t length);
void FreeRawMemory(Address buf, size_t length);
private:
@@ -1004,14 +1055,19 @@ class MemoryAllocator {
void ReportStatistics();
#endif
- MemoryChunk* AllocateChunk(intptr_t body_size,
+ // Returns a MemoryChunk in which the memory region from commit_area_size to
+ // reserve_area_size of the chunk area is reserved but not committed; it
+ // can be committed later by calling MemoryChunk::CommitArea.
+ MemoryChunk* AllocateChunk(intptr_t reserve_area_size,
+ intptr_t commit_area_size,
Executability executable,
Space* space);
Address ReserveAlignedMemory(size_t requested,
size_t alignment,
VirtualMemory* controller);
- Address AllocateAlignedMemory(size_t requested,
+ Address AllocateAlignedMemory(size_t reserve_size,
+ size_t commit_size,
size_t alignment,
Executability executable,
VirtualMemory* controller);
@@ -1061,9 +1117,10 @@ class MemoryAllocator {
return CodePageAreaEndOffset() - CodePageAreaStartOffset();
}
- MUST_USE_RESULT static bool CommitCodePage(VirtualMemory* vm,
- Address start,
- size_t size);
+ MUST_USE_RESULT static bool CommitExecutableMemory(VirtualMemory* vm,
+ Address start,
+ size_t commit_size,
+ size_t reserved_size);
private:
Isolate* isolate_;
@@ -1349,6 +1406,63 @@ class FreeListNode: public HeapObject {
};
+// The free list category holds a pointer to the top element and a pointer to
+// the end element of the linked list of free memory blocks.
+class FreeListCategory {
+ public:
+ FreeListCategory() :
+ top_(NULL),
+ end_(NULL),
+ mutex_(OS::CreateMutex()),
+ available_(0) {}
+
+ ~FreeListCategory() {
+ delete mutex_;
+ }
+
+ intptr_t Concatenate(FreeListCategory* category);
+
+ void Reset();
+
+ void Free(FreeListNode* node, int size_in_bytes);
+
+ FreeListNode* PickNodeFromList(int* node_size);
+
+ intptr_t CountFreeListItemsInList(Page* p);
+
+ intptr_t EvictFreeListItemsInList(Page* p);
+
+ void RepairFreeList(Heap* heap);
+
+ FreeListNode** GetTopAddress() { return &top_; }
+ FreeListNode* top() const { return top_; }
+ void set_top(FreeListNode* top) { top_ = top; }
+
+ FreeListNode** GetEndAddress() { return &end_; }
+ FreeListNode* end() const { return end_; }
+ void set_end(FreeListNode* end) { end_ = end; }
+
+ int* GetAvailableAddress() { return &available_; }
+ int available() const { return available_; }
+ void set_available(int available) { available_ = available; }
+
+ Mutex* mutex() { return mutex_; }
+
+#ifdef DEBUG
+ intptr_t SumFreeList();
+ int FreeListLength();
+#endif
+
+ private:
+ FreeListNode* top_;
+ FreeListNode* end_;
+ Mutex* mutex_;
+
+ // Total available bytes in all blocks of this free list category.
+ int available_;
+};
+
+
// The free list for the old space. The free list is organized in such a way
// as to encourage objects allocated around the same time to be near each
// other. The normal way to allocate is intended to be by bumping a 'top'
@@ -1376,11 +1490,16 @@ class FreeList BASE_EMBEDDED {
public:
explicit FreeList(PagedSpace* owner);
+ intptr_t Concatenate(FreeList* free_list);
+
// Clear the free list.
void Reset();
// Return the number of bytes available on the free list.
- intptr_t available() { return available_; }
+ intptr_t available() {
+ return small_list_.available() + medium_list_.available() +
+ large_list_.available() + huge_list_.available();
+ }
// Place a node on the free list. The block of size 'size_in_bytes'
// starting at 'start' is placed on the free list. The return value is the
@@ -1398,8 +1517,6 @@ class FreeList BASE_EMBEDDED {
#ifdef DEBUG
void Zap();
- static intptr_t SumFreeList(FreeListNode* node);
- static int FreeListLength(FreeListNode* cur);
intptr_t SumFreeLists();
bool IsVeryLong();
#endif
@@ -1422,21 +1539,21 @@ class FreeList BASE_EMBEDDED {
intptr_t EvictFreeListItems(Page* p);
+ FreeListCategory* small_list() { return &small_list_; }
+ FreeListCategory* medium_list() { return &medium_list_; }
+ FreeListCategory* large_list() { return &large_list_; }
+ FreeListCategory* huge_list() { return &huge_list_; }
+
private:
// The size range of blocks, in bytes.
static const int kMinBlockSize = 3 * kPointerSize;
static const int kMaxBlockSize = Page::kMaxNonCodeHeapObjectSize;
- FreeListNode* PickNodeFromList(FreeListNode** list, int* node_size);
-
FreeListNode* FindNodeFor(int size_in_bytes, int* node_size);
PagedSpace* owner_;
Heap* heap_;
- // Total available bytes in all blocks on this free list.
- int available_;
-
static const int kSmallListMin = 0x20 * kPointerSize;
static const int kSmallListMax = 0xff * kPointerSize;
static const int kMediumListMax = 0x7ff * kPointerSize;
@@ -1444,10 +1561,10 @@ class FreeList BASE_EMBEDDED {
static const int kSmallAllocationMax = kSmallListMin - kPointerSize;
static const int kMediumAllocationMax = kSmallListMax;
static const int kLargeAllocationMax = kMediumListMax;
- FreeListNode* small_list_;
- FreeListNode* medium_list_;
- FreeListNode* large_list_;
- FreeListNode* huge_list_;
+ FreeListCategory small_list_;
+ FreeListCategory medium_list_;
+ FreeListCategory large_list_;
+ FreeListCategory huge_list_;
DISALLOW_IMPLICIT_CONSTRUCTORS(FreeList);
};
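For concreteness, a bit of arithmetic on the class boundaries above (assuming a 64-bit build, kPointerSize == 8; illustrative, not from the source):

//   kSmallListMin  = 0x20  * 8 ==   256 bytes (smaller blocks are dropped)
//   kSmallListMax  = 0xff  * 8 ==  2040 bytes
//   kMediumListMax = 0x7ff * 8 == 16376 bytes
// So Free() files a 1 KB block on the small list, a 4 KB block on the
// medium list, and anything past kLargeListMax on the huge list.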
@@ -1513,6 +1630,11 @@ class PagedSpace : public Space {
accounting_stats_.ClearSizeWaste();
}
+ // Increases the number of available bytes of that space.
+ void AddToAccountingStats(intptr_t bytes) {
+ accounting_stats_.DeallocateBytes(bytes);
+ }
+
// Available bytes without growing. These are the bytes on the free list.
// The bytes in the linear allocation area are not included in this total
// because updating the stats would slow down allocation. New pages are
@@ -1527,7 +1649,8 @@ class PagedSpace : public Space {
// As size, but the bytes in lazily swept pages are estimated and the bytes
// in the current linear allocation area are not included.
virtual intptr_t SizeOfObjects() {
- ASSERT(!IsSweepingComplete() || (unswept_free_bytes_ == 0));
+ // TODO(hpayer): broken when concurrent sweeping is turned on
+ ASSERT(!IsLazySweepingComplete() || (unswept_free_bytes_ == 0));
return Size() - unswept_free_bytes_ - (limit() - top());
}
@@ -1580,9 +1703,6 @@ class PagedSpace : public Space {
// Releases an unused page and shrinks the space.
void ReleasePage(Page* page);
- // Releases all of the unused pages.
- void ReleaseAllUnusedPages();
-
// The dummy page that anchors the linked list of pages.
Page* anchor() { return &anchor_; }
@@ -1625,7 +1745,7 @@ class PagedSpace : public Space {
first_unswept_page_ = first;
}
- void IncrementUnsweptFreeBytes(int by) {
+ void IncrementUnsweptFreeBytes(intptr_t by) {
unswept_free_bytes_ += by;
}
@@ -1634,14 +1754,27 @@ class PagedSpace : public Space {
unswept_free_bytes_ += (p->area_size() - p->LiveBytes());
}
+ void DecrementUnsweptFreeBytes(intptr_t by) {
+ unswept_free_bytes_ -= by;
+ }
+
void DecreaseUnsweptFreeBytes(Page* p) {
ASSERT(ShouldBeSweptLazily(p));
unswept_free_bytes_ -= (p->area_size() - p->LiveBytes());
}
+ void ResetUnsweptFreeBytes() {
+ unswept_free_bytes_ = 0;
+ }
+
bool AdvanceSweeper(intptr_t bytes_to_sweep);
- bool IsSweepingComplete() {
+ // When parallel sweeper threads are active, this function waits
+ // for them to complete; otherwise AdvanceSweeper is called with
+ // size_in_bytes.
+ bool EnsureSweeperProgress(intptr_t size_in_bytes);
+
+ bool IsLazySweepingComplete() {
return !first_unswept_page_->is_valid();
}
@@ -1665,6 +1798,8 @@ class PagedSpace : public Space {
}
protected:
+ FreeList* free_list() { return &free_list_; }
+
int area_size_;
// Maximum capacity of this space.
@@ -1714,6 +1849,7 @@ class PagedSpace : public Space {
MUST_USE_RESULT virtual HeapObject* SlowAllocateRaw(int size_in_bytes);
friend class PageIterator;
+ friend class SweeperThread;
};
@@ -2408,11 +2544,9 @@ class FixedSpace : public PagedSpace {
FixedSpace(Heap* heap,
intptr_t max_capacity,
AllocationSpace id,
- int object_size_in_bytes,
- const char* name)
+ int object_size_in_bytes)
: PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE),
- object_size_in_bytes_(object_size_in_bytes),
- name_(name) {
+ object_size_in_bytes_(object_size_in_bytes) {
page_extra_ = Page::kNonCodeObjectAreaSize % object_size_in_bytes;
}
@@ -2429,9 +2563,6 @@ class FixedSpace : public PagedSpace {
private:
// The size of objects in this space.
int object_size_in_bytes_;
-
- // The name of this space.
- const char* name_;
};
@@ -2442,7 +2573,7 @@ class MapSpace : public FixedSpace {
public:
// Creates a map space object with a maximum capacity.
MapSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id)
- : FixedSpace(heap, max_capacity, id, Map::kSize, "map"),
+ : FixedSpace(heap, max_capacity, id, Map::kSize),
max_map_space_pages_(kMaxMapPageIndex - 1) {
}
@@ -2483,7 +2614,7 @@ class CellSpace : public FixedSpace {
public:
// Creates a property cell space object with a maximum capacity.
CellSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id)
- : FixedSpace(heap, max_capacity, id, JSGlobalPropertyCell::kSize, "cell")
+ : FixedSpace(heap, max_capacity, id, JSGlobalPropertyCell::kSize)
{}
virtual int RoundSizeDownToObjectAlignment(int size) {
diff --git a/src/3rdparty/v8/src/store-buffer.cc b/src/3rdparty/v8/src/store-buffer.cc
index 66488ae..8a69164 100644
--- a/src/3rdparty/v8/src/store-buffer.cc
+++ b/src/3rdparty/v8/src/store-buffer.cc
@@ -687,10 +687,15 @@ void StoreBuffer::Compact() {
uintptr_t int_addr = reinterpret_cast<uintptr_t>(*current);
// Shift out the last bits including any tags.
int_addr >>= kPointerSizeLog2;
- int hash1 =
- ((int_addr ^ (int_addr >> kHashSetLengthLog2)) & (kHashSetLength - 1));
+ // The upper part of an address is basically random because of ASLR and OS
+ // non-determinism, so we use only the bits within a page for hashing to
+ // make v8's behavior (more) deterministic.
+ uintptr_t hash_addr =
+ int_addr & (Page::kPageAlignmentMask >> kPointerSizeLog2);
+ int hash1 = ((hash_addr ^ (hash_addr >> kHashSetLengthLog2)) &
+ (kHashSetLength - 1));
if (hash_set_1_[hash1] == int_addr) continue;
- uintptr_t hash2 = (int_addr - (int_addr >> kHashSetLengthLog2));
+ uintptr_t hash2 = (hash_addr - (hash_addr >> kHashSetLengthLog2));
hash2 ^= hash2 >> (kHashSetLengthLog2 * 2);
hash2 &= (kHashSetLength - 1);
if (hash_set_2_[hash2] == int_addr) continue;
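A worked example of the masking above (illustrative, assuming 1 MB pages and 64-bit pointers; slot is a hypothetical Address):

uintptr_t int_addr  = reinterpret_cast<uintptr_t>(slot) >> kPointerSizeLog2;
uintptr_t hash_addr = int_addr & (Page::kPageAlignmentMask >> kPointerSizeLog2);
// With a 1 MB page, hash_addr keeps roughly 17 significant bits of page
// offset; the ASLR-randomized upper address bits no longer influence
// hash1 or hash2, so the sets fill the same way on every run.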
diff --git a/src/3rdparty/v8/src/store-buffer.h b/src/3rdparty/v8/src/store-buffer.h
index 0ade8ce..79046d1 100644
--- a/src/3rdparty/v8/src/store-buffer.h
+++ b/src/3rdparty/v8/src/store-buffer.h
@@ -210,8 +210,7 @@ class StoreBufferRebuildScope {
explicit StoreBufferRebuildScope(Heap* heap,
StoreBuffer* store_buffer,
StoreBufferCallback callback)
- : heap_(heap),
- store_buffer_(store_buffer),
+ : store_buffer_(store_buffer),
stored_state_(store_buffer->store_buffer_rebuilding_enabled_),
stored_callback_(store_buffer->callback_) {
store_buffer_->store_buffer_rebuilding_enabled_ = true;
@@ -226,7 +225,6 @@ class StoreBufferRebuildScope {
}
private:
- Heap* heap_;
StoreBuffer* store_buffer_;
bool stored_state_;
StoreBufferCallback stored_callback_;
diff --git a/src/3rdparty/v8/src/string-search.h b/src/3rdparty/v8/src/string-search.h
index 8c3456a..86237f3 100644
--- a/src/3rdparty/v8/src/string-search.h
+++ b/src/3rdparty/v8/src/string-search.h
@@ -53,7 +53,11 @@ class StringSearchBase {
// a potentially less efficient search, but is a safe approximation.
// For needles using only characters in the same Unicode 256-code point page,
// there is no search speed degradation.
+#ifndef ENABLE_LATIN_1
static const int kAsciiAlphabetSize = 128;
+#else
+ static const int kAsciiAlphabetSize = 256;
+#endif
static const int kUC16AlphabetSize = Isolate::kUC16AlphabetSize;
// Bad-char shift table stored in the state. Its length is the alphabet size.
@@ -61,12 +65,12 @@ class StringSearchBase {
// to compensate for the algorithmic overhead compared to simple brute force.
static const int kBMMinPatternLength = 7;
- static inline bool IsAsciiString(Vector<const char>) {
+ static inline bool IsOneByteString(Vector<const uint8_t> string) {
return true;
}
- static inline bool IsAsciiString(Vector<const uc16> string) {
- return String::IsAscii(string.start(), string.length());
+ static inline bool IsOneByteString(Vector<const uc16> string) {
+ return String::IsOneByte(string.start(), string.length());
}
friend class Isolate;
@@ -81,7 +85,7 @@ class StringSearch : private StringSearchBase {
pattern_(pattern),
start_(Max(0, pattern.length() - kBMMaxShift)) {
if (sizeof(PatternChar) > sizeof(SubjectChar)) {
- if (!IsAsciiString(pattern_)) {
+ if (!IsOneByteString(pattern_)) {
strategy_ = &FailSearch;
return;
}
@@ -150,13 +154,25 @@ class StringSearch : private StringSearchBase {
void PopulateBoyerMooreTable();
+ static inline bool exceedsOneByte(uint8_t c) {
+#ifdef ENABLE_LATIN_1
+ return false;
+#else
+ return c > String::kMaxOneByteCharCodeU;
+#endif
+ }
+
+ static inline bool exceedsOneByte(uint16_t c) {
+ return c > String::kMaxOneByteCharCodeU;
+ }
+
static inline int CharOccurrence(int* bad_char_occurrence,
SubjectChar char_code) {
if (sizeof(SubjectChar) == 1) {
return bad_char_occurrence[static_cast<int>(char_code)];
}
if (sizeof(PatternChar) == 1) {
- if (static_cast<unsigned int>(char_code) > String::kMaxAsciiCharCodeU) {
+ if (exceedsOneByte(char_code)) {
return -1;
}
return bad_char_occurrence[static_cast<unsigned int>(char_code)];
@@ -223,7 +239,7 @@ int StringSearch<PatternChar, SubjectChar>::SingleCharSearch(
return static_cast<int>(pos - subject.start());
} else {
if (sizeof(PatternChar) > sizeof(SubjectChar)) {
- if (static_cast<uc16>(pattern_first_char) > String::kMaxAsciiCharCodeU) {
+ if (exceedsOneByte(pattern_first_char)) {
return -1;
}
}
diff --git a/src/3rdparty/v8/src/string-stream.cc b/src/3rdparty/v8/src/string-stream.cc
index cffd7b0..97b4d32 100644
--- a/src/3rdparty/v8/src/string-stream.cc
+++ b/src/3rdparty/v8/src/string-stream.cc
@@ -311,14 +311,14 @@ bool StringStream::Put(String* str) {
bool StringStream::Put(String* str, int start, int end) {
- StringInputBuffer name_buffer(str);
- name_buffer.Seek(start);
- for (int i = start; i < end && name_buffer.has_more(); i++) {
- int c = name_buffer.GetNext();
+ ConsStringIteratorOp op;
+ StringCharacterStream stream(str, &op, start);
+ for (int i = start; i < end && stream.HasMore(); i++) {
+ uint16_t c = stream.GetNext();
if (c >= 127 || c < 32) {
c = '?';
}
- if (!Put(c)) {
+ if (!Put(static_cast<char>(c))) {
return false; // Output was truncated.
}
}
@@ -493,7 +493,7 @@ void StringStream::PrintFunction(Object* f, Object* receiver, Code** code) {
// Common case: on-stack function present and resolved.
PrintPrototype(fun, receiver);
*code = fun->code();
- } else if (f->IsSymbol()) {
+ } else if (f->IsInternalizedString()) {
// Unresolved and megamorphic calls: Instead of the function
// we have the function name on the stack.
PrintName(f);
@@ -533,11 +533,13 @@ void StringStream::PrintFunction(Object* f, Object* receiver, Code** code) {
void StringStream::PrintPrototype(JSFunction* fun, Object* receiver) {
Object* name = fun->shared()->name();
bool print_name = false;
- Heap* heap = HEAP;
- for (Object* p = receiver; p != heap->null_value(); p = p->GetPrototype()) {
+ Isolate* isolate = fun->GetIsolate();
+ for (Object* p = receiver;
+ p != isolate->heap()->null_value();
+ p = p->GetPrototype(isolate)) {
if (p->IsJSObject()) {
Object* key = JSObject::cast(p)->SlowReverseLookup(fun);
- if (key != heap->undefined_value()) {
+ if (key != isolate->heap()->undefined_value()) {
if (!name->IsString() ||
!key->IsString() ||
!String::cast(name)->Equals(String::cast(key))) {
diff --git a/src/3rdparty/v8/src/string.js b/src/3rdparty/v8/src/string.js
index 6115930..2f8043c 100644
--- a/src/3rdparty/v8/src/string.js
+++ b/src/3rdparty/v8/src/string.js
@@ -186,11 +186,15 @@ function StringMatch(regexp) {
}
var subject = TO_STRING_INLINE(this);
if (IS_REGEXP(regexp)) {
+ // Emulate RegExp.prototype.exec's side effect in step 5, even though
+ // value is discarded.
+ ToInteger(regexp.lastIndex);
if (!regexp.global) return RegExpExecNoTests(regexp, subject, 0);
%_Log('regexp', 'regexp-match,%0S,%1r', [subject, regexp]);
// lastMatchInfo is defined in regexp.js.
var result = %StringMatch(subject, regexp, lastMatchInfo);
if (result !== null) lastMatchInfoOverride = null;
+ regexp.lastIndex = 0;
return result;
}
// Non-regexp argument.
@@ -199,16 +203,6 @@ function StringMatch(regexp) {
}
-// SubString is an internal function that returns the sub string of 'string'.
-// If resulting string is of length 1, we use the one character cache
-// otherwise we call the runtime system.
-function SubString(string, start, end) {
- // Use the one character string cache.
- if (start + 1 == end) return %_StringCharAt(string, start);
- return %_SubString(string, start, end);
-}
-
-
// This has the same size as the lastMatchInfo array, and can be used for
// functions that expect that structure to be returned. It is used when the
// needle is a string rather than a regexp. In this case we can't update
@@ -225,33 +219,61 @@ function StringReplace(search, replace) {
}
var subject = TO_STRING_INLINE(this);
- // Delegate to one of the regular expression variants if necessary.
+ // Decision tree for dispatch
+ // .. regexp search
+ // .... string replace
+ // ...... non-global search
+ // ........ empty string replace
+ // ........ non-empty string replace (with $-expansion)
+ // ...... global search
+ // ........ no need to circumvent last match info override
+ // ........ need to circumvent last match info override
+ // .... function replace
+ // ...... global search
+ // ...... non-global search
+ // .. string search
+ // .... special case that replaces with one single character
+ // ...... function replace
+ // ...... string replace (with $-expansion)
+
if (IS_REGEXP(search)) {
+ // Emulate RegExp.prototype.exec's side effect in step 5, even if
+ // value is discarded.
+ ToInteger(search.lastIndex);
%_Log('regexp', 'regexp-replace,%0r,%1S', [search, subject]);
- if (IS_SPEC_FUNCTION(replace)) {
- if (search.global) {
- return StringReplaceGlobalRegExpWithFunction(subject, search, replace);
- } else {
- return StringReplaceNonGlobalRegExpWithFunction(subject,
- search,
- replace);
+
+ if (!IS_SPEC_FUNCTION(replace)) {
+ replace = TO_STRING_INLINE(replace);
+
+ if (!search.global) {
+ // Non-global regexp search, string replace.
+ var match = DoRegExpExec(search, subject, 0);
+ if (match == null) {
+ search.lastIndex = 0;
+ return subject;
+ }
+ if (replace.length == 0) {
+ return %_SubString(subject, 0, match[CAPTURE0]) +
+ %_SubString(subject, match[CAPTURE1], subject.length);
+ }
+ return ExpandReplacement(replace, subject, lastMatchInfo,
+ %_SubString(subject, 0, match[CAPTURE0])) +
+ %_SubString(subject, match[CAPTURE1], subject.length);
}
- } else {
+
+ // Global regexp search, string replace.
+ search.lastIndex = 0;
if (lastMatchInfoOverride == null) {
- return %StringReplaceRegExpWithString(subject,
- search,
- TO_STRING_INLINE(replace),
- lastMatchInfo);
+ return %StringReplaceGlobalRegExpWithString(
+ subject, search, replace, lastMatchInfo);
} else {
// We use this hack to detect whether StringReplaceRegExpWithString
- // found at least one hit. In that case we need to remove any
+ // found at least one hit. In that case we need to remove any
// override.
var saved_subject = lastMatchInfo[LAST_SUBJECT_INDEX];
lastMatchInfo[LAST_SUBJECT_INDEX] = 0;
- var answer = %StringReplaceRegExpWithString(subject,
- search,
- TO_STRING_INLINE(replace),
- lastMatchInfo);
+ var answer = %StringReplaceGlobalRegExpWithString(
+ subject, search, replace, lastMatchInfo);
if (%_IsSmi(lastMatchInfo[LAST_SUBJECT_INDEX])) {
lastMatchInfo[LAST_SUBJECT_INDEX] = saved_subject;
} else {
@@ -260,10 +282,17 @@ function StringReplace(search, replace) {
return answer;
}
}
+
+ if (search.global) {
+ // Global regexp search, function replace.
+ return StringReplaceGlobalRegExpWithFunction(subject, search, replace);
+ }
+ // Non-global regexp search, function replace.
+ return StringReplaceNonGlobalRegExpWithFunction(subject, search, replace);
}
- // Convert the search argument to a string and search for it.
search = TO_STRING_INLINE(search);
+
if (search.length == 1 &&
subject.length > 0xFF &&
IS_STRING(replace) &&
@@ -277,7 +306,7 @@ function StringReplace(search, replace) {
if (start < 0) return subject;
var end = start + search.length;
- var result = SubString(subject, 0, start);
+ var result = %_SubString(subject, 0, start);
// Compute the string to replace with.
if (IS_SPEC_FUNCTION(replace)) {
@@ -286,11 +315,13 @@ function StringReplace(search, replace) {
} else {
reusableMatchInfo[CAPTURE0] = start;
reusableMatchInfo[CAPTURE1] = end;
- replace = TO_STRING_INLINE(replace);
- result = ExpandReplacement(replace, subject, reusableMatchInfo, result);
+ result = ExpandReplacement(TO_STRING_INLINE(replace),
+ subject,
+ reusableMatchInfo,
+ result);
}
- return result + SubString(subject, end, subject.length);
+ return result + %_SubString(subject, end, subject.length);
}
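
The dispatch above is observable from script. A minimal, engine-agnostic
sketch in plain JavaScript (the literals are invented for illustration):

    // Non-global regexp search, string replace (with $-expansion).
    "2013-01-01".replace(/(\d+)-(\d+)/, "$2/$1");  // "01/2013-01"

    // Global regexp search, string replace: every match is rewritten,
    // and the patch above resets lastIndex to 0 on the way out.
    var re = /\d+/g;
    re.lastIndex = 7;              // stale position from an earlier exec()
    "a1b22c333".replace(re, "#");  // "a#b#c#"
    re.lastIndex;                  // 0

    // Regexp search, function replace.
    "a1b22".replace(/\d+/g, function(m) { return m.length; });  // "a1b2"
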
@@ -304,7 +335,7 @@ function ExpandReplacement(string, subject, matchInfo, result) {
return result;
}
- if (next > 0) result += SubString(string, 0, next);
+ if (next > 0) result += %_SubString(string, 0, next);
while (true) {
var expansion = '$';
@@ -316,13 +347,39 @@ function ExpandReplacement(string, subject, matchInfo, result) {
result += '$';
} else if (peek == 38) { // $& - match
++position;
- result += SubString(subject, matchInfo[CAPTURE0], matchInfo[CAPTURE1]);
+ result +=
+ %_SubString(subject, matchInfo[CAPTURE0], matchInfo[CAPTURE1]);
} else if (peek == 96) { // $` - prefix
++position;
- result += SubString(subject, 0, matchInfo[CAPTURE0]);
+ result += %_SubString(subject, 0, matchInfo[CAPTURE0]);
} else if (peek == 39) { // $' - suffix
++position;
- result += SubString(subject, matchInfo[CAPTURE1], subject.length);
+ result += %_SubString(subject, matchInfo[CAPTURE1], subject.length);
+ } else if (peek >= 48 && peek <= 57) {
+ // Valid indices are $1 .. $9, $01 .. $09 and $10 .. $99
+ var scaled_index = (peek - 48) << 1;
+ var advance = 1;
+ var number_of_captures = NUMBER_OF_CAPTURES(matchInfo);
+ if (position + 1 < string.length) {
+ var next = %_StringCharCodeAt(string, position + 1);
+ if (next >= 48 && next <= 57) {
+ var new_scaled_index = scaled_index * 10 + ((next - 48) << 1);
+ if (new_scaled_index < number_of_captures) {
+ scaled_index = new_scaled_index;
+ advance = 2;
+ }
+ }
+ }
+ if (scaled_index != 0 && scaled_index < number_of_captures) {
+ var start = matchInfo[CAPTURE(scaled_index)];
+ if (start >= 0) {
+ result +=
+ %_SubString(subject, start, matchInfo[CAPTURE(scaled_index + 1)]);
+ }
+ position += advance;
+ } else {
+ result += '$';
+ }
} else {
result += '$';
}
@@ -337,14 +394,14 @@ function ExpandReplacement(string, subject, matchInfo, result) {
// haven't reached the end, we need to append the suffix.
if (next < 0) {
if (position < length) {
- result += SubString(string, position, length);
+ result += %_SubString(string, position, length);
}
return result;
}
// Append substring between the previous and the next $ character.
if (next > position) {
- result += SubString(string, position, next);
+ result += %_SubString(string, position, next);
}
}
return result;
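
The capture-index branch added above accepts $1 .. $9, $01 .. $09 and
$10 .. $99, and falls back to a literal '$' when the index is zero or out of
range. The resulting replacement-pattern semantics, sketched in plain
JavaScript (strings invented for illustration):

    var s = "hello world";
    s.replace(/world/, "<$&>");  // "hello <world>"  $& - whole match
    s.replace(/world/, "[$`]");  // "hello [hello ]" $` - prefix
    s.replace(/world/, "[$']");  // "hello []"       $' - suffix
    s.replace(/(l+)/, "($1)");   // "he(ll)o world"  $1 - first capture
    s.replace(/world/, "$$");    // "hello $"        $$ - literal dollar
    s.replace(/world/, "$9");    // "hello $9"       no such capture: literal
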
@@ -360,7 +417,7 @@ function CaptureString(string, lastCaptureInfo, index) {
// If start isn't valid, return undefined.
if (start < 0) return;
var end = lastCaptureInfo[CAPTURE(scaled + 1)];
- return SubString(string, start, end);
+ return %_SubString(string, start, end);
}
@@ -401,7 +458,7 @@ function StringReplaceGlobalRegExpWithFunction(subject, regexp, replace) {
// input string and some replacements that were returned from the replace
// function.
var match_start = 0;
- var override = new InternalArray(null, 0, subject);
+ var override = new InternalPackedArray(null, 0, subject);
var receiver = %GetDefaultReceiver(replace);
for (var i = 0; i < len; i++) {
var elem = res[i];
@@ -451,9 +508,12 @@ function StringReplaceGlobalRegExpWithFunction(subject, regexp, replace) {
function StringReplaceNonGlobalRegExpWithFunction(subject, regexp, replace) {
var matchInfo = DoRegExpExec(regexp, subject, 0);
- if (IS_NULL(matchInfo)) return subject;
+ if (IS_NULL(matchInfo)) {
+ regexp.lastIndex = 0;
+ return subject;
+ }
var index = matchInfo[CAPTURE0];
- var result = SubString(subject, 0, index);
+ var result = %_SubString(subject, 0, index);
var endOfMatch = matchInfo[CAPTURE1];
// Compute the parameter list consisting of the match, captures, index,
// and subject for the replace function invocation.
@@ -463,7 +523,7 @@ function StringReplaceNonGlobalRegExpWithFunction(subject, regexp, replace) {
var receiver = %GetDefaultReceiver(replace);
if (m == 1) {
// No captures, only the match, which is always valid.
- var s = SubString(subject, index, endOfMatch);
+ var s = %_SubString(subject, index, endOfMatch);
// Don't call directly to avoid exposing the built-in global object.
replacement = %_CallFunction(receiver, s, index, subject, replace);
} else {
@@ -480,7 +540,7 @@ function StringReplaceNonGlobalRegExpWithFunction(subject, regexp, replace) {
result += replacement; // The add method converts to string if necessary.
// Can't use matchInfo any more from here, since the function could
// overwrite it.
- return result + SubString(subject, endOfMatch, subject.length);
+ return result + %_SubString(subject, endOfMatch, subject.length);
}
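
The non-global function-replace path above assembles the callback's argument
list as match, captures, match offset, and subject. What the replacer sees,
sketched in plain JavaScript (names invented):

    "a1b22".replace(/(\d)(\d)?/, function(match, c1, c2, index, subject) {
      // match   = "1"        whole match
      // c1      = "1"        first capture
      // c2      = undefined  unmatched optional capture
      // index   = 1          offset of the match in the subject
      // subject = "a1b22"    the original string
      return "[" + match + "@" + index + "]";
    });  // "a[1@1]b22"
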
@@ -546,7 +606,7 @@ function StringSlice(start, end) {
return '';
}
- return SubString(s, start_i, end_i);
+ return %_SubString(s, start_i, end_i);
}
@@ -607,13 +667,13 @@ function StringSplitOnRegExp(subject, separator, limit, length) {
while (true) {
if (startIndex === length) {
- result.push(SubString(subject, currentIndex, length));
+ result.push(%_SubString(subject, currentIndex, length));
break;
}
var matchInfo = DoRegExpExec(separator, subject, startIndex);
if (matchInfo == null || length === (startMatch = matchInfo[CAPTURE0])) {
- result.push(SubString(subject, currentIndex, length));
+ result.push(%_SubString(subject, currentIndex, length));
break;
}
var endIndex = matchInfo[CAPTURE1];
@@ -624,11 +684,7 @@ function StringSplitOnRegExp(subject, separator, limit, length) {
continue;
}
- if (currentIndex + 1 == startMatch) {
- result.push(%_StringCharAt(subject, currentIndex));
- } else {
- result.push(%_SubString(subject, currentIndex, startMatch));
- }
+ result.push(%_SubString(subject, currentIndex, startMatch));
if (result.length === limit) break;
@@ -637,11 +693,7 @@ function StringSplitOnRegExp(subject, separator, limit, length) {
var start = matchInfo[i++];
var end = matchInfo[i++];
if (end != -1) {
- if (start + 1 == end) {
- result.push(%_StringCharAt(subject, start));
- } else {
- result.push(%_SubString(subject, start, end));
- }
+ result.push(%_SubString(subject, start, end));
} else {
result.push(void 0);
}
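
With the one-character cache gone, StringSplitOnRegExp pushes plain
%_SubString slices both for the separated pieces and for capture groups
(unmatched captures become undefined). The semantics it implements, sketched
in plain JavaScript:

    "a1b22c".split(/\d+/);     // ["a", "b", "c"]
    "a1b22c".split(/(\d+)/);   // ["a", "1", "b", "22", "c"]
    "a1b2".split(/(\d)(x)?/);  // ["a", "1", undefined, "b", "2", undefined, ""]
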
@@ -685,9 +737,7 @@ function StringSubstring(start, end) {
}
}
- return ((start_i + 1 == end_i)
- ? %_StringCharAt(s, start_i)
- : %_SubString(s, start_i, end_i));
+ return %_SubString(s, start_i, end_i);
}
@@ -729,9 +779,7 @@ function StringSubstr(start, n) {
var end = start + len;
if (end > s.length) end = s.length;
- return ((start + 1 == end)
- ? %_StringCharAt(s, start)
- : %_SubString(s, start, end));
+ return %_SubString(s, start, end);
}
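
StringSubstring and StringSubstr above likewise lose the one-character fast
path and defer to %_SubString once their arguments are clamped. The clamping
rules themselves are unchanged; for reference (plain JavaScript, values
invented):

    "abcdef".substring(4, 1);   // "bcd"  start > end: arguments are swapped
    "abcdef".substring(-3, 2);  // "ab"   negative start clamps to 0
    "abcdef".substr(-3, 2);     // "de"   substr counts a negative start from the end
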
@@ -799,7 +847,6 @@ function StringTrimRight() {
return %StringTrim(TO_STRING_INLINE(this), false, true);
}
-var static_charcode_array = new InternalArray(4);
// ECMA-262, section 15.5.3.2
function StringFromCharCode(code) {
@@ -809,17 +856,25 @@ function StringFromCharCode(code) {
return %_StringCharFromCode(code & 0xffff);
}
- // NOTE: This is not super-efficient, but it is necessary because we
- // want to avoid converting to numbers from within the virtual
- // machine. Maybe we can find another way of doing this?
- var codes = static_charcode_array;
- for (var i = 0; i < n; i++) {
+ var one_byte = %NewString(n, NEW_ONE_BYTE_STRING);
+ var i;
+ for (i = 0; i < n; i++) {
var code = %_Arguments(i);
- if (!%_IsSmi(code)) code = ToNumber(code);
- codes[i] = code;
+ if (!%_IsSmi(code)) code = ToNumber(code) & 0xffff;
+ if (code < 0) code = code & 0xffff;
+ if (code > 0xff) break;
+ %_OneByteSeqStringSetChar(one_byte, i, code);
+ }
+ if (i == n) return one_byte;
+ one_byte = %TruncateString(one_byte, i);
+
+ var two_byte = %NewString(n - i, NEW_TWO_BYTE_STRING);
+ for (var j = 0; i < n; i++, j++) {
+ var code = %_Arguments(i);
+ if (!%_IsSmi(code)) code = ToNumber(code) & 0xffff;
+ %_TwoByteSeqStringSetChar(two_byte, j, code);
}
- codes.length = n;
- return %StringFromCharCodeArray(codes);
+ return one_byte + two_byte;
}
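
The rewritten StringFromCharCode builds a one-byte (Latin-1) sequential
string first and only switches to a two-byte tail at the first code unit
above 0xff, returning the concatenation of the two pieces. A sketch of the
observable behavior (plain JavaScript, code points invented):

    String.fromCharCode(72, 105);          // "Hi"        stays one-byte
    String.fromCharCode(72, 105, 0x2603);  // "Hi\u2603"  tail goes two-byte
    String.fromCharCode(0x10048);          // "H"         truncated to 16 bits
    String.fromCharCode(-1);               // "\uffff"    wraps modulo 2^16
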
diff --git a/src/3rdparty/v8/src/stub-cache.cc b/src/3rdparty/v8/src/stub-cache.cc
index 3796d2d..2711bbf 100644
--- a/src/3rdparty/v8/src/stub-cache.cc
+++ b/src/3rdparty/v8/src/stub-cache.cc
@@ -63,7 +63,7 @@ Code* StubCache::Set(String* name, Map* map, Code* code) {
// Validate that the name does not move on scavenge, and that we
// can use identity checks instead of string equality checks.
ASSERT(!heap()->InNewSpace(name));
- ASSERT(name->IsSymbol());
+ ASSERT(name->IsInternalizedString());
// The state bits are not important to the hash function because
// the stub cache only contains monomorphic stubs. Make sure that
@@ -100,9 +100,70 @@ Code* StubCache::Set(String* name, Map* map, Code* code) {
}
+Handle<JSObject> StubCache::StubHolder(Handle<JSObject> receiver,
+ Handle<JSObject> holder) {
+ InlineCacheHolderFlag cache_holder =
+ IC::GetCodeCacheForObject(*receiver, *holder);
+ return Handle<JSObject>(IC::GetCodeCacheHolder(
+ isolate_, *receiver, cache_holder));
+}
+
+
+Handle<Code> StubCache::FindStub(Handle<String> name,
+ Handle<JSObject> stub_holder,
+ Code::Kind kind,
+ Code::StubType type,
+ Code::IcFragment fragment) {
+ Code::Flags flags = Code::ComputeMonomorphicFlags(kind, fragment, type);
+ Handle<Object> probe(stub_holder->map()->FindInCodeCache(*name, flags),
+ isolate_);
+ if (probe->IsCode()) return Handle<Code>::cast(probe);
+ return Handle<Code>::null();
+}
+
+
+Handle<Code> StubCache::FindHandler(Handle<String> name,
+ Handle<JSObject> handler_holder,
+ Code::Kind kind,
+ Code::StubType type) {
+ return FindStub(name, handler_holder, kind, type, Code::HANDLER_FRAGMENT);
+}
+
+
+Handle<Code> StubCache::ComputeMonomorphicIC(Handle<JSObject> receiver,
+ Handle<Code> handler,
+ Handle<String> name) {
+ Handle<Code> ic = FindStub(name, receiver, Code::LOAD_IC,
+ handler->type(), Code::IC_FRAGMENT);
+ if (!ic.is_null()) return ic;
+
+ LoadStubCompiler ic_compiler(isolate());
+ ic = ic_compiler.CompileMonomorphicIC(
+ Handle<Map>(receiver->map()), handler, name);
+
+ JSObject::UpdateMapCodeCache(receiver, name, ic);
+ return ic;
+}
+
+
+Handle<Code> StubCache::ComputeKeyedMonomorphicIC(Handle<JSObject> receiver,
+ Handle<Code> handler,
+ Handle<String> name) {
+ Handle<Code> ic = FindStub(name, receiver, Code::KEYED_LOAD_IC,
+ handler->type(), Code::IC_FRAGMENT);
+ if (!ic.is_null()) return ic;
+
+ KeyedLoadStubCompiler ic_compiler(isolate());
+ ic = ic_compiler.CompileMonomorphicIC(
+ Handle<Map>(receiver->map()), handler, name);
+
+ JSObject::UpdateMapCodeCache(receiver, name, ic);
+ return ic;
+}
+
+
Handle<Code> StubCache::ComputeLoadNonexistent(Handle<String> name,
Handle<JSObject> receiver) {
- ASSERT(receiver->IsGlobalObject() || receiver->HasFastProperties());
// If no global objects are present in the prototype chain, the load
// nonexistent IC stub can be shared for all names for a given map
// and we use the empty string for the map cache in that case. If
@@ -110,66 +171,74 @@ Handle<Code> StubCache::ComputeLoadNonexistent(Handle<String> name,
// property cells in the stub and therefore the stub will be
// specific to the name.
Handle<String> cache_name = factory()->empty_string();
- if (receiver->IsGlobalObject()) cache_name = name;
- Handle<JSObject> last = receiver;
- while (last->GetPrototype() != heap()->null_value()) {
- last = Handle<JSObject>(JSObject::cast(last->GetPrototype()));
- if (last->IsGlobalObject()) cache_name = name;
- }
+ Handle<JSObject> current;
+ Handle<Object> next = receiver;
+ Handle<GlobalObject> global;
+ do {
+ current = Handle<JSObject>::cast(next);
+ next = Handle<Object>(current->GetPrototype(), isolate_);
+ if (current->IsGlobalObject()) {
+ global = Handle<GlobalObject>::cast(current);
+ cache_name = name;
+ } else if (!current->HasFastProperties()) {
+ cache_name = name;
+ }
+ } while (!next->IsNull());
+
// Compile the stub that is either shared for all names or
// name specific if there are global objects involved.
- Code::Flags flags =
- Code::ComputeMonomorphicFlags(Code::LOAD_IC, Code::NONEXISTENT);
- Handle<Object> probe(receiver->map()->FindInCodeCache(*cache_name, flags));
- if (probe->IsCode()) return Handle<Code>::cast(probe);
+ Handle<Code> handler = FindHandler(
+ cache_name, receiver, Code::LOAD_IC, Code::NONEXISTENT);
+ if (!handler.is_null()) return handler;
LoadStubCompiler compiler(isolate_);
- Handle<Code> code =
- compiler.CompileLoadNonexistent(cache_name, receiver, last);
- PROFILE(isolate_, CodeCreateEvent(Logger::LOAD_IC_TAG, *code, *cache_name));
- GDBJIT(AddCode(GDBJITInterface::LOAD_IC, *cache_name, *code));
- JSObject::UpdateMapCodeCache(receiver, cache_name, code);
- return code;
+ handler =
+ compiler.CompileLoadNonexistent(receiver, current, cache_name, global);
+ JSObject::UpdateMapCodeCache(receiver, cache_name, handler);
+ return handler;
}
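
ComputeLoadNonexistent now walks the prototype chain itself: the handler is
shared per map (cached under the empty string) when the chain contains only
fast-properties, non-global objects, and becomes name-specific otherwise.
Roughly the kind of site it serves, sketched in plain JavaScript (names
invented; the caching behavior is engine-internal and approximate):

    function probe(o) { return o.missing; }  // property defined nowhere
    var a = {x: 1}, b = {x: 2};              // same map, fast properties
    probe(a);  // compiles a monomorphic "load nonexistent" handler
    probe(b);  // same map: the shared handler returns undefined directly
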
Handle<Code> StubCache::ComputeLoadField(Handle<String> name,
Handle<JSObject> receiver,
Handle<JSObject> holder,
- int field_index) {
- ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
- Code::Flags flags = Code::ComputeMonomorphicFlags(Code::LOAD_IC, Code::FIELD);
- Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
- if (probe->IsCode()) return Handle<Code>::cast(probe);
+ PropertyIndex field) {
+ if (receiver.is_identical_to(holder)) {
+ LoadFieldStub stub(LoadStubCompiler::receiver(),
+ field.is_inobject(holder),
+ field.translate(holder));
+ return stub.GetCode(isolate());
+ }
+
+ Handle<JSObject> stub_holder = StubHolder(receiver, holder);
+ Handle<Code> stub = FindHandler(
+ name, stub_holder, Code::LOAD_IC, Code::FIELD);
+ if (!stub.is_null()) return stub;
LoadStubCompiler compiler(isolate_);
- Handle<Code> code =
- compiler.CompileLoadField(receiver, holder, field_index, name);
- PROFILE(isolate_, CodeCreateEvent(Logger::LOAD_IC_TAG, *code, *name));
- GDBJIT(AddCode(GDBJITInterface::LOAD_IC, *name, *code));
- JSObject::UpdateMapCodeCache(receiver, name, code);
- return code;
+ Handle<Code> handler =
+ compiler.CompileLoadField(receiver, holder, name, field);
+ JSObject::UpdateMapCodeCache(stub_holder, name, handler);
+ return handler;
}
-Handle<Code> StubCache::ComputeLoadCallback(Handle<String> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- Handle<AccessorInfo> callback) {
+Handle<Code> StubCache::ComputeLoadCallback(
+ Handle<String> name,
+ Handle<JSObject> receiver,
+ Handle<JSObject> holder,
+ Handle<ExecutableAccessorInfo> callback) {
ASSERT(v8::ToCData<Address>(callback->getter()) != 0);
- ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
- Code::Flags flags =
- Code::ComputeMonomorphicFlags(Code::LOAD_IC, Code::CALLBACKS);
- Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
- if (probe->IsCode()) return Handle<Code>::cast(probe);
+ Handle<JSObject> stub_holder = StubHolder(receiver, holder);
+ Handle<Code> stub = FindHandler(
+ name, stub_holder, Code::LOAD_IC, Code::CALLBACKS);
+ if (!stub.is_null()) return stub;
LoadStubCompiler compiler(isolate_);
- Handle<Code> code =
- compiler.CompileLoadCallback(name, receiver, holder, callback);
- PROFILE(isolate_, CodeCreateEvent(Logger::LOAD_IC_TAG, *code, *name));
- GDBJIT(AddCode(GDBJITInterface::LOAD_IC, *name, *code));
- JSObject::UpdateMapCodeCache(receiver, name, code);
- return code;
+ Handle<Code> handler =
+ compiler.CompileLoadCallback(receiver, holder, name, callback);
+ JSObject::UpdateMapCodeCache(stub_holder, name, handler);
+ return handler;
}
@@ -177,19 +246,16 @@ Handle<Code> StubCache::ComputeLoadViaGetter(Handle<String> name,
Handle<JSObject> receiver,
Handle<JSObject> holder,
Handle<JSFunction> getter) {
- ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
- Code::Flags flags =
- Code::ComputeMonomorphicFlags(Code::LOAD_IC, Code::CALLBACKS);
- Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
- if (probe->IsCode()) return Handle<Code>::cast(probe);
+ Handle<JSObject> stub_holder = StubHolder(receiver, holder);
+ Handle<Code> stub = FindHandler(
+ name, stub_holder, Code::LOAD_IC, Code::CALLBACKS);
+ if (!stub.is_null()) return stub;
LoadStubCompiler compiler(isolate_);
- Handle<Code> code =
- compiler.CompileLoadViaGetter(name, receiver, holder, getter);
- PROFILE(isolate_, CodeCreateEvent(Logger::LOAD_IC_TAG, *code, *name));
- GDBJIT(AddCode(GDBJITInterface::LOAD_IC, *name, *code));
- JSObject::UpdateMapCodeCache(receiver, name, code);
- return code;
+ Handle<Code> handler =
+ compiler.CompileLoadViaGetter(receiver, holder, name, getter);
+ JSObject::UpdateMapCodeCache(stub_holder, name, handler);
+ return handler;
}
@@ -197,42 +263,37 @@ Handle<Code> StubCache::ComputeLoadConstant(Handle<String> name,
Handle<JSObject> receiver,
Handle<JSObject> holder,
Handle<JSFunction> value) {
- ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
- Code::Flags flags =
- Code::ComputeMonomorphicFlags(Code::LOAD_IC, Code::CONSTANT_FUNCTION);
- Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
- if (probe->IsCode()) return Handle<Code>::cast(probe);
+ Handle<JSObject> stub_holder = StubHolder(receiver, holder);
+ Handle<Code> handler = FindHandler(
+ name, stub_holder, Code::LOAD_IC, Code::CONSTANT_FUNCTION);
+ if (!handler.is_null()) return handler;
LoadStubCompiler compiler(isolate_);
- Handle<Code> code =
- compiler.CompileLoadConstant(receiver, holder, value, name);
- PROFILE(isolate_, CodeCreateEvent(Logger::LOAD_IC_TAG, *code, *name));
- GDBJIT(AddCode(GDBJITInterface::LOAD_IC, *name, *code));
- JSObject::UpdateMapCodeCache(receiver, name, code);
- return code;
+ handler = compiler.CompileLoadConstant(receiver, holder, name, value);
+ JSObject::UpdateMapCodeCache(stub_holder, name, handler);
+
+ return handler;
}
Handle<Code> StubCache::ComputeLoadInterceptor(Handle<String> name,
Handle<JSObject> receiver,
Handle<JSObject> holder) {
- ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
- Code::Flags flags =
- Code::ComputeMonomorphicFlags(Code::LOAD_IC, Code::INTERCEPTOR);
- Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
- if (probe->IsCode()) return Handle<Code>::cast(probe);
+ Handle<JSObject> stub_holder = StubHolder(receiver, holder);
+ Handle<Code> stub = FindHandler(
+ name, stub_holder, Code::LOAD_IC, Code::INTERCEPTOR);
+ if (!stub.is_null()) return stub;
LoadStubCompiler compiler(isolate_);
- Handle<Code> code =
+ Handle<Code> handler =
compiler.CompileLoadInterceptor(receiver, holder, name);
- PROFILE(isolate_, CodeCreateEvent(Logger::LOAD_IC_TAG, *code, *name));
- GDBJIT(AddCode(GDBJITInterface::LOAD_IC, *name, *code));
- JSObject::UpdateMapCodeCache(receiver, name, code);
- return code;
+ JSObject::UpdateMapCodeCache(stub_holder, name, handler);
+ return handler;
}
-Handle<Code> StubCache::ComputeLoadNormal() {
+Handle<Code> StubCache::ComputeLoadNormal(Handle<String> name,
+ Handle<JSObject> receiver) {
return isolate_->builtins()->LoadIC_Normal();
}
@@ -242,39 +303,40 @@ Handle<Code> StubCache::ComputeLoadGlobal(Handle<String> name,
Handle<GlobalObject> holder,
Handle<JSGlobalPropertyCell> cell,
bool is_dont_delete) {
- ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
- Code::Flags flags =
- Code::ComputeMonomorphicFlags(Code::LOAD_IC, Code::NORMAL);
- Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
- if (probe->IsCode()) return Handle<Code>::cast(probe);
+ Handle<JSObject> stub_holder = StubHolder(receiver, holder);
+ Handle<Code> stub = FindStub(
+ name, stub_holder, Code::LOAD_IC, Code::NORMAL, Code::IC_FRAGMENT);
+ if (!stub.is_null()) return stub;
LoadStubCompiler compiler(isolate_);
- Handle<Code> code =
+ Handle<Code> ic =
compiler.CompileLoadGlobal(receiver, holder, cell, name, is_dont_delete);
- PROFILE(isolate_, CodeCreateEvent(Logger::LOAD_IC_TAG, *code, *name));
- GDBJIT(AddCode(GDBJITInterface::LOAD_IC, *name, *code));
- JSObject::UpdateMapCodeCache(receiver, name, code);
- return code;
+ JSObject::UpdateMapCodeCache(stub_holder, name, ic);
+ return ic;
}
Handle<Code> StubCache::ComputeKeyedLoadField(Handle<String> name,
Handle<JSObject> receiver,
Handle<JSObject> holder,
- int field_index) {
- ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
- Code::Flags flags =
- Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, Code::FIELD);
- Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
- if (probe->IsCode()) return Handle<Code>::cast(probe);
+ PropertyIndex field) {
+ if (receiver.is_identical_to(holder)) {
+ LoadFieldStub stub(KeyedLoadStubCompiler::receiver(),
+ field.is_inobject(holder),
+ field.translate(holder));
+ return stub.GetCode(isolate());
+ }
+
+ Handle<JSObject> stub_holder = StubHolder(receiver, holder);
+ Handle<Code> stub = FindHandler(
+ name, stub_holder, Code::KEYED_LOAD_IC, Code::FIELD);
+ if (!stub.is_null()) return stub;
KeyedLoadStubCompiler compiler(isolate_);
- Handle<Code> code =
- compiler.CompileLoadField(name, receiver, holder, field_index);
- PROFILE(isolate_, CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, *code, *name));
- GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, *name, *code));
- JSObject::UpdateMapCodeCache(receiver, name, code);
- return code;
+ Handle<Code> handler =
+ compiler.CompileLoadField(receiver, holder, name, field);
+ JSObject::UpdateMapCodeCache(stub_holder, name, handler);
+ return handler;
}
@@ -282,37 +344,31 @@ Handle<Code> StubCache::ComputeKeyedLoadConstant(Handle<String> name,
Handle<JSObject> receiver,
Handle<JSObject> holder,
Handle<JSFunction> value) {
- ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
- Code::Flags flags = Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC,
- Code::CONSTANT_FUNCTION);
- Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
- if (probe->IsCode()) return Handle<Code>::cast(probe);
+ Handle<JSObject> stub_holder = StubHolder(receiver, holder);
+ Handle<Code> handler = FindHandler(
+ name, stub_holder, Code::KEYED_LOAD_IC, Code::CONSTANT_FUNCTION);
+ if (!handler.is_null()) return handler;
KeyedLoadStubCompiler compiler(isolate_);
- Handle<Code> code =
- compiler.CompileLoadConstant(name, receiver, holder, value);
- PROFILE(isolate_, CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, *code, *name));
- GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, *name, *code));
- JSObject::UpdateMapCodeCache(receiver, name, code);
- return code;
+ handler = compiler.CompileLoadConstant(receiver, holder, name, value);
+ JSObject::UpdateMapCodeCache(stub_holder, name, handler);
+ return handler;
}
Handle<Code> StubCache::ComputeKeyedLoadInterceptor(Handle<String> name,
Handle<JSObject> receiver,
Handle<JSObject> holder) {
- ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
- Code::Flags flags =
- Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, Code::INTERCEPTOR);
- Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
- if (probe->IsCode()) return Handle<Code>::cast(probe);
+ Handle<JSObject> stub_holder = StubHolder(receiver, holder);
+ Handle<Code> stub = FindHandler(
+ name, stub_holder, Code::KEYED_LOAD_IC, Code::INTERCEPTOR);
+ if (!stub.is_null()) return stub;
KeyedLoadStubCompiler compiler(isolate_);
- Handle<Code> code = compiler.CompileLoadInterceptor(receiver, holder, name);
- PROFILE(isolate_, CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, *code, *name));
- GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, *name, *code));
- JSObject::UpdateMapCodeCache(receiver, name, code);
- return code;
+ Handle<Code> handler =
+ compiler.CompileLoadInterceptor(receiver, holder, name);
+ JSObject::UpdateMapCodeCache(stub_holder, name, handler);
+ return handler;
}
@@ -320,70 +376,17 @@ Handle<Code> StubCache::ComputeKeyedLoadCallback(
Handle<String> name,
Handle<JSObject> receiver,
Handle<JSObject> holder,
- Handle<AccessorInfo> callback) {
- ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
- Code::Flags flags =
- Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, Code::CALLBACKS);
- Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
- if (probe->IsCode()) return Handle<Code>::cast(probe);
-
- KeyedLoadStubCompiler compiler(isolate_);
- Handle<Code> code =
- compiler.CompileLoadCallback(name, receiver, holder, callback);
- PROFILE(isolate_, CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, *code, *name));
- GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, *name, *code));
- JSObject::UpdateMapCodeCache(receiver, name, code);
- return code;
-}
-
-
-Handle<Code> StubCache::ComputeKeyedLoadArrayLength(Handle<String> name,
- Handle<JSArray> receiver) {
- Code::Flags flags =
- Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, Code::CALLBACKS);
- Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
- if (probe->IsCode()) return Handle<Code>::cast(probe);
-
- KeyedLoadStubCompiler compiler(isolate_);
- Handle<Code> code = compiler.CompileLoadArrayLength(name);
- PROFILE(isolate_, CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, *code, *name));
- GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, *name, *code));
- JSObject::UpdateMapCodeCache(receiver, name, code);
- return code;
-}
-
-
-Handle<Code> StubCache::ComputeKeyedLoadStringLength(Handle<String> name,
- Handle<String> receiver) {
- Code::Flags flags =
- Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, Code::CALLBACKS);
- Handle<Map> map(receiver->map());
- Handle<Object> probe(map->FindInCodeCache(*name, flags));
- if (probe->IsCode()) return Handle<Code>::cast(probe);
+ Handle<ExecutableAccessorInfo> callback) {
+ Handle<JSObject> stub_holder = StubHolder(receiver, holder);
+ Handle<Code> stub = FindHandler(
+ name, stub_holder, Code::KEYED_LOAD_IC, Code::CALLBACKS);
+ if (!stub.is_null()) return stub;
KeyedLoadStubCompiler compiler(isolate_);
- Handle<Code> code = compiler.CompileLoadStringLength(name);
- PROFILE(isolate_, CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, *code, *name));
- GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, *name, *code));
- Map::UpdateCodeCache(map, name, code);
- return code;
-}
-
-
-Handle<Code> StubCache::ComputeKeyedLoadFunctionPrototype(
- Handle<String> name,
- Handle<JSFunction> receiver) {
- Code::Flags flags =
- Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, Code::CALLBACKS);
- Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
- if (probe->IsCode()) return Handle<Code>::cast(probe);
-
- KeyedLoadStubCompiler compiler(isolate_);
- Handle<Code> code = compiler.CompileLoadFunctionPrototype(name);
- PROFILE(isolate_, CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, *code, *name));
- GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, *name, *code));
- JSObject::UpdateMapCodeCache(receiver, name, code);
- return code;
+ Handle<Code> handler =
+ compiler.CompileLoadCallback(receiver, holder, name, callback);
+ JSObject::UpdateMapCodeCache(stub_holder, name, handler);
+ return handler;
}
@@ -395,83 +398,58 @@ Handle<Code> StubCache::ComputeStoreField(Handle<String> name,
Code::StubType type =
(transition.is_null()) ? Code::FIELD : Code::MAP_TRANSITION;
Code::Flags flags = Code::ComputeMonomorphicFlags(
- Code::STORE_IC, type, strict_mode);
- Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
+ Code::STORE_IC, strict_mode, type);
+ Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags),
+ isolate_);
if (probe->IsCode()) return Handle<Code>::cast(probe);
StoreStubCompiler compiler(isolate_, strict_mode);
Handle<Code> code =
compiler.CompileStoreField(receiver, field_index, transition, name);
- PROFILE(isolate_, CodeCreateEvent(Logger::STORE_IC_TAG, *code, *name));
- GDBJIT(AddCode(GDBJITInterface::STORE_IC, *name, *code));
JSObject::UpdateMapCodeCache(receiver, name, code);
return code;
}
-Handle<Code> StubCache::ComputeKeyedLoadOrStoreElement(
+Handle<Code> StubCache::ComputeKeyedLoadElement(Handle<Map> receiver_map) {
+ Code::Flags flags = Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC);
+ Handle<String> name =
+ isolate()->factory()->KeyedLoadElementMonomorphic_string();
+
+ Handle<Object> probe(receiver_map->FindInCodeCache(*name, flags), isolate_);
+ if (probe->IsCode()) return Handle<Code>::cast(probe);
+
+ KeyedLoadStubCompiler compiler(isolate());
+ Handle<Code> code = compiler.CompileLoadElement(receiver_map);
+
+ Map::UpdateCodeCache(receiver_map, name, code);
+ return code;
+}
+
+
+Handle<Code> StubCache::ComputeKeyedStoreElement(
Handle<Map> receiver_map,
- KeyedIC::StubKind stub_kind,
- StrictModeFlag strict_mode) {
- KeyedAccessGrowMode grow_mode =
- KeyedIC::GetGrowModeFromStubKind(stub_kind);
+ KeyedStoreIC::StubKind stub_kind,
+ StrictModeFlag strict_mode,
+ KeyedAccessGrowMode grow_mode) {
Code::ExtraICState extra_state =
Code::ComputeExtraICState(grow_mode, strict_mode);
- Code::Flags flags =
- Code::ComputeMonomorphicFlags(
- stub_kind == KeyedIC::LOAD ? Code::KEYED_LOAD_IC
- : Code::KEYED_STORE_IC,
- Code::NORMAL,
- extra_state);
- Handle<String> name;
- switch (stub_kind) {
- case KeyedIC::LOAD:
- name = isolate()->factory()->KeyedLoadElementMonomorphic_symbol();
- break;
- case KeyedIC::STORE_NO_TRANSITION:
- name = isolate()->factory()->KeyedStoreElementMonomorphic_symbol();
- break;
- case KeyedIC::STORE_AND_GROW_NO_TRANSITION:
- name = isolate()->factory()->KeyedStoreAndGrowElementMonomorphic_symbol();
- break;
- default:
- UNREACHABLE();
- break;
- }
- Handle<Object> probe(receiver_map->FindInCodeCache(*name, flags));
- if (probe->IsCode()) return Handle<Code>::cast(probe);
+ Code::Flags flags = Code::ComputeMonomorphicFlags(
+ Code::KEYED_STORE_IC, extra_state);
- Handle<Code> code;
- switch (stub_kind) {
- case KeyedIC::LOAD: {
- KeyedLoadStubCompiler compiler(isolate_);
- code = compiler.CompileLoadElement(receiver_map);
- break;
- }
- case KeyedIC::STORE_AND_GROW_NO_TRANSITION: {
- KeyedStoreStubCompiler compiler(isolate_, strict_mode,
- ALLOW_JSARRAY_GROWTH);
- code = compiler.CompileStoreElement(receiver_map);
- break;
- }
- case KeyedIC::STORE_NO_TRANSITION: {
- KeyedStoreStubCompiler compiler(isolate_, strict_mode,
- DO_NOT_ALLOW_JSARRAY_GROWTH);
- code = compiler.CompileStoreElement(receiver_map);
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
+ ASSERT(stub_kind == KeyedStoreIC::STORE_NO_TRANSITION ||
+ stub_kind == KeyedStoreIC::STORE_AND_GROW_NO_TRANSITION);
- ASSERT(!code.is_null());
+ Handle<String> name = stub_kind == KeyedStoreIC::STORE_NO_TRANSITION
+ ? isolate()->factory()->KeyedStoreElementMonomorphic_string()
+ : isolate()->factory()->KeyedStoreAndGrowElementMonomorphic_string();
+
+ Handle<Object> probe(receiver_map->FindInCodeCache(*name, flags), isolate_);
+ if (probe->IsCode()) return Handle<Code>::cast(probe);
+
+ KeyedStoreStubCompiler compiler(isolate(), strict_mode, grow_mode);
+ Handle<Code> code = compiler.CompileStoreElement(receiver_map);
- if (stub_kind == KeyedIC::LOAD) {
- PROFILE(isolate_, CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, *code, 0));
- } else {
- PROFILE(isolate_, CodeCreateEvent(Logger::KEYED_STORE_IC_TAG, *code, 0));
- }
Map::UpdateCodeCache(receiver_map, name, code);
return code;
}
@@ -489,35 +467,34 @@ Handle<Code> StubCache::ComputeStoreGlobal(Handle<String> name,
Handle<JSGlobalPropertyCell> cell,
StrictModeFlag strict_mode) {
Code::Flags flags = Code::ComputeMonomorphicFlags(
- Code::STORE_IC, Code::NORMAL, strict_mode);
- Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
+ Code::STORE_IC, strict_mode);
+ Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags),
+ isolate_);
if (probe->IsCode()) return Handle<Code>::cast(probe);
StoreStubCompiler compiler(isolate_, strict_mode);
Handle<Code> code = compiler.CompileStoreGlobal(receiver, cell, name);
- PROFILE(isolate_, CodeCreateEvent(Logger::STORE_IC_TAG, *code, *name));
- GDBJIT(AddCode(GDBJITInterface::STORE_IC, *name, *code));
JSObject::UpdateMapCodeCache(receiver, name, code);
return code;
}
-Handle<Code> StubCache::ComputeStoreCallback(Handle<String> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- Handle<AccessorInfo> callback,
- StrictModeFlag strict_mode) {
+Handle<Code> StubCache::ComputeStoreCallback(
+ Handle<String> name,
+ Handle<JSObject> receiver,
+ Handle<JSObject> holder,
+ Handle<ExecutableAccessorInfo> callback,
+ StrictModeFlag strict_mode) {
ASSERT(v8::ToCData<Address>(callback->setter()) != 0);
Code::Flags flags = Code::ComputeMonomorphicFlags(
- Code::STORE_IC, Code::CALLBACKS, strict_mode);
- Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
+ Code::STORE_IC, strict_mode, Code::CALLBACKS);
+ Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags),
+ isolate_);
if (probe->IsCode()) return Handle<Code>::cast(probe);
StoreStubCompiler compiler(isolate_, strict_mode);
Handle<Code> code =
compiler.CompileStoreCallback(name, receiver, holder, callback);
- PROFILE(isolate_, CodeCreateEvent(Logger::STORE_IC_TAG, *code, *name));
- GDBJIT(AddCode(GDBJITInterface::STORE_IC, *name, *code));
JSObject::UpdateMapCodeCache(receiver, name, code);
return code;
}
@@ -529,15 +506,14 @@ Handle<Code> StubCache::ComputeStoreViaSetter(Handle<String> name,
Handle<JSFunction> setter,
StrictModeFlag strict_mode) {
Code::Flags flags = Code::ComputeMonomorphicFlags(
- Code::STORE_IC, Code::CALLBACKS, strict_mode);
- Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
+ Code::STORE_IC, strict_mode, Code::CALLBACKS);
+ Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags),
+ isolate_);
if (probe->IsCode()) return Handle<Code>::cast(probe);
StoreStubCompiler compiler(isolate_, strict_mode);
Handle<Code> code =
compiler.CompileStoreViaSetter(name, receiver, holder, setter);
- PROFILE(isolate_, CodeCreateEvent(Logger::STORE_IC_TAG, *code, *name));
- GDBJIT(AddCode(GDBJITInterface::STORE_IC, *name, *code));
JSObject::UpdateMapCodeCache(receiver, name, code);
return code;
}
@@ -547,14 +523,13 @@ Handle<Code> StubCache::ComputeStoreInterceptor(Handle<String> name,
Handle<JSObject> receiver,
StrictModeFlag strict_mode) {
Code::Flags flags = Code::ComputeMonomorphicFlags(
- Code::STORE_IC, Code::INTERCEPTOR, strict_mode);
- Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
+ Code::STORE_IC, strict_mode, Code::INTERCEPTOR);
+ Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags),
+ isolate_);
if (probe->IsCode()) return Handle<Code>::cast(probe);
StoreStubCompiler compiler(isolate_, strict_mode);
Handle<Code> code = compiler.CompileStoreInterceptor(receiver, name);
- PROFILE(isolate_, CodeCreateEvent(Logger::STORE_IC_TAG, *code, *name));
- GDBJIT(AddCode(GDBJITInterface::STORE_IC, *name, *code));
JSObject::UpdateMapCodeCache(receiver, name, code);
return code;
}
@@ -567,16 +542,15 @@ Handle<Code> StubCache::ComputeKeyedStoreField(Handle<String> name,
Code::StubType type =
(transition.is_null()) ? Code::FIELD : Code::MAP_TRANSITION;
Code::Flags flags = Code::ComputeMonomorphicFlags(
- Code::KEYED_STORE_IC, type, strict_mode);
- Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
+ Code::KEYED_STORE_IC, strict_mode, type);
+ Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags),
+ isolate_);
if (probe->IsCode()) return Handle<Code>::cast(probe);
KeyedStoreStubCompiler compiler(isolate(), strict_mode,
DO_NOT_ALLOW_JSARRAY_GROWTH);
Handle<Code> code =
compiler.CompileStoreField(receiver, field_index, transition, name);
- PROFILE(isolate_, CodeCreateEvent(Logger::KEYED_STORE_IC_TAG, *code, *name));
- GDBJIT(AddCode(GDBJITInterface::KEYED_STORE_IC, *name, *code));
JSObject::UpdateMapCodeCache(receiver, name, code);
return code;
}
@@ -595,33 +569,44 @@ Handle<Code> StubCache::ComputeCallConstant(int argc,
// Compute the check type and the map.
InlineCacheHolderFlag cache_holder =
IC::GetCodeCacheForObject(*object, *holder);
- Handle<JSObject> map_holder(IC::GetCodeCacheHolder(*object, cache_holder));
+ Handle<JSObject> stub_holder(IC::GetCodeCacheHolder(
+ isolate_, *object, cache_holder));
// Compute check type based on receiver/holder.
CheckType check = RECEIVER_MAP_CHECK;
if (object->IsString()) {
check = STRING_CHECK;
+ } else if (object->IsSymbol()) {
+ check = SYMBOL_CHECK;
} else if (object->IsNumber()) {
check = NUMBER_CHECK;
} else if (object->IsBoolean()) {
check = BOOLEAN_CHECK;
}
- Code::Flags flags =
- Code::ComputeMonomorphicFlags(kind, Code::CONSTANT_FUNCTION, extra_state,
- cache_holder, argc);
- Handle<Object> probe(map_holder->map()->FindInCodeCache(*name, flags));
+ if (check != RECEIVER_MAP_CHECK &&
+ !function->IsBuiltin() &&
+ function->shared()->is_classic_mode()) {
+ // Calling non-strict non-builtins with a value as the receiver
+ // requires boxing.
+ return Handle<Code>::null();
+ }
+
+ Code::Flags flags = Code::ComputeMonomorphicFlags(
+ kind, extra_state, Code::CONSTANT_FUNCTION, argc, cache_holder);
+ Handle<Object> probe(stub_holder->map()->FindInCodeCache(*name, flags),
+ isolate_);
if (probe->IsCode()) return Handle<Code>::cast(probe);
CallStubCompiler compiler(isolate_, argc, kind, extra_state, cache_holder);
Handle<Code> code =
- compiler.CompileCallConstant(object, holder, function, name, check);
+ compiler.CompileCallConstant(object, holder, name, check, function);
code->set_check_type(check);
ASSERT_EQ(flags, code->flags());
PROFILE(isolate_,
CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG), *code, *name));
GDBJIT(AddCode(GDBJITInterface::CALL_IC, *name, *code));
- JSObject::UpdateMapCodeCache(map_holder, name, code);
+ JSObject::UpdateMapCodeCache(stub_holder, name, code);
return code;
}
@@ -632,23 +617,25 @@ Handle<Code> StubCache::ComputeCallField(int argc,
Handle<String> name,
Handle<Object> object,
Handle<JSObject> holder,
- int index) {
+ PropertyIndex index) {
// Compute the check type and the map.
InlineCacheHolderFlag cache_holder =
IC::GetCodeCacheForObject(*object, *holder);
- Handle<JSObject> map_holder(IC::GetCodeCacheHolder(*object, cache_holder));
+ Handle<JSObject> stub_holder(IC::GetCodeCacheHolder(
+ isolate_, *object, cache_holder));
// TODO(1233596): We cannot do receiver map check for non-JS objects
// because they may be represented as immediates without a
// map. Instead, we check against the map in the holder.
- if (object->IsNumber() || object->IsBoolean() || object->IsString()) {
+ if (object->IsNumber() || object->IsSymbol() ||
+ object->IsBoolean() || object->IsString()) {
object = holder;
}
- Code::Flags flags =
- Code::ComputeMonomorphicFlags(kind, Code::FIELD, extra_state,
- cache_holder, argc);
- Handle<Object> probe(map_holder->map()->FindInCodeCache(*name, flags));
+ Code::Flags flags = Code::ComputeMonomorphicFlags(
+ kind, extra_state, Code::FIELD, argc, cache_holder);
+ Handle<Object> probe(stub_holder->map()->FindInCodeCache(*name, flags),
+ isolate_);
if (probe->IsCode()) return Handle<Code>::cast(probe);
CallStubCompiler compiler(isolate_, argc, kind, extra_state, cache_holder);
@@ -659,7 +646,7 @@ Handle<Code> StubCache::ComputeCallField(int argc,
PROFILE(isolate_,
CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG), *code, *name));
GDBJIT(AddCode(GDBJITInterface::CALL_IC, *name, *code));
- JSObject::UpdateMapCodeCache(map_holder, name, code);
+ JSObject::UpdateMapCodeCache(stub_holder, name, code);
return code;
}
@@ -673,19 +660,21 @@ Handle<Code> StubCache::ComputeCallInterceptor(int argc,
// Compute the check type and the map.
InlineCacheHolderFlag cache_holder =
IC::GetCodeCacheForObject(*object, *holder);
- Handle<JSObject> map_holder(IC::GetCodeCacheHolder(*object, cache_holder));
+ Handle<JSObject> stub_holder(IC::GetCodeCacheHolder(
+ isolate_, *object, cache_holder));
// TODO(1233596): We cannot do receiver map check for non-JS objects
// because they may be represented as immediates without a
// map. Instead, we check against the map in the holder.
- if (object->IsNumber() || object->IsBoolean() || object->IsString()) {
+ if (object->IsNumber() || object->IsSymbol() ||
+ object->IsBoolean() || object->IsString()) {
object = holder;
}
- Code::Flags flags =
- Code::ComputeMonomorphicFlags(kind, Code::INTERCEPTOR, extra_state,
- cache_holder, argc);
- Handle<Object> probe(map_holder->map()->FindInCodeCache(*name, flags));
+ Code::Flags flags = Code::ComputeMonomorphicFlags(
+ kind, extra_state, Code::INTERCEPTOR, argc, cache_holder);
+ Handle<Object> probe(stub_holder->map()->FindInCodeCache(*name, flags),
+ isolate_);
if (probe->IsCode()) return Handle<Code>::cast(probe);
CallStubCompiler compiler(isolate(), argc, kind, extra_state, cache_holder);
@@ -696,7 +685,7 @@ Handle<Code> StubCache::ComputeCallInterceptor(int argc,
PROFILE(isolate(),
CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG), *code, *name));
GDBJIT(AddCode(GDBJITInterface::CALL_IC, *name, *code));
- JSObject::UpdateMapCodeCache(map_holder, name, code);
+ JSObject::UpdateMapCodeCache(stub_holder, name, code);
return code;
}
@@ -711,11 +700,12 @@ Handle<Code> StubCache::ComputeCallGlobal(int argc,
Handle<JSFunction> function) {
InlineCacheHolderFlag cache_holder =
IC::GetCodeCacheForObject(*receiver, *holder);
- Handle<JSObject> map_holder(IC::GetCodeCacheHolder(*receiver, cache_holder));
- Code::Flags flags =
- Code::ComputeMonomorphicFlags(kind, Code::NORMAL, extra_state,
- cache_holder, argc);
- Handle<Object> probe(map_holder->map()->FindInCodeCache(*name, flags));
+ Handle<JSObject> stub_holder(IC::GetCodeCacheHolder(
+ isolate_, *receiver, cache_holder));
+ Code::Flags flags = Code::ComputeMonomorphicFlags(
+ kind, extra_state, Code::NORMAL, argc, cache_holder);
+ Handle<Object> probe(stub_holder->map()->FindInCodeCache(*name, flags),
+ isolate_);
if (probe->IsCode()) return Handle<Code>::cast(probe);
CallStubCompiler compiler(isolate(), argc, kind, extra_state, cache_holder);
@@ -725,7 +715,7 @@ Handle<Code> StubCache::ComputeCallGlobal(int argc,
PROFILE(isolate(),
CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG), *code, *name));
GDBJIT(AddCode(GDBJITInterface::CALL_IC, *name, *code));
- JSObject::UpdateMapCodeCache(map_holder, name, code);
+ JSObject::UpdateMapCodeCache(stub_holder, name, code);
return code;
}
@@ -819,17 +809,17 @@ Handle<Code> StubCache::ComputeCallNormal(int argc,
if (entry != -1) return Handle<Code>(Code::cast(cache->ValueAt(entry)));
StubCompiler compiler(isolate_);
- Handle<Code> code = compiler.CompileCallNormal(flags, has_qml_global_receiver);
+ Handle<Code> code = compiler.CompileCallNormal(flags,
+ has_qml_global_receiver);
FillCache(isolate_, code);
return code;
}
-Handle<Code> StubCache::ComputeCallArguments(int argc, Code::Kind kind) {
- ASSERT(kind == Code::KEYED_CALL_IC);
+Handle<Code> StubCache::ComputeCallArguments(int argc) {
Code::Flags flags =
- Code::ComputeFlags(kind, MEGAMORPHIC, Code::kNoExtraICState,
- Code::NORMAL, argc);
+ Code::ComputeFlags(Code::KEYED_CALL_IC, MEGAMORPHIC,
+ Code::kNoExtraICState, Code::NORMAL, argc);
Handle<UnseededNumberDictionary> cache =
isolate_->factory()->non_monomorphic_cache();
int entry = cache->FindEntry(isolate_, flags);
@@ -881,13 +871,65 @@ Handle<Code> StubCache::ComputeCallMiss(int argc,
}
+Handle<Code> StubCache::ComputeLoadElementPolymorphic(
+ MapHandleList* receiver_maps) {
+ Code::Flags flags = Code::ComputeFlags(Code::KEYED_LOAD_IC, POLYMORPHIC);
+ Handle<PolymorphicCodeCache> cache =
+ isolate_->factory()->polymorphic_code_cache();
+ Handle<Object> probe = cache->Lookup(receiver_maps, flags);
+ if (probe->IsCode()) return Handle<Code>::cast(probe);
+
+ CodeHandleList handlers(receiver_maps->length());
+ KeyedLoadStubCompiler compiler(isolate_);
+ compiler.CompileElementHandlers(receiver_maps, &handlers);
+ Handle<Code> code = compiler.CompilePolymorphicIC(
+ receiver_maps, &handlers, factory()->empty_string(),
+ Code::NORMAL, ELEMENT);
+
+ isolate()->counters()->keyed_load_polymorphic_stubs()->Increment();
+
+ PolymorphicCodeCache::Update(cache, receiver_maps, flags, code);
+ return code;
+}
+
+
+Handle<Code> StubCache::ComputePolymorphicIC(MapHandleList* receiver_maps,
+ CodeHandleList* handlers,
+ Handle<String> name) {
+ LoadStubCompiler ic_compiler(isolate_);
+ Handle<Code> ic = ic_compiler.CompilePolymorphicIC(
+ receiver_maps, handlers, name, Code::NORMAL, PROPERTY);
+ return ic;
+}
+
+
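+
+ ComputePolymorphicIC above compiles one dispatcher over a list of receiver
+ maps and their per-map handlers. The script pattern that takes a load site
+ from monomorphic to polymorphic, sketched in plain JavaScript (names
+ invented; the exact recompilation points are engine-internal):
+
+     function getX(o) { return o.x; }
+     getX({x: 1});        // one map seen: monomorphic LOAD_IC
+     getX({x: 1, y: 2});  // second map: the site is rebuilt as a
+                          // polymorphic IC dispatching over both maps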
+Handle<Code> StubCache::ComputeStoreElementPolymorphic(
+ MapHandleList* receiver_maps,
+ KeyedAccessGrowMode grow_mode,
+ StrictModeFlag strict_mode) {
+ Handle<PolymorphicCodeCache> cache =
+ isolate_->factory()->polymorphic_code_cache();
+ Code::ExtraICState extra_state = Code::ComputeExtraICState(grow_mode,
+ strict_mode);
+ Code::Flags flags =
+ Code::ComputeFlags(Code::KEYED_STORE_IC, POLYMORPHIC, extra_state);
+ Handle<Object> probe = cache->Lookup(receiver_maps, flags);
+ if (probe->IsCode()) return Handle<Code>::cast(probe);
+
+ KeyedStoreStubCompiler compiler(isolate_, strict_mode, grow_mode);
+ Handle<Code> code = compiler.CompileStoreElementPolymorphic(receiver_maps);
+ PolymorphicCodeCache::Update(cache, receiver_maps, flags, code);
+ return code;
+}
+
+
#ifdef ENABLE_DEBUGGER_SUPPORT
Handle<Code> StubCache::ComputeCallDebugBreak(int argc,
Code::Kind kind) {
// Extra IC state is irrelevant for debug break ICs. They jump to
// the actual call ic to carry out the work.
Code::Flags flags =
- Code::ComputeFlags(kind, DEBUG_BREAK, Code::kNoExtraICState,
+ Code::ComputeFlags(kind, DEBUG_STUB, DEBUG_BREAK,
Code::NORMAL, argc);
Handle<UnseededNumberDictionary> cache =
isolate_->factory()->non_monomorphic_cache();
@@ -906,7 +948,7 @@ Handle<Code> StubCache::ComputeCallDebugPrepareStepIn(int argc,
// Extra IC state is irrelevant for debug break ICs. They jump to
// the actual call ic to carry out the work.
Code::Flags flags =
- Code::ComputeFlags(kind, DEBUG_PREPARE_STEP_IN, Code::kNoExtraICState,
+ Code::ComputeFlags(kind, DEBUG_STUB, DEBUG_PREPARE_STEP_IN,
Code::NORMAL, argc);
Handle<UnseededNumberDictionary> cache =
isolate_->factory()->non_monomorphic_cache();
@@ -984,35 +1026,9 @@ void StubCache::CollectMatchingMaps(SmallMapList* types,
// StubCompiler implementation.
-RUNTIME_FUNCTION(MaybeObject*, LoadCallbackProperty) {
- ASSERT(args[0]->IsJSObject());
- ASSERT(args[1]->IsJSObject());
- ASSERT(args[3]->IsSmi());
- AccessorInfo* callback = AccessorInfo::cast(args[4]);
- Address getter_address = v8::ToCData<Address>(callback->getter());
- v8::AccessorGetter fun = FUNCTION_CAST<v8::AccessorGetter>(getter_address);
- ASSERT(fun != NULL);
- ASSERT(callback->IsCompatibleReceiver(args[0]));
- v8::AccessorInfo info(&args[0]);
- HandleScope scope(isolate);
- v8::Handle<v8::Value> result;
- {
- // Leaving JavaScript.
- VMState state(isolate, EXTERNAL);
- ExternalCallbackScope call_scope(isolate, getter_address);
- result = fun(v8::Utils::ToLocal(args.at<String>(5)), info);
- }
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- if (result.IsEmpty()) return HEAP->undefined_value();
- Handle<Object> result_internal = v8::Utils::OpenHandle(*result);
- result_internal->VerifyApiCallResultType();
- return *result_internal;
-}
-
-
RUNTIME_FUNCTION(MaybeObject*, StoreCallbackProperty) {
JSObject* recv = JSObject::cast(args[0]);
- AccessorInfo* callback = AccessorInfo::cast(args[1]);
+ ExecutableAccessorInfo* callback = ExecutableAccessorInfo::cast(args[1]);
Address setter_address = v8::ToCData<Address>(callback->setter());
v8::AccessorSetter fun = FUNCTION_CAST<v8::AccessorSetter>(setter_address);
ASSERT(fun != NULL);
@@ -1081,21 +1097,21 @@ RUNTIME_FUNCTION(MaybeObject*, LoadPropertyWithInterceptorOnly) {
}
-static MaybeObject* ThrowReferenceError(String* name) {
+static MaybeObject* ThrowReferenceError(Isolate* isolate, String* name) {
// If the load is non-contextual, just return the undefined result.
// Note that both keyed and non-keyed loads may end up here, so we
// can't use either LoadIC or KeyedLoadIC constructors.
- IC ic(IC::NO_EXTRA_FRAME, Isolate::Current());
+ IC ic(IC::NO_EXTRA_FRAME, isolate);
ASSERT(ic.target()->is_load_stub() || ic.target()->is_keyed_load_stub());
- if (!ic.SlowIsContextual()) return HEAP->undefined_value();
+ if (!ic.SlowIsUndeclaredGlobal()) return HEAP->undefined_value();
// Throw a reference error.
- HandleScope scope;
+ HandleScope scope(isolate);
Handle<String> name_handle(name);
Handle<Object> error =
FACTORY->NewReferenceError("not_defined",
HandleVector(&name_handle, 1));
- return Isolate::Current()->Throw(*error);
+ return isolate->Throw(*error);
}
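
ThrowReferenceError now threads the isolate through explicitly and throws
only for undeclared-global (contextual) loads; plain property loads still
yield undefined. The distinction, in plain JavaScript:

    var o = {};
    o.missing;  // undefined: non-contextual load, no error
    missing;    // ReferenceError: missing is not defined (contextual load)
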
@@ -1157,7 +1173,7 @@ RUNTIME_FUNCTION(MaybeObject*, LoadPropertyWithInterceptorForLoad) {
// If the property is present, return it.
if (attr != ABSENT) return result;
- return ThrowReferenceError(String::cast(args[0]));
+ return ThrowReferenceError(isolate, String::cast(args[0]));
}
@@ -1235,7 +1251,8 @@ Handle<Code> StubCompiler::CompileCallPreMonomorphic(Code::Flags flags) {
}
-Handle<Code> StubCompiler::CompileCallNormal(Code::Flags flags, bool has_qml_global_receiver) {
+Handle<Code> StubCompiler::CompileCallNormal(Code::Flags flags,
+ bool has_qml_global_receiver) {
int argc = Code::ExtractArgumentsCountFromFlags(flags);
Code::Kind kind = Code::ExtractKindFromFlags(flags);
if (kind == Code::CALL_IC) {
@@ -1376,32 +1393,230 @@ void StubCompiler::LookupPostInterceptor(Handle<JSObject> holder,
}
-Handle<Code> LoadStubCompiler::GetCode(Code::StubType type,
- Handle<String> name) {
- Code::Flags flags = Code::ComputeMonomorphicFlags(Code::LOAD_IC, type);
- Handle<Code> code = GetCodeWithFlags(flags, name);
- PROFILE(isolate(), CodeCreateEvent(Logger::LOAD_IC_TAG, *code, *name));
+#define __ ACCESS_MASM(masm())
+
+
+Register BaseLoadStubCompiler::HandlerFrontendHeader(Handle<JSObject> object,
+ Register object_reg,
+ Handle<JSObject> holder,
+ Handle<String> name,
+ Label* miss) {
+ // Check the prototype chain.
+ return CheckPrototypes(object, object_reg, holder,
+ scratch1(), scratch2(), scratch3(),
+ name, miss, SKIP_RECEIVER);
+}
+
+
+Register BaseLoadStubCompiler::HandlerFrontend(Handle<JSObject> object,
+ Register object_reg,
+ Handle<JSObject> holder,
+ Handle<String> name,
+ Label* success) {
+ Label miss;
+
+ Register reg = HandlerFrontendHeader(object, object_reg, holder, name, &miss);
+
+ HandlerFrontendFooter(success, &miss);
+ return reg;
+}
+
+
+Handle<Code> BaseLoadStubCompiler::CompileLoadField(Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<String> name,
+ PropertyIndex field) {
+ Label miss;
+
+ Register reg = HandlerFrontendHeader(object, receiver(), holder, name, &miss);
+
+ LoadFieldStub stub(reg, field.is_inobject(holder), field.translate(holder));
+ GenerateTailCall(stub.GetCode(isolate()));
+
+ __ bind(&miss);
+ GenerateLoadMiss(masm(), kind());
+
+ // Return the generated code.
+ return GetCode(Code::HANDLER_FRAGMENT, Code::FIELD, name);
+}
+
+
+Handle<Code> BaseLoadStubCompiler::CompileLoadConstant(
+ Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<String> name,
+ Handle<JSFunction> value) {
+ Label success;
+ HandlerFrontend(object, receiver(), holder, name, &success);
+ __ bind(&success);
+ GenerateLoadConstant(value);
+
+ // Return the generated code.
+ return GetCode(Code::HANDLER_FRAGMENT, Code::CONSTANT_FUNCTION, name);
+}
+
+
+Handle<Code> BaseLoadStubCompiler::CompileLoadCallback(
+ Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<String> name,
+ Handle<ExecutableAccessorInfo> callback) {
+ Label success;
+
+ Register reg = CallbackHandlerFrontend(
+ object, receiver(), holder, name, &success, callback);
+ __ bind(&success);
+ GenerateLoadCallback(reg, callback);
+
+ // Return the generated code.
+ return GetCode(Code::HANDLER_FRAGMENT, Code::CALLBACKS, name);
+}
+
+
+Handle<Code> BaseLoadStubCompiler::CompileLoadInterceptor(
+ Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<String> name) {
+ Label success;
+
+ LookupResult lookup(isolate());
+ LookupPostInterceptor(holder, name, &lookup);
+
+ Register reg = HandlerFrontend(object, receiver(), holder, name, &success);
+ __ bind(&success);
+ // TODO(368): Compile in the whole chain: all the interceptors in
+ // prototypes and ultimate answer.
+ GenerateLoadInterceptor(reg, object, holder, &lookup, name);
+
+ // Return the generated code.
+ return GetCode(Code::HANDLER_FRAGMENT, Code::INTERCEPTOR, name);
+}
+
+
+void BaseLoadStubCompiler::GenerateLoadPostInterceptor(
+ Register interceptor_reg,
+ Handle<JSObject> interceptor_holder,
+ Handle<String> name,
+ LookupResult* lookup) {
+ Label success;
+ Handle<JSObject> holder(lookup->holder());
+ if (lookup->IsField()) {
+ PropertyIndex field = lookup->GetFieldIndex();
+ if (interceptor_holder.is_identical_to(holder)) {
+ LoadFieldStub stub(interceptor_reg,
+ field.is_inobject(holder),
+ field.translate(holder));
+ GenerateTailCall(stub.GetCode(isolate()));
+ } else {
+ // We found FIELD property in prototype chain of interceptor's holder.
+ // Retrieve a field from field's holder.
+ Register reg = HandlerFrontend(
+ interceptor_holder, interceptor_reg, holder, name, &success);
+ __ bind(&success);
+ GenerateLoadField(reg, holder, field);
+ }
+ } else {
+ // We found CALLBACKS property in prototype chain of interceptor's
+ // holder.
+ ASSERT(lookup->type() == CALLBACKS);
+ Handle<ExecutableAccessorInfo> callback(
+ ExecutableAccessorInfo::cast(lookup->GetCallbackObject()));
+ ASSERT(callback->getter() != NULL);
+
+ Register reg = CallbackHandlerFrontend(
+ interceptor_holder, interceptor_reg, holder, name, &success, callback);
+ __ bind(&success);
+ GenerateLoadCallback(reg, callback);
+ }
+}
+
+
+Handle<Code> BaseLoadStubCompiler::CompileMonomorphicIC(
+ Handle<Map> receiver_map,
+ Handle<Code> handler,
+ Handle<String> name) {
+ MapHandleList receiver_maps(1);
+ receiver_maps.Add(receiver_map);
+ CodeHandleList handlers(1);
+ handlers.Add(handler);
+ Code::StubType type = handler->type();
+ return CompilePolymorphicIC(&receiver_maps, &handlers, name, type, PROPERTY);
+}
+
+
+Handle<Code> LoadStubCompiler::CompileLoadViaGetter(
+ Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<String> name,
+ Handle<JSFunction> getter) {
+ Label success;
+ HandlerFrontend(object, receiver(), holder, name, &success);
+
+ __ bind(&success);
+ GenerateLoadViaGetter(masm(), getter);
+
+ // Return the generated code.
+ return GetCode(Code::HANDLER_FRAGMENT, Code::CALLBACKS, name);
+}
+
+
+#undef __
+
+
+void LoadStubCompiler::JitEvent(Handle<String> name, Handle<Code> code) {
GDBJIT(AddCode(GDBJITInterface::LOAD_IC, *name, *code));
- return code;
}
-Handle<Code> KeyedLoadStubCompiler::GetCode(Code::StubType type,
- Handle<String> name,
- InlineCacheState state) {
- Code::Flags flags = Code::ComputeFlags(
- Code::KEYED_LOAD_IC, state, Code::kNoExtraICState, type);
+void KeyedLoadStubCompiler::JitEvent(Handle<String> name, Handle<Code> code) {
+ GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, *name, *code));
+}
+
+
+Handle<Code> BaseLoadStubCompiler::GetCode(Code::IcFragment fragment,
+ Code::StubType type,
+ Handle<String> name,
+ InlineCacheState state) {
+ Code::Flags flags = Code::ComputeFlags(kind(), state, fragment, type);
Handle<Code> code = GetCodeWithFlags(flags, name);
- PROFILE(isolate(), CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, *code, *name));
- GDBJIT(AddCode(GDBJITInterface::LOAD_IC, *name, *code));
+ PROFILE(isolate(), CodeCreateEvent(log_kind(code), *code, *name));
+ JitEvent(name, code);
return code;
}
+void KeyedLoadStubCompiler::CompileElementHandlers(MapHandleList* receiver_maps,
+ CodeHandleList* handlers) {
+ for (int i = 0; i < receiver_maps->length(); ++i) {
+ Handle<Map> receiver_map = receiver_maps->at(i);
+ Handle<Code> cached_stub;
+
+ if ((receiver_map->instance_type() & kNotStringTag) == 0) {
+ cached_stub = isolate()->builtins()->KeyedLoadIC_String();
+ } else {
+ bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
+ ElementsKind elements_kind = receiver_map->elements_kind();
+
+ if (IsFastElementsKind(elements_kind) ||
+ IsExternalArrayElementsKind(elements_kind)) {
+ cached_stub =
+ KeyedLoadFastElementStub(is_js_array,
+ elements_kind).GetCode(isolate());
+ } else {
+ ASSERT(elements_kind == DICTIONARY_ELEMENTS);
+ cached_stub = KeyedLoadDictionaryElementStub().GetCode(isolate());
+ }
+ }
+
+ handlers->Add(cached_stub);
+ }
+}
+
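The handler choice in CompileElementHandlers follows the receiver map: string receivers get the builtin string stub, fast and external elements kinds get a fast-element stub, and everything else falls through to the dictionary stub. A compressed sketch of that selection, using illustrative names and an enum in place of instance type plus ElementsKind:

    #include <string>
    #include <vector>

    enum ReceiverKind { STRING_RECEIVER, FAST_ELEMENTS, EXTERNAL_ELEMENTS,
                        DICTIONARY_ELEMENTS };

    std::string ChooseHandler(ReceiverKind kind) {
      switch (kind) {
        case STRING_RECEIVER:   return "KeyedLoadIC_String";
        case FAST_ELEMENTS:
        case EXTERNAL_ELEMENTS: return "KeyedLoadFastElementStub";
        default:                return "KeyedLoadDictionaryElementStub";
      }
    }

    std::vector<std::string> CompileElementHandlers(
        const std::vector<ReceiverKind>& maps) {
      std::vector<std::string> handlers;
      for (ReceiverKind k : maps) handlers.push_back(ChooseHandler(k));
      return handlers;  // one handler per receiver map, in the same order
    }

    int main() {
      return CompileElementHandlers({STRING_RECEIVER}).size() == 1 ? 0 : 1;
    }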
+
Handle<Code> StoreStubCompiler::GetCode(Code::StubType type,
Handle<String> name) {
- Code::Flags flags =
- Code::ComputeMonomorphicFlags(Code::STORE_IC, type, strict_mode_);
+ Code::Flags flags = Code::ComputeMonomorphicFlags(
+ Code::STORE_IC, strict_mode_, type);
Handle<Code> code = GetCodeWithFlags(flags, name);
PROFILE(isolate(), CodeCreateEvent(Logger::STORE_IC_TAG, *code, *name));
GDBJIT(AddCode(GDBJITInterface::STORE_IC, *name, *code));
@@ -1423,6 +1638,50 @@ Handle<Code> KeyedStoreStubCompiler::GetCode(Code::StubType type,
}
+Handle<Code> KeyedStoreStubCompiler::CompileStoreElementPolymorphic(
+ MapHandleList* receiver_maps) {
+ // Collect MONOMORPHIC stubs for all |receiver_maps|.
+ CodeHandleList handlers(receiver_maps->length());
+ MapHandleList transitioned_maps(receiver_maps->length());
+ for (int i = 0; i < receiver_maps->length(); ++i) {
+ Handle<Map> receiver_map(receiver_maps->at(i));
+ Handle<Code> cached_stub;
+ Handle<Map> transitioned_map =
+ receiver_map->FindTransitionedMap(receiver_maps);
+
+ // TODO(mvstanton): The code below is doing pessimistic elements
+ // transitions. I would like to stop doing that and rely on Allocation Site
+ // Tracking to do a better job of ensuring the data types are what they need
+ // to be. Until all the pieces are in place, though, pessimistic elements
+ // transitions are still important for performance.
+ bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
+ ElementsKind elements_kind = receiver_map->elements_kind();
+ if (!transitioned_map.is_null()) {
+ cached_stub = ElementsTransitionAndStoreStub(
+ elements_kind,
+ transitioned_map->elements_kind(),
+ is_js_array,
+ strict_mode_,
+ grow_mode_).GetCode(isolate());
+ } else {
+ cached_stub = KeyedStoreElementStub(
+ is_js_array,
+ elements_kind,
+ grow_mode_).GetCode(isolate());
+ }
+ ASSERT(!cached_stub.is_null());
+ handlers.Add(cached_stub);
+ transitioned_maps.Add(transitioned_map);
+ }
+ Handle<Code> code =
+ CompileStorePolymorphic(receiver_maps, &handlers, &transitioned_maps);
+ isolate()->counters()->keyed_store_polymorphic_stubs()->Increment();
+ PROFILE(isolate(),
+ CodeCreateEvent(Logger::KEYED_STORE_POLYMORPHIC_IC_TAG, *code, 0));
+ return code;
+}
+
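Per receiver map, CompileStoreElementPolymorphic picks either a transition-and-store stub (when FindTransitionedMap finds a more general map within the polymorphic set) or a plain keyed-store stub. A sketch of that choice under illustrative types:

    #include <optional>
    #include <string>

    struct MapInfo {
      std::string elements_kind;
      std::optional<std::string> transitioned_kind;  // FindTransitionedMap()
    };

    std::string ChooseStoreStub(const MapInfo& map) {
      if (map.transitioned_kind) {
        // Transition the backing store first, then perform the store.
        return "ElementsTransitionAndStoreStub:" + map.elements_kind +
               "->" + *map.transitioned_kind;
      }
      return "KeyedStoreElementStub:" + map.elements_kind;
    }

    int main() {
      MapInfo smi{"FAST_SMI", "FAST_DOUBLE"};
      return ChooseStoreStub(smi).empty() ? 1 : 0;
    }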
+
void KeyedStoreStubCompiler::GenerateStoreDictionaryElement(
MacroAssembler* masm) {
KeyedStoreIC::GenerateSlow(masm);
@@ -1496,10 +1755,10 @@ Handle<Code> CallStubCompiler::GetCode(Code::StubType type,
Handle<String> name) {
int argc = arguments_.immediate();
Code::Flags flags = Code::ComputeMonomorphicFlags(kind_,
- type,
extra_state_,
- cache_holder_,
- argc);
+ type,
+ argc,
+ cache_holder_);
return GetCodeWithFlags(flags, name);
}
@@ -1547,6 +1806,7 @@ int CallOptimization::GetPrototypeDepthOfExpectedType(
while (!object.is_identical_to(holder)) {
if (object->IsInstanceOf(*expected_receiver_type_)) return depth;
object = Handle<JSObject>(JSObject::cast(object->GetPrototype()));
+ if (!object->map()->is_hidden_prototype()) return kInvalidProtoDepth;
++depth;
}
if (holder->IsInstanceOf(*expected_receiver_type_)) return depth;
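The added line above tightens GetPrototypeDepthOfExpectedType: the depth scan may only cross hidden prototypes (API-level implementation details), and hitting a visible prototype before the expected type invalidates the fast path. A standalone sketch of the walk, with a struct standing in for JSObject:

    // Illustrative model of the prototype-depth walk in the patch above.
    struct Obj {
      const Obj* prototype;
      bool hidden_prototype;        // map()->is_hidden_prototype()
      bool matches_expected_type;   // IsInstanceOf(expected_receiver_type_)
    };

    const int kInvalidProtoDepth = -1;

    int GetPrototypeDepth(const Obj* object, const Obj* holder) {
      int depth = 0;
      while (object != holder) {
        if (object->matches_expected_type) return depth;
        object = object->prototype;
        // New check: only hidden prototypes may be skipped over.
        if (!object->hidden_prototype) return kInvalidProtoDepth;
        ++depth;
      }
      return holder->matches_expected_type ? depth : kInvalidProtoDepth;
    }

    int main() {
      Obj holder{nullptr, true, true};
      Obj hidden{&holder, true, false};
      Obj receiver{&hidden, true, false};
      return GetPrototypeDepth(&receiver, &holder) == 2 ? 0 : 1;
    }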
diff --git a/src/3rdparty/v8/src/stub-cache.h b/src/3rdparty/v8/src/stub-cache.h
index ec9274b..e8eb6cf 100644
--- a/src/3rdparty/v8/src/stub-cache.h
+++ b/src/3rdparty/v8/src/stub-cache.h
@@ -74,40 +74,61 @@ class StubCache {
void Initialize();
+ Handle<JSObject> StubHolder(Handle<JSObject> receiver,
+ Handle<JSObject> holder);
+
+ Handle<Code> FindStub(Handle<String> name,
+ Handle<JSObject> stub_holder,
+ Code::Kind kind,
+ Code::StubType type,
+ Code::IcFragment fragment);
+
+ Handle<Code> FindHandler(Handle<String> name,
+ Handle<JSObject> stub_holder,
+ Code::Kind kind,
+ Code::StubType type);
+
+ Handle<Code> ComputeMonomorphicIC(Handle<JSObject> receiver,
+ Handle<Code> handler,
+ Handle<String> name);
+ Handle<Code> ComputeKeyedMonomorphicIC(Handle<JSObject> receiver,
+ Handle<Code> handler,
+ Handle<String> name);
// Computes the right stub matching. Inserts the result in the
// cache before returning. This might compile a stub if needed.
Handle<Code> ComputeLoadNonexistent(Handle<String> name,
- Handle<JSObject> receiver);
+ Handle<JSObject> object);
Handle<Code> ComputeLoadField(Handle<String> name,
- Handle<JSObject> receiver,
+ Handle<JSObject> object,
Handle<JSObject> holder,
- int field_index);
+ PropertyIndex field_index);
Handle<Code> ComputeLoadCallback(Handle<String> name,
- Handle<JSObject> receiver,
+ Handle<JSObject> object,
Handle<JSObject> holder,
- Handle<AccessorInfo> callback);
+ Handle<ExecutableAccessorInfo> callback);
Handle<Code> ComputeLoadViaGetter(Handle<String> name,
- Handle<JSObject> receiver,
+ Handle<JSObject> object,
Handle<JSObject> holder,
Handle<JSFunction> getter);
Handle<Code> ComputeLoadConstant(Handle<String> name,
- Handle<JSObject> receiver,
+ Handle<JSObject> object,
Handle<JSObject> holder,
Handle<JSFunction> value);
Handle<Code> ComputeLoadInterceptor(Handle<String> name,
- Handle<JSObject> receiver,
+ Handle<JSObject> object,
Handle<JSObject> holder);
- Handle<Code> ComputeLoadNormal();
+ Handle<Code> ComputeLoadNormal(Handle<String> name,
+ Handle<JSObject> object);
Handle<Code> ComputeLoadGlobal(Handle<String> name,
- Handle<JSObject> receiver,
+ Handle<JSObject> object,
Handle<GlobalObject> holder,
Handle<JSGlobalPropertyCell> cell,
bool is_dont_delete);
@@ -115,37 +136,29 @@ class StubCache {
// ---
Handle<Code> ComputeKeyedLoadField(Handle<String> name,
- Handle<JSObject> receiver,
+ Handle<JSObject> object,
Handle<JSObject> holder,
- int field_index);
+ PropertyIndex field_index);
- Handle<Code> ComputeKeyedLoadCallback(Handle<String> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- Handle<AccessorInfo> callback);
+ Handle<Code> ComputeKeyedLoadCallback(
+ Handle<String> name,
+ Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<ExecutableAccessorInfo> callback);
Handle<Code> ComputeKeyedLoadConstant(Handle<String> name,
- Handle<JSObject> receiver,
+ Handle<JSObject> object,
Handle<JSObject> holder,
Handle<JSFunction> value);
Handle<Code> ComputeKeyedLoadInterceptor(Handle<String> name,
- Handle<JSObject> receiver,
+ Handle<JSObject> object,
Handle<JSObject> holder);
- Handle<Code> ComputeKeyedLoadArrayLength(Handle<String> name,
- Handle<JSArray> receiver);
-
- Handle<Code> ComputeKeyedLoadStringLength(Handle<String> name,
- Handle<String> receiver);
-
- Handle<Code> ComputeKeyedLoadFunctionPrototype(Handle<String> name,
- Handle<JSFunction> receiver);
-
// ---
Handle<Code> ComputeStoreField(Handle<String> name,
- Handle<JSObject> receiver,
+ Handle<JSObject> object,
int field_index,
Handle<Map> transition,
StrictModeFlag strict_mode);
@@ -153,37 +166,40 @@ class StubCache {
Handle<Code> ComputeStoreNormal(StrictModeFlag strict_mode);
Handle<Code> ComputeStoreGlobal(Handle<String> name,
- Handle<GlobalObject> receiver,
+ Handle<GlobalObject> object,
Handle<JSGlobalPropertyCell> cell,
StrictModeFlag strict_mode);
Handle<Code> ComputeStoreCallback(Handle<String> name,
- Handle<JSObject> receiver,
+ Handle<JSObject> object,
Handle<JSObject> holder,
- Handle<AccessorInfo> callback,
+ Handle<ExecutableAccessorInfo> callback,
StrictModeFlag strict_mode);
Handle<Code> ComputeStoreViaSetter(Handle<String> name,
- Handle<JSObject> receiver,
+ Handle<JSObject> object,
Handle<JSObject> holder,
Handle<JSFunction> setter,
StrictModeFlag strict_mode);
Handle<Code> ComputeStoreInterceptor(Handle<String> name,
- Handle<JSObject> receiver,
+ Handle<JSObject> object,
StrictModeFlag strict_mode);
// ---
Handle<Code> ComputeKeyedStoreField(Handle<String> name,
- Handle<JSObject> receiver,
+ Handle<JSObject> object,
int field_index,
Handle<Map> transition,
StrictModeFlag strict_mode);
- Handle<Code> ComputeKeyedLoadOrStoreElement(Handle<Map> receiver_map,
- KeyedIC::StubKind stub_kind,
- StrictModeFlag strict_mode);
+ Handle<Code> ComputeKeyedLoadElement(Handle<Map> receiver_map);
+
+ Handle<Code> ComputeKeyedStoreElement(Handle<Map> receiver_map,
+ KeyedStoreIC::StubKind stub_kind,
+ StrictModeFlag strict_mode,
+ KeyedAccessGrowMode grow_mode);
// ---
@@ -193,7 +209,7 @@ class StubCache {
Handle<String> name,
Handle<Object> object,
Handle<JSObject> holder,
- int index);
+ PropertyIndex index);
Handle<Code> ComputeCallConstant(int argc,
Code::Kind,
@@ -214,7 +230,7 @@ class StubCache {
Code::Kind,
Code::ExtraICState extra_state,
Handle<String> name,
- Handle<JSObject> receiver,
+ Handle<JSObject> object,
Handle<GlobalObject> holder,
Handle<JSGlobalPropertyCell> cell,
Handle<JSFunction> function);
@@ -234,7 +250,7 @@ class StubCache {
Code::ExtraICState state,
bool has_qml_global_receiver);
- Handle<Code> ComputeCallArguments(int argc, Code::Kind kind);
+ Handle<Code> ComputeCallArguments(int argc);
Handle<Code> ComputeCallMegamorphic(int argc,
Code::Kind kind,
@@ -244,6 +260,17 @@ class StubCache {
Code::Kind kind,
Code::ExtraICState state);
+ // ---
+
+ Handle<Code> ComputeLoadElementPolymorphic(MapHandleList* receiver_maps);
+ Handle<Code> ComputeStoreElementPolymorphic(MapHandleList* receiver_maps,
+ KeyedAccessGrowMode grow_mode,
+ StrictModeFlag strict_mode);
+
+ Handle<Code> ComputePolymorphicIC(MapHandleList* receiver_maps,
+ CodeHandleList* handlers,
+ Handle<String> name);
+
// Finds the Code object stored in the Heap::non_monomorphic_cache().
Code* FindCallInitialize(int argc, RelocInfo::Mode mode, Code::Kind kind);
@@ -401,7 +428,6 @@ class StubCache {
// Support functions for IC stubs for callbacks.
-DECLARE_RUNTIME_FUNCTION(MaybeObject*, LoadCallbackProperty);
DECLARE_RUNTIME_FUNCTION(MaybeObject*, StoreCallbackProperty);
@@ -414,6 +440,10 @@ DECLARE_RUNTIME_FUNCTION(MaybeObject*, CallInterceptorProperty);
DECLARE_RUNTIME_FUNCTION(MaybeObject*, KeyedLoadPropertyWithInterceptor);
+enum PrototypeCheckType { CHECK_ALL_MAPS, SKIP_RECEIVER };
+enum IcCheckType { ELEMENT, PROPERTY };
+
+
// The stub compilers compile stubs for the stub cache.
class StubCompiler BASE_EMBEDDED {
public:
@@ -424,7 +454,8 @@ class StubCompiler BASE_EMBEDDED {
// is extracted from the code flags.
Handle<Code> CompileCallInitialize(Code::Flags flags);
Handle<Code> CompileCallPreMonomorphic(Code::Flags flags);
- Handle<Code> CompileCallNormal(Code::Flags flags, bool has_qml_global_receiver);
+ Handle<Code> CompileCallNormal(Code::Flags flags,
+ bool has_qml_global_receiver);
Handle<Code> CompileCallMegamorphic(Code::Flags flags);
Handle<Code> CompileCallArguments(Code::Flags flags);
Handle<Code> CompileCallMiss(Code::Flags flags);
@@ -454,7 +485,12 @@ class StubCompiler BASE_EMBEDDED {
Register dst,
Register src,
Handle<JSObject> holder,
- int index);
+ PropertyIndex index);
+ static void DoGenerateFastPropertyLoad(MacroAssembler* masm,
+ Register dst,
+ Register src,
+ bool inobject,
+ int index);
static void GenerateLoadArrayLength(MacroAssembler* masm,
Register receiver,
@@ -485,8 +521,8 @@ class StubCompiler BASE_EMBEDDED {
Register scratch2,
Label* miss_label);
- static void GenerateLoadMiss(MacroAssembler* masm,
- Code::Kind kind);
+ static void GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind);
+ static void GenerateStoreMiss(MacroAssembler* masm, Code::Kind kind);
static void GenerateKeyedLoadMissForceGeneric(MacroAssembler* masm);
@@ -512,9 +548,10 @@ class StubCompiler BASE_EMBEDDED {
Register scratch1,
Register scratch2,
Handle<String> name,
- Label* miss) {
+ Label* miss,
+ PrototypeCheckType check = CHECK_ALL_MAPS) {
return CheckPrototypes(object, object_reg, holder, holder_reg, scratch1,
- scratch2, name, kInvalidProtoDepth, miss);
+ scratch2, name, kInvalidProtoDepth, miss, check);
}
Register CheckPrototypes(Handle<JSObject> object,
@@ -525,7 +562,8 @@ class StubCompiler BASE_EMBEDDED {
Register scratch2,
Handle<String> name,
int save_at_depth,
- Label* miss);
+ Label* miss,
+ PrototypeCheckType check = CHECK_ALL_MAPS);
protected:
@@ -535,58 +573,6 @@ class StubCompiler BASE_EMBEDDED {
MacroAssembler* masm() { return &masm_; }
void set_failure(Failure* failure) { failure_ = failure; }
- void GenerateLoadField(Handle<JSObject> object,
- Handle<JSObject> holder,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- int index,
- Handle<String> name,
- Label* miss);
-
- void GenerateLoadCallback(Handle<JSObject> object,
- Handle<JSObject> holder,
- Register receiver,
- Register name_reg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Handle<AccessorInfo> callback,
- Handle<String> name,
- Label* miss);
-
- void GenerateDictionaryLoadCallback(Register receiver,
- Register name_reg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Handle<AccessorInfo> callback,
- Handle<String> name,
- Label* miss);
-
- void GenerateLoadConstant(Handle<JSObject> object,
- Handle<JSObject> holder,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Handle<JSFunction> value,
- Handle<String> name,
- Label* miss);
-
- void GenerateLoadInterceptor(Handle<JSObject> object,
- Handle<JSObject> holder,
- LookupResult* lookup,
- Register receiver,
- Register name_reg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Handle<String> name,
- Label* miss);
-
static void LookupPostInterceptor(Handle<JSObject> holder,
Handle<String> name,
LookupResult* lookup);
@@ -595,6 +581,8 @@ class StubCompiler BASE_EMBEDDED {
Heap* heap() { return isolate()->heap(); }
Factory* factory() { return isolate()->factory(); }
+ void GenerateTailCall(Handle<Code> code);
+
private:
Isolate* isolate_;
MacroAssembler masm_;
@@ -602,99 +590,171 @@ class StubCompiler BASE_EMBEDDED {
};
-class LoadStubCompiler: public StubCompiler {
- public:
- explicit LoadStubCompiler(Isolate* isolate) : StubCompiler(isolate) { }
+enum FrontendCheckType { PERFORM_INITIAL_CHECKS, SKIP_INITIAL_CHECKS };
- Handle<Code> CompileLoadNonexistent(Handle<String> name,
- Handle<JSObject> object,
- Handle<JSObject> last);
+
+class BaseLoadStubCompiler: public StubCompiler {
+ public:
+ BaseLoadStubCompiler(Isolate* isolate, Register* registers)
+ : StubCompiler(isolate), registers_(registers) { }
+ virtual ~BaseLoadStubCompiler() { }
Handle<Code> CompileLoadField(Handle<JSObject> object,
Handle<JSObject> holder,
- int index,
- Handle<String> name);
+ Handle<String> name,
+ PropertyIndex index);
- Handle<Code> CompileLoadCallback(Handle<String> name,
- Handle<JSObject> object,
+ Handle<Code> CompileLoadCallback(Handle<JSObject> object,
Handle<JSObject> holder,
- Handle<AccessorInfo> callback);
-
- static void GenerateLoadViaGetter(MacroAssembler* masm,
- Handle<JSFunction> getter);
-
- Handle<Code> CompileLoadViaGetter(Handle<String> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- Handle<JSFunction> getter);
+ Handle<String> name,
+ Handle<ExecutableAccessorInfo> callback);
Handle<Code> CompileLoadConstant(Handle<JSObject> object,
Handle<JSObject> holder,
- Handle<JSFunction> value,
- Handle<String> name);
+ Handle<String> name,
+ Handle<JSFunction> value);
Handle<Code> CompileLoadInterceptor(Handle<JSObject> object,
Handle<JSObject> holder,
Handle<String> name);
- Handle<Code> CompileLoadGlobal(Handle<JSObject> object,
- Handle<GlobalObject> holder,
- Handle<JSGlobalPropertyCell> cell,
+ Handle<Code> CompileMonomorphicIC(Handle<Map> receiver_map,
+ Handle<Code> handler,
+ Handle<String> name);
+ Handle<Code> CompilePolymorphicIC(MapHandleList* receiver_maps,
+ CodeHandleList* handlers,
+ Handle<String> name,
+ Code::StubType type,
+ IcCheckType check);
+
+ protected:
+ Register HandlerFrontendHeader(Handle<JSObject> object,
+ Register object_reg,
+ Handle<JSObject> holder,
Handle<String> name,
- bool is_dont_delete);
+ Label* success);
+ void HandlerFrontendFooter(Label* success, Label* miss);
+
+ Register HandlerFrontend(Handle<JSObject> object,
+ Register object_reg,
+ Handle<JSObject> holder,
+ Handle<String> name,
+ Label* success);
+ Register CallbackHandlerFrontend(Handle<JSObject> object,
+ Register object_reg,
+ Handle<JSObject> holder,
+ Handle<String> name,
+ Label* success,
+ Handle<ExecutableAccessorInfo> callback);
+ void NonexistentHandlerFrontend(Handle<JSObject> object,
+ Handle<JSObject> last,
+ Handle<String> name,
+ Label* success,
+ Handle<GlobalObject> global);
+
+ void GenerateLoadField(Register reg,
+ Handle<JSObject> holder,
+ PropertyIndex index);
+ void GenerateLoadConstant(Handle<JSFunction> value);
+ void GenerateLoadCallback(Register reg,
+ Handle<ExecutableAccessorInfo> callback);
+ void GenerateLoadInterceptor(Register holder_reg,
+ Handle<JSObject> object,
+ Handle<JSObject> holder,
+ LookupResult* lookup,
+ Handle<String> name);
+ void GenerateLoadPostInterceptor(Register reg,
+ Handle<JSObject> interceptor_holder,
+ Handle<String> name,
+ LookupResult* lookup);
+
+ Handle<Code> GetCode(Code::IcFragment fragment,
+ Code::StubType type,
+ Handle<String> name,
+ InlineCacheState state = MONOMORPHIC);
+
+ Register receiver() { return registers_[0]; }
+ Register name() { return registers_[1]; }
+ Register scratch1() { return registers_[2]; }
+ Register scratch2() { return registers_[3]; }
+ Register scratch3() { return registers_[4]; }
+ Register scratch4() { return registers_[5]; }
private:
- Handle<Code> GetCode(Code::StubType type, Handle<String> name);
+ virtual Code::Kind kind() = 0;
+ virtual Logger::LogEventsAndTags log_kind(Handle<Code> code) = 0;
+ virtual void JitEvent(Handle<String> name, Handle<Code> code) = 0;
+ virtual void GenerateNameCheck(Handle<String> name,
+ Register name_reg,
+ Label* miss) { }
+ Register* registers_;
};
-class KeyedLoadStubCompiler: public StubCompiler {
+class LoadStubCompiler: public BaseLoadStubCompiler {
public:
- explicit KeyedLoadStubCompiler(Isolate* isolate) : StubCompiler(isolate) { }
+ explicit LoadStubCompiler(Isolate* isolate)
+ : BaseLoadStubCompiler(isolate, registers()) { }
- Handle<Code> CompileLoadField(Handle<String> name,
- Handle<JSObject> object,
- Handle<JSObject> holder,
- int index);
-
- Handle<Code> CompileLoadCallback(Handle<String> name,
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<AccessorInfo> callback);
+ Handle<Code> CompileLoadNonexistent(Handle<JSObject> object,
+ Handle<JSObject> last,
+ Handle<String> name,
+ Handle<GlobalObject> global);
- Handle<Code> CompileLoadConstant(Handle<String> name,
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<JSFunction> value);
+ static void GenerateLoadViaGetter(MacroAssembler* masm,
+ Handle<JSFunction> getter);
- Handle<Code> CompileLoadInterceptor(Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<String> name);
+ Handle<Code> CompileLoadViaGetter(Handle<JSObject> object,
+ Handle<JSObject> holder,
+ Handle<String> name,
+ Handle<JSFunction> getter);
- Handle<Code> CompileLoadArrayLength(Handle<String> name);
+ Handle<Code> CompileLoadGlobal(Handle<JSObject> object,
+ Handle<GlobalObject> holder,
+ Handle<JSGlobalPropertyCell> cell,
+ Handle<String> name,
+ bool is_dont_delete);
- Handle<Code> CompileLoadStringLength(Handle<String> name);
+ static Register receiver() { return registers()[0]; }
- Handle<Code> CompileLoadFunctionPrototype(Handle<String> name);
+ private:
+ static Register* registers();
+ virtual Code::Kind kind() { return Code::LOAD_IC; }
+ virtual Logger::LogEventsAndTags log_kind(Handle<Code> code) {
+ return code->ic_state() == MONOMORPHIC
+ ? Logger::LOAD_IC_TAG : Logger::LOAD_POLYMORPHIC_IC_TAG;
+ }
+ virtual void JitEvent(Handle<String> name, Handle<Code> code);
+};
- Handle<Code> CompileLoadElement(Handle<Map> receiver_map);
- Handle<Code> CompileLoadPolymorphic(MapHandleList* receiver_maps,
- CodeHandleList* handler_ics);
+class KeyedLoadStubCompiler: public BaseLoadStubCompiler {
+ public:
+ explicit KeyedLoadStubCompiler(Isolate* isolate)
+ : BaseLoadStubCompiler(isolate, registers()) { }
- static void GenerateLoadExternalArray(MacroAssembler* masm,
- ElementsKind elements_kind);
+ Handle<Code> CompileLoadElement(Handle<Map> receiver_map);
- static void GenerateLoadFastElement(MacroAssembler* masm);
+ void CompileElementHandlers(MapHandleList* receiver_maps,
+ CodeHandleList* handlers);
- static void GenerateLoadFastDoubleElement(MacroAssembler* masm);
+ Handle<Code> CompileLoadElementPolymorphic(MapHandleList* receiver_maps);
static void GenerateLoadDictionaryElement(MacroAssembler* masm);
+ static Register receiver() { return registers()[0]; }
+
private:
- Handle<Code> GetCode(Code::StubType type,
- Handle<String> name,
- InlineCacheState state = MONOMORPHIC);
+ static Register* registers();
+ virtual Code::Kind kind() { return Code::KEYED_LOAD_IC; }
+ virtual Logger::LogEventsAndTags log_kind(Handle<Code> code) {
+ return code->ic_state() == MONOMORPHIC
+ ? Logger::KEYED_LOAD_IC_TAG : Logger::KEYED_LOAD_POLYMORPHIC_IC_TAG;
+ }
+ virtual void JitEvent(Handle<String> name, Handle<Code> code);
+ virtual void GenerateNameCheck(Handle<String> name,
+ Register name_reg,
+ Label* miss);
};
@@ -710,15 +770,15 @@ class StoreStubCompiler: public StubCompiler {
Handle<String> name);
Handle<Code> CompileStoreCallback(Handle<String> name,
- Handle<JSObject> receiver,
+ Handle<JSObject> object,
Handle<JSObject> holder,
- Handle<AccessorInfo> callback);
+ Handle<ExecutableAccessorInfo> callback);
static void GenerateStoreViaSetter(MacroAssembler* masm,
Handle<JSFunction> setter);
Handle<Code> CompileStoreViaSetter(Handle<String> name,
- Handle<JSObject> receiver,
+ Handle<JSObject> object,
Handle<JSObject> holder,
Handle<JSFunction> setter);
@@ -756,6 +816,8 @@ class KeyedStoreStubCompiler: public StubCompiler {
CodeHandleList* handler_stubs,
MapHandleList* transitioned_maps);
+ Handle<Code> CompileStoreElementPolymorphic(MapHandleList* receiver_maps);
+
static void GenerateStoreFastElement(MacroAssembler* masm,
bool is_js_array,
ElementsKind element_kind,
@@ -804,14 +866,22 @@ class CallStubCompiler: public StubCompiler {
Handle<Code> CompileCallField(Handle<JSObject> object,
Handle<JSObject> holder,
- int index,
+ PropertyIndex index,
Handle<String> name);
+ void CompileHandlerFrontend(Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<String> name,
+ CheckType check,
+ Label* success);
+
+ void CompileHandlerBackend(Handle<JSFunction> function);
+
Handle<Code> CompileCallConstant(Handle<Object> object,
Handle<JSObject> holder,
- Handle<JSFunction> function,
Handle<String> name,
- CheckType check);
+ CheckType check,
+ Handle<JSFunction> function);
Handle<Code> CompileCallInterceptor(Handle<JSObject> object,
Handle<JSObject> holder,
diff --git a/src/3rdparty/v8/src/sweeper-thread.cc b/src/3rdparty/v8/src/sweeper-thread.cc
new file mode 100644
index 0000000..f08fcfb
--- /dev/null
+++ b/src/3rdparty/v8/src/sweeper-thread.cc
@@ -0,0 +1,103 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "sweeper-thread.h"
+
+#include "v8.h"
+
+#include "isolate.h"
+#include "v8threads.h"
+
+namespace v8 {
+namespace internal {
+
+static const int kSweeperThreadStackSize = 64 * KB;
+
+SweeperThread::SweeperThread(Isolate* isolate)
+ : Thread(Thread::Options("v8:SweeperThread", kSweeperThreadStackSize)),
+ isolate_(isolate),
+ heap_(isolate->heap()),
+ collector_(heap_->mark_compact_collector()),
+ start_sweeping_semaphore_(OS::CreateSemaphore(0)),
+ end_sweeping_semaphore_(OS::CreateSemaphore(0)),
+ stop_semaphore_(OS::CreateSemaphore(0)),
+ free_list_old_data_space_(heap_->paged_space(OLD_DATA_SPACE)),
+ free_list_old_pointer_space_(heap_->paged_space(OLD_POINTER_SPACE)),
+ private_free_list_old_data_space_(heap_->paged_space(OLD_DATA_SPACE)),
+ private_free_list_old_pointer_space_(
+ heap_->paged_space(OLD_POINTER_SPACE)) {
+ NoBarrier_Store(&stop_thread_, static_cast<AtomicWord>(false));
+}
+
+
+void SweeperThread::Run() {
+ Isolate::SetIsolateThreadLocals(isolate_, NULL);
+ while (true) {
+ start_sweeping_semaphore_->Wait();
+
+ if (Acquire_Load(&stop_thread_)) {
+ stop_semaphore_->Signal();
+ return;
+ }
+
+ collector_->SweepInParallel(heap_->old_data_space(),
+ &private_free_list_old_data_space_,
+ &free_list_old_data_space_);
+ collector_->SweepInParallel(heap_->old_pointer_space(),
+ &private_free_list_old_pointer_space_,
+ &free_list_old_pointer_space_);
+ end_sweeping_semaphore_->Signal();
+ }
+}
+
+
+intptr_t SweeperThread::StealMemory(PagedSpace* space) {
+ if (space->identity() == OLD_POINTER_SPACE) {
+ return space->free_list()->Concatenate(&free_list_old_pointer_space_);
+ } else if (space->identity() == OLD_DATA_SPACE) {
+ return space->free_list()->Concatenate(&free_list_old_data_space_);
+ }
+ return 0;
+}
+
+
+void SweeperThread::Stop() {
+ Release_Store(&stop_thread_, static_cast<AtomicWord>(true));
+ start_sweeping_semaphore_->Signal();
+ stop_semaphore_->Wait();
+}
+
+
+void SweeperThread::StartSweeping() {
+ start_sweeping_semaphore_->Signal();
+}
+
+
+void SweeperThread::WaitForSweeperThread() {
+ end_sweeping_semaphore_->Wait();
+}
+} } // namespace v8::internal
diff --git a/src/3rdparty/v8/src/sweeper-thread.h b/src/3rdparty/v8/src/sweeper-thread.h
new file mode 100644
index 0000000..a170982
--- /dev/null
+++ b/src/3rdparty/v8/src/sweeper-thread.h
@@ -0,0 +1,75 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_SWEEPER_THREAD_H_
+#define V8_SWEEPER_THREAD_H_
+
+#include "atomicops.h"
+#include "flags.h"
+#include "platform.h"
+#include "v8utils.h"
+
+#include "spaces.h"
+
+#include "heap.h"
+
+namespace v8 {
+namespace internal {
+
+class SweeperThread : public Thread {
+ public:
+ explicit SweeperThread(Isolate* isolate);
+
+ void Run();
+ void Stop();
+ void StartSweeping();
+ void WaitForSweeperThread();
+ intptr_t StealMemory(PagedSpace* space);
+
+ ~SweeperThread() {
+ delete start_sweeping_semaphore_;
+ delete end_sweeping_semaphore_;
+ delete stop_semaphore_;
+ }
+
+ private:
+ Isolate* isolate_;
+ Heap* heap_;
+ MarkCompactCollector* collector_;
+ Semaphore* start_sweeping_semaphore_;
+ Semaphore* end_sweeping_semaphore_;
+ Semaphore* stop_semaphore_;
+ FreeList free_list_old_data_space_;
+ FreeList free_list_old_pointer_space_;
+ FreeList private_free_list_old_data_space_;
+ FreeList private_free_list_old_pointer_space_;
+ volatile AtomicWord stop_thread_;
+};
+
+} } // namespace v8::internal
+
+#endif // V8_SWEEPER_THREAD_H_
diff --git a/src/3rdparty/v8/src/symbol.js b/src/3rdparty/v8/src/symbol.js
new file mode 100644
index 0000000..b7f9dc9
--- /dev/null
+++ b/src/3rdparty/v8/src/symbol.js
@@ -0,0 +1,39 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"use strict";
+
+var $Symbol = function() { return %CreateSymbol() }
+global.Symbol = $Symbol
+
+// Symbols only have a toString method and no prototype.
+var SymbolDelegate = {
+ __proto__: null,
+ toString: $Object.prototype.toString
+}
+
+$Object.freeze(SymbolDelegate)
diff --git a/src/3rdparty/v8/src/token.h b/src/3rdparty/v8/src/token.h
index 863ba62..4078a15 100644
--- a/src/3rdparty/v8/src/token.h
+++ b/src/3rdparty/v8/src/token.h
@@ -230,26 +230,30 @@ class Token {
case EQ: return NE;
case NE: return EQ;
case EQ_STRICT: return NE_STRICT;
+ case NE_STRICT: return EQ_STRICT;
case LT: return GTE;
case GT: return LTE;
case LTE: return GT;
case GTE: return LT;
default:
+ UNREACHABLE();
return op;
}
}
- static Value InvertCompareOp(Value op) {
+ static Value ReverseCompareOp(Value op) {
ASSERT(IsCompareOp(op));
switch (op) {
- case EQ: return NE;
- case NE: return EQ;
- case EQ_STRICT: return NE_STRICT;
+ case EQ: return EQ;
+ case NE: return NE;
+ case EQ_STRICT: return EQ_STRICT;
+ case NE_STRICT: return NE_STRICT;
case LT: return GT;
case GT: return LT;
case LTE: return GTE;
case GTE: return LTE;
default:
+ UNREACHABLE();
return op;
}
}
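The rename from InvertCompareOp to ReverseCompareOp is not cosmetic: the old table computed the logical complement (EQ becomes NE, LT becomes GTE), which NegateCompareOp above still provides, while the new table swaps the operands, leaving (in)equality fixed and mirroring the orderings. The distinction in two asserts:

    #include <cassert>

    int main() {
      int a = 1, b = 2;
      // Negation complements the relation: !(a < b) is a >= b,
      // so NegateCompareOp(LT) == GTE.
      assert((!(a < b)) == (a >= b));
      // Reversal swaps the operands: (a < b) is (b > a),
      // so ReverseCompareOp(LT) == GT, and EQ maps to itself.
      assert((a < b) == (b > a));
      return 0;
    }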
diff --git a/src/3rdparty/v8/src/type-info.cc b/src/3rdparty/v8/src/type-info.cc
index bc6a46b..62ca324 100644
--- a/src/3rdparty/v8/src/type-info.cc
+++ b/src/3rdparty/v8/src/type-info.cc
@@ -79,7 +79,7 @@ static uint32_t IdToKey(TypeFeedbackId ast_id) {
Handle<Object> TypeFeedbackOracle::GetInfo(TypeFeedbackId ast_id) {
int entry = dictionary_->FindEntry(IdToKey(ast_id));
return entry != UnseededNumberDictionary::kNotFound
- ? Handle<Object>(dictionary_->ValueAt(entry))
+ ? Handle<Object>(dictionary_->ValueAt(entry), isolate_)
: Handle<Object>::cast(isolate_->factory()->undefined_value());
}
@@ -111,14 +111,11 @@ bool TypeFeedbackOracle::LoadIsMonomorphicNormal(Property* expr) {
}
-bool TypeFeedbackOracle::LoadIsMegamorphicWithTypeInfo(Property* expr) {
+bool TypeFeedbackOracle::LoadIsPolymorphic(Property* expr) {
Handle<Object> map_or_code = GetInfo(expr->PropertyFeedbackId());
if (map_or_code->IsCode()) {
Handle<Code> code = Handle<Code>::cast(map_or_code);
- Builtins* builtins = isolate_->builtins();
- return code->is_keyed_load_stub() &&
- *code != builtins->builtin(Builtins::kKeyedLoadIC_Generic) &&
- code->ic_state() == MEGAMORPHIC;
+ return code->is_keyed_load_stub() && code->ic_state() == POLYMORPHIC;
}
return false;
}
@@ -145,19 +142,15 @@ bool TypeFeedbackOracle::StoreIsMonomorphicNormal(TypeFeedbackId ast_id) {
}
-bool TypeFeedbackOracle::StoreIsMegamorphicWithTypeInfo(TypeFeedbackId ast_id) {
+bool TypeFeedbackOracle::StoreIsPolymorphic(TypeFeedbackId ast_id) {
Handle<Object> map_or_code = GetInfo(ast_id);
if (map_or_code->IsCode()) {
Handle<Code> code = Handle<Code>::cast(map_or_code);
- Builtins* builtins = isolate_->builtins();
bool allow_growth =
Code::GetKeyedAccessGrowMode(code->extra_ic_state()) ==
ALLOW_JSARRAY_GROWTH;
- return code->is_keyed_store_stub() &&
- !allow_growth &&
- *code != builtins->builtin(Builtins::kKeyedStoreIC_Generic) &&
- *code != builtins->builtin(Builtins::kKeyedStoreIC_Generic_Strict) &&
- code->ic_state() == MEGAMORPHIC;
+ return code->is_keyed_store_stub() && !allow_growth &&
+ code->ic_state() == POLYMORPHIC;
}
return false;
}
@@ -170,8 +163,13 @@ bool TypeFeedbackOracle::CallIsMonomorphic(Call* expr) {
bool TypeFeedbackOracle::CallNewIsMonomorphic(CallNew* expr) {
- Handle<Object> value = GetInfo(expr->CallNewFeedbackId());
- return value->IsJSFunction();
+ Handle<Object> info = GetInfo(expr->CallNewFeedbackId());
+ if (info->IsSmi()) {
+ ASSERT(static_cast<ElementsKind>(Smi::cast(*info)->value()) <=
+ LAST_FAST_ELEMENTS_KIND);
+ return Isolate::Current()->global_context()->array_function();
+ }
+ return info->IsJSFunction();
}
@@ -223,8 +221,7 @@ Handle<Map> TypeFeedbackOracle::StoreMonomorphicReceiverType(
void TypeFeedbackOracle::LoadReceiverTypes(Property* expr,
Handle<String> name,
SmallMapList* types) {
- Code::Flags flags =
- Code::ComputeMonomorphicFlags(Code::LOAD_IC, Code::NORMAL);
+ Code::Flags flags = Code::ComputeMonomorphicFlags(Code::LOAD_IC);
CollectReceiverTypes(expr->PropertyFeedbackId(), name, flags, types);
}
@@ -232,8 +229,7 @@ void TypeFeedbackOracle::LoadReceiverTypes(Property* expr,
void TypeFeedbackOracle::StoreReceiverTypes(Assignment* expr,
Handle<String> name,
SmallMapList* types) {
- Code::Flags flags =
- Code::ComputeMonomorphicFlags(Code::STORE_IC, Code::NORMAL);
+ Code::Flags flags = Code::ComputeMonomorphicFlags(Code::STORE_IC);
CollectReceiverTypes(expr->AssignmentFeedbackId(), name, flags, types);
}
@@ -250,10 +246,10 @@ void TypeFeedbackOracle::CallReceiverTypes(Call* expr,
CallIC::Contextual::encode(call_kind == CALL_AS_FUNCTION);
Code::Flags flags = Code::ComputeMonomorphicFlags(Code::CALL_IC,
- Code::NORMAL,
extra_ic_state,
- OWN_MAP,
- arity);
+ Code::NORMAL,
+ arity,
+ OWN_MAP);
CollectReceiverTypes(expr->CallFeedbackId(), name, flags, types);
}
@@ -274,6 +270,8 @@ Handle<JSObject> TypeFeedbackOracle::GetPrototypeForPrimitiveCheck(
case RECEIVER_MAP_CHECK:
UNREACHABLE();
break;
+ case SYMBOL_CHECK:
+ return Handle<JSObject>(native_context_->symbol_delegate());
case STRING_CHECK:
function = native_context_->string_function();
break;
@@ -295,10 +293,33 @@ Handle<JSFunction> TypeFeedbackOracle::GetCallTarget(Call* expr) {
Handle<JSFunction> TypeFeedbackOracle::GetCallNewTarget(CallNew* expr) {
- return Handle<JSFunction>::cast(GetInfo(expr->CallNewFeedbackId()));
+ Handle<Object> info = GetInfo(expr->CallNewFeedbackId());
+ if (info->IsSmi()) {
+ ASSERT(static_cast<ElementsKind>(Smi::cast(*info)->value()) <=
+ LAST_FAST_ELEMENTS_KIND);
+ return Handle<JSFunction>(Isolate::Current()->global_context()->
+ array_function());
+ } else {
+ return Handle<JSFunction>::cast(info);
+ }
}
+ElementsKind TypeFeedbackOracle::GetCallNewElementsKind(CallNew* expr) {
+ Handle<Object> info = GetInfo(expr->CallNewFeedbackId());
+ if (info->IsSmi()) {
+ return static_cast<ElementsKind>(Smi::cast(*info)->value());
+ } else {
+ // TODO(mvstanton): we avoid calling GetInitialFastElementsKind() here
+ // for perf reasons. Is there a better fix?
+ if (FLAG_packed_arrays) {
+ return FAST_SMI_ELEMENTS;
+ } else {
+ return FAST_HOLEY_SMI_ELEMENTS;
+ }
+ }
+}
+
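GetCallNewTarget and GetCallNewElementsKind above read the same feedback slot two ways: a Smi encodes an ElementsKind recorded for Array construction, while anything else is the monomorphic target function; with no Smi recorded, the oracle falls back to the initial fast kind chosen by FLAG_packed_arrays. A sketch of that tagged-feedback decode, with a std::variant standing in for the Smi/JSFunction distinction:

    #include <string>
    #include <variant>

    // Illustrative feedback cell: a recorded elements kind (the Smi case)
    // or the name of a call target (the JSFunction case).
    enum ElementsKind { FAST_SMI_ELEMENTS, FAST_HOLEY_SMI_ELEMENTS };
    using Feedback = std::variant<ElementsKind, std::string>;

    ElementsKind GetCallNewElementsKind(const Feedback& info,
                                        bool packed_arrays) {
      if (const ElementsKind* kind = std::get_if<ElementsKind>(&info))
        return *kind;
      // No Smi recorded: default to the initial fast kind.
      return packed_arrays ? FAST_SMI_ELEMENTS : FAST_HOLEY_SMI_ELEMENTS;
    }

    int main() {
      Feedback f = FAST_HOLEY_SMI_ELEMENTS;
      return GetCallNewElementsKind(f, true) == FAST_HOLEY_SMI_ELEMENTS
                 ? 0 : 1;
    }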
Handle<Map> TypeFeedbackOracle::GetObjectLiteralStoreMap(
ObjectLiteral::Property* prop) {
ASSERT(ObjectLiteralStoreIsMonomorphic(prop));
@@ -312,43 +333,63 @@ bool TypeFeedbackOracle::LoadIsBuiltin(Property* expr, Builtins::Name id) {
}
-TypeInfo TypeFeedbackOracle::CompareType(CompareOperation* expr) {
- Handle<Object> object = GetInfo(expr->CompareOperationFeedbackId());
- TypeInfo unknown = TypeInfo::Unknown();
- if (!object->IsCode()) return unknown;
+bool TypeFeedbackOracle::LoadIsStub(Property* expr, ICStub* stub) {
+ Handle<Object> object = GetInfo(expr->PropertyFeedbackId());
+ if (!object->IsCode()) return false;
Handle<Code> code = Handle<Code>::cast(object);
- if (!code->is_compare_ic_stub()) return unknown;
+ if (!code->is_load_stub()) return false;
+ if (code->ic_state() != MONOMORPHIC) return false;
+ return stub->Describes(*code);
+}
+
- CompareIC::State state = static_cast<CompareIC::State>(code->compare_state());
+static TypeInfo TypeFromCompareType(CompareIC::State state) {
switch (state) {
case CompareIC::UNINITIALIZED:
// Uninitialized means never executed.
return TypeInfo::Uninitialized();
- case CompareIC::SMIS:
+ case CompareIC::SMI:
return TypeInfo::Smi();
- case CompareIC::HEAP_NUMBERS:
+ case CompareIC::NUMBER:
return TypeInfo::Number();
- case CompareIC::SYMBOLS:
- case CompareIC::STRINGS:
+ case CompareIC::INTERNALIZED_STRING:
+ return TypeInfo::InternalizedString();
+ case CompareIC::STRING:
return TypeInfo::String();
- case CompareIC::OBJECTS:
- case CompareIC::KNOWN_OBJECTS:
+ case CompareIC::OBJECT:
+ case CompareIC::KNOWN_OBJECT:
// TODO(kasperl): We really need a type for JS objects here.
return TypeInfo::NonPrimitive();
case CompareIC::GENERIC:
default:
- return unknown;
+ return TypeInfo::Unknown();
}
}
-bool TypeFeedbackOracle::IsSymbolCompare(CompareOperation* expr) {
+void TypeFeedbackOracle::CompareType(CompareOperation* expr,
+ TypeInfo* left_type,
+ TypeInfo* right_type,
+ TypeInfo* overall_type) {
Handle<Object> object = GetInfo(expr->CompareOperationFeedbackId());
- if (!object->IsCode()) return false;
+ TypeInfo unknown = TypeInfo::Unknown();
+ if (!object->IsCode()) {
+ *left_type = *right_type = *overall_type = unknown;
+ return;
+ }
Handle<Code> code = Handle<Code>::cast(object);
- if (!code->is_compare_ic_stub()) return false;
- CompareIC::State state = static_cast<CompareIC::State>(code->compare_state());
- return state == CompareIC::SYMBOLS;
+ if (!code->is_compare_ic_stub()) {
+ *left_type = *right_type = *overall_type = unknown;
+ return;
+ }
+
+ int stub_minor_key = code->stub_info();
+ CompareIC::State left_state, right_state, handler_state;
+ ICCompareStub::DecodeMinorKey(stub_minor_key, &left_state, &right_state,
+ &handler_state, NULL);
+ *left_type = TypeFromCompareType(left_state);
+ *right_type = TypeFromCompareType(right_state);
+ *overall_type = TypeFromCompareType(handler_state);
}
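CompareType now recovers three separate states (left, right, and the combined handler state) from the stub's minor key instead of one blended compare_state. The field widths below are assumptions for illustration, not ICCompareStub's actual layout; the point is the pack/unpack symmetry behind DecodeMinorKey:

    #include <cassert>
    #include <cstdint>

    enum State : uint32_t { UNINITIALIZED, SMI, NUMBER, STRING, GENERIC };

    // Pack three small IC states into one integer key (widths assumed).
    static uint32_t Encode(State left, State right, State handler) {
      return left | (right << 4) | (handler << 8);
    }

    static void Decode(uint32_t key, State* left, State* right,
                       State* handler) {
      *left    = static_cast<State>(key & 0xF);
      *right   = static_cast<State>((key >> 4) & 0xF);
      *handler = static_cast<State>((key >> 8) & 0xF);
    }

    int main() {
      State l, r, h;
      Decode(Encode(SMI, NUMBER, GENERIC), &l, &r, &h);
      assert(l == SMI && r == NUMBER && h == GENERIC);
      return 0;
    }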
@@ -357,8 +398,8 @@ Handle<Map> TypeFeedbackOracle::GetCompareMap(CompareOperation* expr) {
if (!object->IsCode()) return Handle<Map>::null();
Handle<Code> code = Handle<Code>::cast(object);
if (!code->is_compare_ic_stub()) return Handle<Map>::null();
- CompareIC::State state = static_cast<CompareIC::State>(code->compare_state());
- if (state != CompareIC::KNOWN_OBJECTS) {
+ CompareIC::State state = ICCompareStub::CompareState(code->stub_info());
+ if (state != CompareIC::KNOWN_OBJECT) {
return Handle<Map>::null();
}
Map* first_map = code->FindFirstMap();
@@ -380,7 +421,7 @@ TypeInfo TypeFeedbackOracle::UnaryType(UnaryOperation* expr) {
switch (type) {
case UnaryOpIC::SMI:
return TypeInfo::Smi();
- case UnaryOpIC::HEAP_NUMBER:
+ case UnaryOpIC::NUMBER:
return TypeInfo::Double();
default:
return unknown;
@@ -388,55 +429,44 @@ TypeInfo TypeFeedbackOracle::UnaryType(UnaryOperation* expr) {
}
-TypeInfo TypeFeedbackOracle::BinaryType(BinaryOperation* expr) {
+static TypeInfo TypeFromBinaryOpType(BinaryOpIC::TypeInfo binary_type) {
+ switch (binary_type) {
+ // Uninitialized means never executed.
+ case BinaryOpIC::UNINITIALIZED: return TypeInfo::Uninitialized();
+ case BinaryOpIC::SMI: return TypeInfo::Smi();
+ case BinaryOpIC::INT32: return TypeInfo::Integer32();
+ case BinaryOpIC::NUMBER: return TypeInfo::Double();
+ case BinaryOpIC::ODDBALL: return TypeInfo::Unknown();
+ case BinaryOpIC::STRING: return TypeInfo::String();
+ case BinaryOpIC::GENERIC: return TypeInfo::Unknown();
+ }
+ UNREACHABLE();
+ return TypeInfo::Unknown();
+}
+
+
+void TypeFeedbackOracle::BinaryType(BinaryOperation* expr,
+ TypeInfo* left,
+ TypeInfo* right,
+ TypeInfo* result) {
Handle<Object> object = GetInfo(expr->BinaryOperationFeedbackId());
TypeInfo unknown = TypeInfo::Unknown();
- if (!object->IsCode()) return unknown;
+ if (!object->IsCode()) {
+ *left = *right = *result = unknown;
+ return;
+ }
Handle<Code> code = Handle<Code>::cast(object);
if (code->is_binary_op_stub()) {
- BinaryOpIC::TypeInfo type = static_cast<BinaryOpIC::TypeInfo>(
- code->binary_op_type());
- BinaryOpIC::TypeInfo result_type = static_cast<BinaryOpIC::TypeInfo>(
- code->binary_op_result_type());
-
- switch (type) {
- case BinaryOpIC::UNINITIALIZED:
- // Uninitialized means never executed.
- return TypeInfo::Uninitialized();
- case BinaryOpIC::SMI:
- switch (result_type) {
- case BinaryOpIC::UNINITIALIZED:
- if (expr->op() == Token::DIV) {
- return TypeInfo::Double();
- }
- return TypeInfo::Smi();
- case BinaryOpIC::SMI:
- return TypeInfo::Smi();
- case BinaryOpIC::INT32:
- return TypeInfo::Integer32();
- case BinaryOpIC::HEAP_NUMBER:
- return TypeInfo::Double();
- default:
- return unknown;
- }
- case BinaryOpIC::INT32:
- if (expr->op() == Token::DIV ||
- result_type == BinaryOpIC::HEAP_NUMBER) {
- return TypeInfo::Double();
- }
- return TypeInfo::Integer32();
- case BinaryOpIC::HEAP_NUMBER:
- return TypeInfo::Double();
- case BinaryOpIC::BOTH_STRING:
- return TypeInfo::String();
- case BinaryOpIC::STRING:
- case BinaryOpIC::GENERIC:
- return unknown;
- default:
- return unknown;
- }
+ BinaryOpIC::TypeInfo left_type, right_type, result_type;
+ BinaryOpStub::decode_types_from_minor_key(code->stub_info(), &left_type,
+ &right_type, &result_type);
+ *left = TypeFromBinaryOpType(left_type);
+ *right = TypeFromBinaryOpType(right_type);
+ *result = TypeFromBinaryOpType(result_type);
+ return;
}
- return unknown;
+ // Not a binary op stub.
+ *left = *right = *result = unknown;
}
@@ -447,28 +477,8 @@ TypeInfo TypeFeedbackOracle::SwitchType(CaseClause* clause) {
Handle<Code> code = Handle<Code>::cast(object);
if (!code->is_compare_ic_stub()) return unknown;
- CompareIC::State state = static_cast<CompareIC::State>(code->compare_state());
- switch (state) {
- case CompareIC::UNINITIALIZED:
- // Uninitialized means never executed.
- // TODO(fschneider): Introduce a separate value for never-executed ICs.
- return unknown;
- case CompareIC::SMIS:
- return TypeInfo::Smi();
- case CompareIC::STRINGS:
- return TypeInfo::String();
- case CompareIC::SYMBOLS:
- return TypeInfo::Symbol();
- case CompareIC::HEAP_NUMBERS:
- return TypeInfo::Number();
- case CompareIC::OBJECTS:
- case CompareIC::KNOWN_OBJECTS:
- // TODO(kasperl): We really need a type for JS objects here.
- return TypeInfo::NonPrimitive();
- case CompareIC::GENERIC:
- default:
- return unknown;
- }
+ CompareIC::State state = ICCompareStub::CompareState(code->stub_info());
+ return TypeFromCompareType(state);
}
@@ -479,17 +489,21 @@ TypeInfo TypeFeedbackOracle::IncrementType(CountOperation* expr) {
Handle<Code> code = Handle<Code>::cast(object);
if (!code->is_binary_op_stub()) return unknown;
- BinaryOpIC::TypeInfo type = static_cast<BinaryOpIC::TypeInfo>(
- code->binary_op_type());
- switch (type) {
+ BinaryOpIC::TypeInfo left_type, right_type, unused_result_type;
+ BinaryOpStub::decode_types_from_minor_key(code->stub_info(), &left_type,
+ &right_type, &unused_result_type);
+ // CountOperations should always have +1 or -1 as their right input.
+ ASSERT(right_type == BinaryOpIC::SMI ||
+ right_type == BinaryOpIC::UNINITIALIZED);
+
+ switch (left_type) {
case BinaryOpIC::UNINITIALIZED:
case BinaryOpIC::SMI:
return TypeInfo::Smi();
case BinaryOpIC::INT32:
return TypeInfo::Integer32();
- case BinaryOpIC::HEAP_NUMBER:
+ case BinaryOpIC::NUMBER:
return TypeInfo::Double();
- case BinaryOpIC::BOTH_STRING:
case BinaryOpIC::STRING:
case BinaryOpIC::GENERIC:
return unknown;
@@ -501,6 +515,29 @@ TypeInfo TypeFeedbackOracle::IncrementType(CountOperation* expr) {
}
+static void AddMapIfMissing(Handle<Map> map, SmallMapList* list,
+ Zone* zone) {
+ for (int i = 0; i < list->length(); ++i) {
+ if (list->at(i).is_identical_to(map)) return;
+ }
+ list->Add(map, zone);
+}
+
+
+void TypeFeedbackOracle::CollectPolymorphicMaps(Handle<Code> code,
+ SmallMapList* types) {
+ MapHandleList maps;
+ code->FindAllMaps(&maps);
+ types->Reserve(maps.length(), zone());
+ for (int i = 0; i < maps.length(); i++) {
+ Handle<Map> map(maps.at(i));
+ if (!CanRetainOtherContext(*map, *native_context_)) {
+ AddMapIfMissing(map, types, zone());
+ }
+ }
+}
+
+
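AddMapIfMissing (moved up so CollectPolymorphicMaps can use it) is a plain dedup-on-insert; a linear scan is acceptable because a polymorphic IC tracks only a handful of maps. The generic shape of the helper:

    #include <algorithm>
    #include <vector>

    // Linear-scan insert-if-absent, as in AddMapIfMissing above.
    template <typename T>
    void AddIfMissing(const T& value, std::vector<T>* list) {
      if (std::find(list->begin(), list->end(), value) == list->end())
        list->push_back(value);
    }

    int main() {
      std::vector<int> maps;
      AddIfMissing(1, &maps);
      AddIfMissing(1, &maps);
      return maps.size() == 1 ? 0 : 1;
    }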
void TypeFeedbackOracle::CollectReceiverTypes(TypeFeedbackId ast_id,
Handle<String> name,
Code::Flags flags,
@@ -508,13 +545,14 @@ void TypeFeedbackOracle::CollectReceiverTypes(TypeFeedbackId ast_id,
Handle<Object> object = GetInfo(ast_id);
if (object->IsUndefined() || object->IsSmi()) return;
- if (*object ==
- isolate_->builtins()->builtin(Builtins::kStoreIC_GlobalProxy)) {
+ if (object.is_identical_to(isolate_->builtins()->StoreIC_GlobalProxy())) {
// TODO(fschneider): We could collect the maps and signal that
// we need a generic store (or load) here.
- ASSERT(Handle<Code>::cast(object)->ic_state() == MEGAMORPHIC);
+ ASSERT(Handle<Code>::cast(object)->ic_state() == GENERIC);
} else if (object->IsMap()) {
types->Add(Handle<Map>::cast(object), zone());
+ } else if (Handle<Code>::cast(object)->ic_state() == POLYMORPHIC) {
+ CollectPolymorphicMaps(Handle<Code>::cast(object), types);
} else if (FLAG_collect_megamorphic_maps_from_stub_cache &&
Handle<Code>::cast(object)->ic_state() == MEGAMORPHIC) {
types->Reserve(4, zone());
@@ -562,15 +600,6 @@ bool TypeFeedbackOracle::CanRetainOtherContext(JSFunction* function,
}
-static void AddMapIfMissing(Handle<Map> map, SmallMapList* list,
- Zone* zone) {
- for (int i = 0; i < list->length(); ++i) {
- if (list->at(i).is_identical_to(map)) return;
- }
- list->Add(map, zone);
-}
-
-
void TypeFeedbackOracle::CollectKeyedReceiverTypes(TypeFeedbackId ast_id,
SmallMapList* types) {
Handle<Object> object = GetInfo(ast_id);
@@ -578,18 +607,7 @@ void TypeFeedbackOracle::CollectKeyedReceiverTypes(TypeFeedbackId ast_id,
Handle<Code> code = Handle<Code>::cast(object);
if (code->kind() == Code::KEYED_LOAD_IC ||
code->kind() == Code::KEYED_STORE_IC) {
- AssertNoAllocation no_allocation;
- int mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
- for (RelocIterator it(*code, mask); !it.done(); it.next()) {
- RelocInfo* info = it.rinfo();
- Object* object = info->target_object();
- if (object->IsMap()) {
- Map* map = Map::cast(object);
- if (!CanRetainOtherContext(map, *native_context_)) {
- AddMapIfMissing(Handle<Map>(map), types, zone());
- }
- }
- }
+ CollectPolymorphicMaps(code, types);
}
}
@@ -607,7 +625,7 @@ byte TypeFeedbackOracle::ToBooleanTypes(TypeFeedbackId ast_id) {
void TypeFeedbackOracle::BuildDictionary(Handle<Code> code) {
AssertNoAllocation no_allocation;
ZoneList<RelocInfo> infos(16, zone());
- HandleScope scope;
+ HandleScope scope(code->GetIsolate());
GetRelocInfos(code, &infos);
CreateDictionary(code, &infos);
ProcessRelocInfos(&infos);
@@ -684,7 +702,7 @@ void TypeFeedbackOracle::ProcessRelocInfos(ZoneList<RelocInfo>* infos) {
case Code::KEYED_LOAD_IC:
case Code::KEYED_STORE_IC:
if (target->ic_state() == MONOMORPHIC ||
- target->ic_state() == MEGAMORPHIC) {
+ target->ic_state() == POLYMORPHIC) {
SetInfo(ast_id, target);
}
break;
diff --git a/src/3rdparty/v8/src/type-info.h b/src/3rdparty/v8/src/type-info.h
index 00d88c2..2b50bf4 100644
--- a/src/3rdparty/v8/src/type-info.h
+++ b/src/3rdparty/v8/src/type-info.h
@@ -65,12 +65,12 @@ class TypeInfo {
static TypeInfo Integer32() { return TypeInfo(kInteger32); }
// We know it's a Smi.
static TypeInfo Smi() { return TypeInfo(kSmi); }
- // We know it's a Symbol.
- static TypeInfo Symbol() { return TypeInfo(kSymbol); }
// We know it's a heap number.
static TypeInfo Double() { return TypeInfo(kDouble); }
// We know it's a string.
static TypeInfo String() { return TypeInfo(kString); }
+ // We know it's an internalized string.
+ static TypeInfo InternalizedString() { return TypeInfo(kInternalizedString); }
// We know it's a non-primitive (object) type.
static TypeInfo NonPrimitive() { return TypeInfo(kNonPrimitive); }
// We haven't started collecting info yet.
@@ -140,14 +140,14 @@ class TypeInfo {
return ((type_ & kSmi) == kSmi);
}
- inline bool IsSymbol() {
+ inline bool IsInternalizedString() {
ASSERT(type_ != kUninitialized);
- return ((type_ & kSymbol) == kSymbol);
+ return ((type_ & kInternalizedString) == kInternalizedString);
}
- inline bool IsNonSymbol() {
+ inline bool IsNonInternalizedString() {
ASSERT(type_ != kUninitialized);
- return ((type_ & kSymbol) == kString);
+ return ((type_ & kInternalizedString) == kString);
}
inline bool IsInteger32() {
@@ -181,7 +181,7 @@ class TypeInfo {
case kNumber: return "Number";
case kInteger32: return "Integer32";
case kSmi: return "Smi";
- case kSymbol: return "Symbol";
+ case kInternalizedString: return "InternalizedString";
case kDouble: return "Double";
case kString: return "String";
case kNonPrimitive: return "Object";
@@ -193,17 +193,18 @@ class TypeInfo {
private:
enum Type {
- kUnknown = 0, // 0000000
- kPrimitive = 0x10, // 0010000
- kNumber = 0x11, // 0010001
- kInteger32 = 0x13, // 0010011
- kSmi = 0x17, // 0010111
- kDouble = 0x19, // 0011001
- kString = 0x30, // 0110000
- kSymbol = 0x32, // 0110010
- kNonPrimitive = 0x40, // 1000000
- kUninitialized = 0x7f // 1111111
+ kUnknown = 0, // 0000000
+ kPrimitive = 0x10, // 0010000
+ kNumber = 0x11, // 0010001
+ kInteger32 = 0x13, // 0010011
+ kSmi = 0x17, // 0010111
+ kDouble = 0x19, // 0011001
+ kString = 0x30, // 0110000
+ kInternalizedString = 0x32, // 0110010
+ kNonPrimitive = 0x40, // 1000000
+ kUninitialized = 0x7f // 1111111
};
+
explicit inline TypeInfo(Type t) : type_(t) { }
Type type_;
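The bit patterns in the Type enum form a small lattice: t is a subtype of s exactly when t keeps all of s's bits, so a subtype test is one mask-and-compare. The renamed kInternalizedString keeps kString's bits just as kSmi keeps kNumber's. A self-contained check of those relationships:

    #include <cassert>

    enum Type {
      kUnknown = 0, kPrimitive = 0x10, kNumber = 0x11, kInteger32 = 0x13,
      kSmi = 0x17, kDouble = 0x19, kString = 0x30,
      kInternalizedString = 0x32, kNonPrimitive = 0x40, kUninitialized = 0x7f
    };

    // t is a subtype of s iff t retains every bit of s.
    static bool Is(Type t, Type s) { return (t & s) == s; }

    int main() {
      assert(Is(kSmi, kNumber));                 // every Smi is a Number
      assert(Is(kInternalizedString, kString));  // and a String
      assert(!Is(kDouble, kSmi));
      return 0;
    }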
@@ -226,10 +227,11 @@ class CompareOperation;
class CompilationInfo;
class CountOperation;
class Expression;
+class ForInStatement;
+class ICStub;
class Property;
class SmallMapList;
class UnaryOperation;
-class ForInStatement;
class TypeFeedbackOracle: public ZoneObject {
@@ -241,9 +243,9 @@ class TypeFeedbackOracle: public ZoneObject {
bool LoadIsMonomorphicNormal(Property* expr);
bool LoadIsUninitialized(Property* expr);
- bool LoadIsMegamorphicWithTypeInfo(Property* expr);
+ bool LoadIsPolymorphic(Property* expr);
bool StoreIsMonomorphicNormal(TypeFeedbackId ast_id);
- bool StoreIsMegamorphicWithTypeInfo(TypeFeedbackId ast_id);
+ bool StoreIsPolymorphic(TypeFeedbackId ast_id);
bool CallIsMonomorphic(Call* expr);
bool CallNewIsMonomorphic(CallNew* expr);
bool ObjectLiteralStoreIsMonomorphic(ObjectLiteral::Property* prop);
@@ -270,15 +272,19 @@ class TypeFeedbackOracle: public ZoneObject {
static bool CanRetainOtherContext(JSFunction* function,
Context* native_context);
+ void CollectPolymorphicMaps(Handle<Code> code, SmallMapList* types);
+
CheckType GetCallCheckType(Call* expr);
Handle<JSObject> GetPrototypeForPrimitiveCheck(CheckType check);
Handle<JSFunction> GetCallTarget(Call* expr);
Handle<JSFunction> GetCallNewTarget(CallNew* expr);
+ ElementsKind GetCallNewElementsKind(CallNew* expr);
Handle<Map> GetObjectLiteralStoreMap(ObjectLiteral::Property* prop);
bool LoadIsBuiltin(Property* expr, Builtins::Name id);
+ bool LoadIsStub(Property* expr, ICStub* stub);
// TODO(1571) We can't use ToBooleanStub::Types as the return value because
// of various cycles in our headers. Death to tons of implementations in
@@ -287,9 +293,14 @@ class TypeFeedbackOracle: public ZoneObject {
// Get type information for arithmetic operations and compares.
TypeInfo UnaryType(UnaryOperation* expr);
- TypeInfo BinaryType(BinaryOperation* expr);
- TypeInfo CompareType(CompareOperation* expr);
- bool IsSymbolCompare(CompareOperation* expr);
+ void BinaryType(BinaryOperation* expr,
+ TypeInfo* left,
+ TypeInfo* right,
+ TypeInfo* result);
+ void CompareType(CompareOperation* expr,
+ TypeInfo* left_type,
+ TypeInfo* right_type,
+ TypeInfo* overall_type);
Handle<Map> GetCompareMap(CompareOperation* expr);
TypeInfo SwitchType(CaseClause* clause);
TypeInfo IncrementType(CountOperation* expr);
@@ -315,8 +326,12 @@ class TypeFeedbackOracle: public ZoneObject {
// Returns an element from the backing store. Returns undefined if
// there is no information.
+ public:
+ // TODO(mvstanton): how to get this information without making the method
+ // public?
Handle<Object> GetInfo(TypeFeedbackId ast_id);
+ private:
Handle<Context> native_context_;
Isolate* isolate_;
Handle<UnseededNumberDictionary> dictionary_;
diff --git a/src/3rdparty/v8/src/unicode-inl.h b/src/3rdparty/v8/src/unicode-inl.h
index ec9c69f..c80c67e 100644
--- a/src/3rdparty/v8/src/unicode-inl.h
+++ b/src/3rdparty/v8/src/unicode-inl.h
@@ -79,6 +79,22 @@ template <class T, int s> int Mapping<T, s>::CalculateValue(uchar c, uchar n,
}
+uint16_t Latin1::ConvertNonLatin1ToLatin1(uint16_t c) {
+ ASSERT(c > Latin1::kMaxChar);
+ switch (c) {
+ // These are equivalent characters in Unicode.
+ case 0x39c:
+ case 0x3bc:
+ return 0xb5;
+ // This is the uppercase of a Latin-1 character; the uppercase code
+ // point itself lies outside of Latin-1.
+ case 0x178:
+ return 0xff;
+ }
+ return 0;
+}
+
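ConvertNonLatin1ToLatin1 covers the only code points above 0xFF whose case-mapped partners live inside Latin-1: U+039C/U+03BC (capital and small mu, which case-fold together with the micro sign U+00B5) and U+0178 (Ÿ, the uppercase of ÿ). A standalone copy with the mappings spelled out:

    #include <cassert>
    #include <cstdint>

    // Local copy of the mapping above, for illustration only.
    static uint16_t ToLatin1(uint16_t c) {
      switch (c) {
        case 0x39C:               // Μ (capital mu)
        case 0x3BC: return 0xB5;  // μ (small mu) -> µ (micro sign)
        case 0x178: return 0xFF;  // Ÿ -> ÿ
      }
      return 0;  // no Latin-1 equivalent
    }

    int main() {
      assert(ToLatin1(0x39C) == 0xB5);
      assert(ToLatin1(0x178) == 0xFF);
      assert(ToLatin1(0x100) == 0);
      return 0;
    }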
+
unsigned Utf8::Encode(char* str, uchar c, int previous) {
static const int kMask = ~(1 << 6);
if (c <= kMaxOneByteChar) {
@@ -137,113 +153,51 @@ unsigned Utf8::Length(uchar c, int previous) {
}
}
-uchar CharacterStream::GetNext() {
- uchar result = DecodeCharacter(buffer_, &cursor_);
- if (remaining_ == 1) {
- cursor_ = 0;
- FillBuffer();
- } else {
- remaining_--;
- }
- ASSERT(BoundsCheck(cursor_));
- return result;
-}
-
-#if __BYTE_ORDER == __LITTLE_ENDIAN
-#define IF_LITTLE(expr) expr
-#define IF_BIG(expr) ((void) 0)
-#elif __BYTE_ORDER == __BIG_ENDIAN
-#define IF_LITTLE(expr) ((void) 0)
-#define IF_BIG(expr) expr
-#else
-#warning Unknown byte ordering
-#endif
-
-bool CharacterStream::EncodeAsciiCharacter(uchar c, byte* buffer,
- unsigned capacity, unsigned& offset) {
- if (offset >= capacity) return false;
- buffer[offset] = c;
- offset += 1;
- return true;
-}
-
-bool CharacterStream::EncodeNonAsciiCharacter(uchar c, byte* buffer,
- unsigned capacity, unsigned& offset) {
- unsigned aligned = (offset + 0x3) & ~0x3;
- if ((aligned + sizeof(uchar)) > capacity)
- return false;
- if (offset == aligned) {
- IF_LITTLE(*reinterpret_cast<uchar*>(buffer + aligned) = (c << 8) | 0x80);
- IF_BIG(*reinterpret_cast<uchar*>(buffer + aligned) = c | (1 << 31));
- } else {
- buffer[offset] = 0x80;
- IF_LITTLE(*reinterpret_cast<uchar*>(buffer + aligned) = c << 8);
- IF_BIG(*reinterpret_cast<uchar*>(buffer + aligned) = c);
- }
- offset = aligned + sizeof(uchar);
- return true;
-}
-
-bool CharacterStream::EncodeCharacter(uchar c, byte* buffer, unsigned capacity,
- unsigned& offset) {
- if (c <= Utf8::kMaxOneByteChar) {
- return EncodeAsciiCharacter(c, buffer, capacity, offset);
- } else {
- return EncodeNonAsciiCharacter(c, buffer, capacity, offset);
- }
-}
-
-uchar CharacterStream::DecodeCharacter(const byte* buffer, unsigned* offset) {
- byte b = buffer[*offset];
- if (b <= Utf8::kMaxOneByteChar) {
- (*offset)++;
- return b;
- } else {
- unsigned aligned = (*offset + 0x3) & ~0x3;
- *offset = aligned + sizeof(uchar);
- IF_LITTLE(return *reinterpret_cast<const uchar*>(buffer + aligned) >> 8);
- IF_BIG(return *reinterpret_cast<const uchar*>(buffer + aligned) &
- ~(1 << 31));
- }
-}
-
-#undef IF_LITTLE
-#undef IF_BIG
-
-template <class R, class I, unsigned s>
-void InputBuffer<R, I, s>::FillBuffer() {
- buffer_ = R::ReadBlock(input_, util_buffer_, s, &remaining_, &offset_);
-}
-
-template <class R, class I, unsigned s>
-void InputBuffer<R, I, s>::Rewind() {
- Reset(input_);
-}
-
-template <class R, class I, unsigned s>
-void InputBuffer<R, I, s>::Reset(unsigned position, I input) {
- input_ = input;
- remaining_ = 0;
- cursor_ = 0;
- offset_ = position;
- buffer_ = R::ReadBlock(input_, util_buffer_, s, &remaining_, &offset_);
-}
-
-template <class R, class I, unsigned s>
-void InputBuffer<R, I, s>::Reset(I input) {
- Reset(0, input);
-}
-
-template <class R, class I, unsigned s>
-void InputBuffer<R, I, s>::Seek(unsigned position) {
- offset_ = position;
- buffer_ = R::ReadBlock(input_, util_buffer_, s, &remaining_, &offset_);
-}
-
-template <unsigned s>
-Utf8InputBuffer<s>::Utf8InputBuffer(const char* data, unsigned length)
- : InputBuffer<Utf8, Buffer<const char*>, s>(Buffer<const char*>(data,
- length)) {
+Utf8DecoderBase::Utf8DecoderBase()
+ : unbuffered_start_(NULL),
+ utf16_length_(0),
+ last_byte_of_buffer_unused_(false) {}
+
+Utf8DecoderBase::Utf8DecoderBase(uint16_t* buffer,
+ unsigned buffer_length,
+ const uint8_t* stream,
+ unsigned stream_length) {
+ Reset(buffer, buffer_length, stream, stream_length);
+}
+
+template<unsigned kBufferSize>
+Utf8Decoder<kBufferSize>::Utf8Decoder(const char* stream, unsigned length)
+ : Utf8DecoderBase(buffer_,
+ kBufferSize,
+ reinterpret_cast<const uint8_t*>(stream),
+ length) {
+}
+
+template<unsigned kBufferSize>
+void Utf8Decoder<kBufferSize>::Reset(const char* stream, unsigned length) {
+ Utf8DecoderBase::Reset(buffer_,
+ kBufferSize,
+ reinterpret_cast<const uint8_t*>(stream),
+ length);
+}
+
+template <unsigned kBufferSize>
+unsigned Utf8Decoder<kBufferSize>::WriteUtf16(uint16_t* data,
+ unsigned length) const {
+ ASSERT(length > 0);
+ if (length > utf16_length_) length = utf16_length_;
+ // memcpy as much as is cached in the buffer.
+ unsigned buffer_length =
+ last_byte_of_buffer_unused_ ? kBufferSize - 1 : kBufferSize;
+ unsigned memcpy_length = length <= buffer_length ? length : buffer_length;
+ memcpy(data, buffer_, memcpy_length*sizeof(uint16_t));
+ if (length <= buffer_length) return length;
+ ASSERT(unbuffered_start_ != NULL);
+ // Copy the rest the slow way.
+ WriteUtf16Slow(unbuffered_start_,
+ data + buffer_length,
+ length - buffer_length);
+ return length;
}
} // namespace unibrow
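The new Utf8Decoder is a two-phase replacement for the pull-style Utf8InputBuffer: Reset scans the whole UTF-8 stream once, caching the first kBufferSize UTF-16 units and recording where the uncached tail begins, and WriteUtf16 later memcpys the cached prefix and re-decodes only the tail. A minimal driver for the API, as an editorial sketch rather than part of the patch (the helper name DecodeToUtf16 and the buffer size 128 are invented, and it assumes compilation inside the V8 tree with src/unicode-inl.h available):

    #include "unicode-inl.h"

    // Decode UTF-8 bytes into a caller-supplied UTF-16 buffer (sketch).
    unsigned DecodeToUtf16(const char* utf8, unsigned byte_length,
                           uint16_t* out, unsigned out_capacity) {
      unibrow::Utf8Decoder<128> decoder(utf8, byte_length);
      unsigned utf16_length = decoder.Utf16Length();  // known after one scan
      if (utf16_length == 0 || utf16_length > out_capacity) return 0;
      // The first 128 units come straight from the cache; any tail is
      // re-decoded from the original stream via WriteUtf16Slow.
      return decoder.WriteUtf16(out, utf16_length);
    }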
diff --git a/src/3rdparty/v8/src/unicode.cc b/src/3rdparty/v8/src/unicode.cc
index 14f3806..04065b0 100644
--- a/src/3rdparty/v8/src/unicode.cc
+++ b/src/3rdparty/v8/src/unicode.cc
@@ -277,84 +277,74 @@ uchar Utf8::CalculateValue(const byte* str,
}
-const byte* Utf8::ReadBlock(Buffer<const char*> str, byte* buffer,
- unsigned capacity, unsigned* chars_read_ptr, unsigned* offset_ptr) {
- unsigned offset = *offset_ptr;
- // Bail out early if we've reached the end of the string.
- if (offset == str.length()) {
- *chars_read_ptr = 0;
- return NULL;
- }
- const byte* data = reinterpret_cast<const byte*>(str.data());
- if (data[offset] <= kMaxOneByteChar) {
- // The next character is an ASCII char so we scan forward over
- // the following ASCII characters and return the next pure ASCII
- // substring
- const byte* result = data + offset;
- offset++;
- while ((offset < str.length()) && (data[offset] <= kMaxOneByteChar))
- offset++;
- *chars_read_ptr = offset - *offset_ptr;
- *offset_ptr = offset;
- return result;
- } else {
- // The next character is non-ASCII so we just fill the buffer
+void Utf8DecoderBase::Reset(uint16_t* buffer,
+ unsigned buffer_length,
+ const uint8_t* stream,
+ unsigned stream_length) {
+ // Assume everything will fit in the buffer and the stream won't be needed again.
+ last_byte_of_buffer_unused_ = false;
+ unbuffered_start_ = NULL;
+ bool writing_to_buffer = true;
+ // Loop until stream is read, writing to buffer as long as buffer has space.
+ unsigned utf16_length = 0;
+ while (stream_length != 0) {
unsigned cursor = 0;
- unsigned chars_read = 0;
- while (offset < str.length()) {
- uchar c = data[offset];
- if (c <= kMaxOneByteChar) {
- // Fast case for ASCII characters
- if (!CharacterStream::EncodeAsciiCharacter(c,
- buffer,
- capacity,
- cursor))
- break;
- offset += 1;
+ uint32_t character = Utf8::ValueOf(stream, stream_length, &cursor);
+ ASSERT(cursor > 0 && cursor <= stream_length);
+ stream += cursor;
+ stream_length -= cursor;
+ bool is_two_characters = character > Utf16::kMaxNonSurrogateCharCode;
+ utf16_length += is_two_characters ? 2 : 1;
+ // Don't need to write to the buffer, but still need utf16_length.
+ if (!writing_to_buffer) continue;
+ // Write out the characters to the buffer.
+ // Must check for equality with buffer_length as we've already updated it.
+ if (utf16_length <= buffer_length) {
+ if (is_two_characters) {
+ *buffer++ = Utf16::LeadSurrogate(character);
+ *buffer++ = Utf16::TrailSurrogate(character);
} else {
- unsigned chars = 0;
- c = Utf8::ValueOf(data + offset, str.length() - offset, &chars);
- if (!CharacterStream::EncodeNonAsciiCharacter(c,
- buffer,
- capacity,
- cursor))
- break;
- offset += chars;
+ *buffer++ = character;
+ }
+ if (utf16_length == buffer_length) {
+ // Just wrote the last character of the buffer.
+ writing_to_buffer = false;
+ unbuffered_start_ = stream;
}
- chars_read++;
+ continue;
}
- *offset_ptr = offset;
- *chars_read_ptr = chars_read;
- return buffer;
+ // Have gone over the buffer: the surrogate pair doesn't fit.
+ // Leave the last slot unused and back up to this character.
+ ASSERT(is_two_characters);
+ writing_to_buffer = false;
+ last_byte_of_buffer_unused_ = true;
+ unbuffered_start_ = stream - cursor;
}
+ utf16_length_ = utf16_length;
}
-unsigned CharacterStream::Length() {
- unsigned result = 0;
- while (has_more()) {
- result++;
- GetNext();
- }
- Rewind();
- return result;
-}
-unsigned CharacterStream::Utf16Length() {
- unsigned result = 0;
- while (has_more()) {
- uchar c = GetNext();
- result += c > Utf16::kMaxNonSurrogateCharCode ? 2 : 1;
+void Utf8DecoderBase::WriteUtf16Slow(const uint8_t* stream,
+ uint16_t* data,
+ unsigned data_length) {
+ while (data_length != 0) {
+ unsigned cursor = 0;
+ uint32_t character = Utf8::ValueOf(stream, Utf8::kMaxEncodedSize, &cursor);
+ // No bounds checking is done on stream here;
+ // it was already validated in Reset.
+ stream += cursor;
+ if (character > unibrow::Utf16::kMaxNonSurrogateCharCode) {
+ *data++ = Utf16::LeadSurrogate(character);
+ *data++ = Utf16::TrailSurrogate(character);
+ ASSERT(data_length > 1);
+ data_length -= 2;
+ } else {
+ *data++ = character;
+ data_length -= 1;
+ }
}
- Rewind();
- return result;
}
-void CharacterStream::Seek(unsigned position) {
- Rewind();
- for (unsigned i = 0; i < position; i++) {
- GetNext();
- }
-}
// Uppercase: point.category == 'Lu'
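The subtlest branch in Reset above is the straddling case: a supplementary-plane character needs two UTF-16 slots, so when only one slot remains the decoder leaves it unused, rewinds unbuffered_start_ to that character, and defers it to WriteUtf16Slow. A standalone trace of that bookkeeping, with illustrative constants that are not taken from the patch:

    #include <cstdio>

    int main() {
      const unsigned buffer_length = 4;  // hypothetical 4-slot UTF-16 cache
      unsigned utf16_length = 3;         // three BMP characters already cached
      bool is_two_characters = true;     // next character is outside the BMP
      utf16_length += is_two_characters ? 2 : 1;  // 5: the pair no longer fits
      bool last_slot_unused = utf16_length > buffer_length;
      printf("utf16_length=%u last_slot_unused=%d\n",
             utf16_length, last_slot_unused);  // utf16_length=5 last_slot_unused=1
    }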
diff --git a/src/3rdparty/v8/src/unicode.h b/src/3rdparty/v8/src/unicode.h
index 00227c2..550b04a 100644
--- a/src/3rdparty/v8/src/unicode.h
+++ b/src/3rdparty/v8/src/unicode.h
@@ -31,7 +31,7 @@
#ifndef _WIN32_WCE
#include <sys/types.h>
#endif
-
+#include <globals.h>
/**
* \file
* Definitions and convenience functions for working with unicode.
@@ -102,21 +102,6 @@ class UnicodeData {
static const uchar kMaxCodePoint;
};
-// --- U t f 8 a n d 16 ---
-
-template <typename Data>
-class Buffer {
- public:
- inline Buffer(Data data, unsigned length) : data_(data), length_(length) { }
- inline Buffer() : data_(0), length_(0) { }
- Data data() { return data_; }
- unsigned length() { return length_; }
- private:
- Data data_;
- unsigned length_;
-};
-
-
class Utf16 {
public:
static inline bool IsLeadSurrogate(int code) {
@@ -142,22 +127,32 @@ class Utf16 {
// One UTF-16 surrogate is encoded (illegally) as 3 UTF-8 bytes.
// The illegality stems from the surrogate not being part of a pair.
static const int kUtf8BytesToCodeASurrogate = 3;
- static inline uchar LeadSurrogate(int char_code) {
+ static inline uint16_t LeadSurrogate(uint32_t char_code) {
return 0xd800 + (((char_code - 0x10000) >> 10) & 0x3ff);
}
- static inline uchar TrailSurrogate(int char_code) {
+ static inline uint16_t TrailSurrogate(uint32_t char_code) {
return 0xdc00 + (char_code & 0x3ff);
}
};
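The surrogate helpers now take a full uint32_t code point and return individual uint16_t units. The same formulas in a standalone check, applied to U+1F600 (an arbitrary code point above the BMP, chosen for illustration):

    #include <cstdint>
    #include <cstdio>

    // Same math as unibrow::Utf16::{Lead,Trail}Surrogate above.
    static uint16_t Lead(uint32_t c)  { return 0xd800 + (((c - 0x10000) >> 10) & 0x3ff); }
    static uint16_t Trail(uint32_t c) { return 0xdc00 + (c & 0x3ff); }

    int main() {
      uint32_t c = 0x1f600;
      printf("%04x %04x\n", Lead(c), Trail(c));  // d83d de00
    }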
+class Latin1 {
+ public:
+#ifndef ENABLE_LATIN_1
+ static const unsigned kMaxChar = 0x7f;
+#else
+ static const unsigned kMaxChar = 0xff;
+#endif
+ // Returns 0 if the character does not convert to a single Latin-1
+ // character, or if it doesn't convert back to Latin-1 via the inverse
+ // operation (upper to lower, etc.).
+ static inline uint16_t ConvertNonLatin1ToLatin1(uint16_t);
+};
class Utf8 {
public:
static inline uchar Length(uchar chr, int previous);
static inline unsigned Encode(
char* out, uchar c, int previous);
- static const byte* ReadBlock(Buffer<const char*> str, byte* buffer,
- unsigned capacity, unsigned* chars_read, unsigned* offset);
static uchar CalculateValue(const byte* str,
unsigned length,
unsigned* cursor);
@@ -172,92 +167,47 @@ class Utf8 {
// that match are coded as a 4 byte UTF-8 sequence.
static const unsigned kBytesSavedByCombiningSurrogates = 2;
static const unsigned kSizeOfUnmatchedSurrogate = 3;
-
- private:
- template <unsigned s> friend class Utf8InputBuffer;
- friend class Test;
static inline uchar ValueOf(const byte* str,
unsigned length,
unsigned* cursor);
};
-// --- C h a r a c t e r S t r e a m ---
-class CharacterStream {
+class Utf8DecoderBase {
public:
- inline uchar GetNext();
- inline bool has_more() { return remaining_ != 0; }
- // Note that default implementation is not efficient.
- virtual void Seek(unsigned);
- unsigned Length();
- unsigned Utf16Length();
- virtual ~CharacterStream() { }
- static inline bool EncodeCharacter(uchar c, byte* buffer, unsigned capacity,
- unsigned& offset);
- static inline bool EncodeAsciiCharacter(uchar c, byte* buffer,
- unsigned capacity, unsigned& offset);
- static inline bool EncodeNonAsciiCharacter(uchar c, byte* buffer,
- unsigned capacity, unsigned& offset);
- static inline uchar DecodeCharacter(const byte* buffer, unsigned* offset);
- virtual void Rewind() = 0;
-
+ // Initialization done in subclass.
+ inline Utf8DecoderBase();
+ inline Utf8DecoderBase(uint16_t* buffer,
+ unsigned buffer_length,
+ const uint8_t* stream,
+ unsigned stream_length);
+ inline unsigned Utf16Length() const { return utf16_length_; }
protected:
- virtual void FillBuffer() = 0;
- virtual bool BoundsCheck(unsigned offset) = 0;
- // The number of characters left in the current buffer
- unsigned remaining_;
- // The current offset within the buffer
- unsigned cursor_;
- // The buffer containing the decoded characters.
- const byte* buffer_;
-};
-
-// --- I n p u t B u f f e r ---
-
-/**
- * Provides efficient access to encoded characters in strings. It
- * does so by reading characters one block at a time, rather than one
- * character at a time, which gives string implementations an
- * opportunity to optimize the decoding.
- */
-template <class Reader, class Input = Reader*, unsigned kSize = 256>
-class InputBuffer : public CharacterStream {
- public:
- virtual void Rewind();
- inline void Reset(Input input);
- void Seek(unsigned position);
- inline void Reset(unsigned position, Input input);
- protected:
- InputBuffer() { }
- explicit InputBuffer(Input input) { Reset(input); }
- virtual void FillBuffer();
- virtual bool BoundsCheck(unsigned offset) {
- return (buffer_ != util_buffer_) || (offset < kSize);
- }
-
- // A custom offset that can be used by the string implementation to
- // mark progress within the encoded string.
- unsigned offset_;
- // The input string
- Input input_;
- // To avoid heap allocation, we keep an internal buffer to which
- // the encoded string can write its characters. The string
- // implementation is free to decide whether it wants to use this
- // buffer or not.
- byte util_buffer_[kSize];
+ // This reads all characters and sets the utf16_length_.
+ // The first buffer_length utf16 chars are cached in the buffer.
+ void Reset(uint16_t* buffer,
+ unsigned buffer_length,
+ const uint8_t* stream,
+ unsigned stream_length);
+ static void WriteUtf16Slow(const uint8_t* stream,
+ uint16_t* data,
+ unsigned length);
+ const uint8_t* unbuffered_start_;
+ unsigned utf16_length_;
+ bool last_byte_of_buffer_unused_;
+ private:
+ DISALLOW_COPY_AND_ASSIGN(Utf8DecoderBase);
};
-// --- U t f 8 I n p u t B u f f e r ---
-
-template <unsigned s = 256>
-class Utf8InputBuffer : public InputBuffer<Utf8, Buffer<const char*>, s> {
+template <unsigned kBufferSize>
+class Utf8Decoder : public Utf8DecoderBase {
public:
- inline Utf8InputBuffer() { }
- inline Utf8InputBuffer(const char* data, unsigned length);
- inline void Reset(const char* data, unsigned length) {
- InputBuffer<Utf8, Buffer<const char*>, s>::Reset(
- Buffer<const char*>(data, length));
- }
+ inline Utf8Decoder() {}
+ inline Utf8Decoder(const char* stream, unsigned length);
+ inline void Reset(const char* stream, unsigned length);
+ inline unsigned WriteUtf16(uint16_t* data, unsigned length) const;
+ private:
+ uint16_t buffer_[kBufferSize];
};
diff --git a/src/3rdparty/v8/src/uri.h b/src/3rdparty/v8/src/uri.h
new file mode 100644
index 0000000..c7a6301
--- /dev/null
+++ b/src/3rdparty/v8/src/uri.h
@@ -0,0 +1,309 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_URI_H_
+#define V8_URI_H_
+
+#include "v8.h"
+
+#include "string-search.h"
+#include "v8utils.h"
+#include "v8conversions.h"
+
+namespace v8 {
+namespace internal {
+
+
+template <typename Char>
+static INLINE(Vector<const Char> GetCharVector(Handle<String> string));
+
+
+template <>
+Vector<const uint8_t> GetCharVector(Handle<String> string) {
+ String::FlatContent flat = string->GetFlatContent();
+ ASSERT(flat.IsAscii());
+ return flat.ToOneByteVector();
+}
+
+
+template <>
+Vector<const uc16> GetCharVector(Handle<String> string) {
+ String::FlatContent flat = string->GetFlatContent();
+ ASSERT(flat.IsTwoByte());
+ return flat.ToUC16Vector();
+}
+
+
+class URIUnescape : public AllStatic {
+ public:
+ template<typename Char>
+ static Handle<String> Unescape(Isolate* isolate, Handle<String> source);
+
+ private:
+ static const signed char kHexValue['g'];
+
+ template<typename Char>
+ static Handle<String> UnescapeSlow(
+ Isolate* isolate, Handle<String> string, int start_index);
+
+ static INLINE(int TwoDigitHex(uint16_t character1, uint16_t character2));
+
+ template <typename Char>
+ static INLINE(int UnescapeChar(Vector<const Char> vector,
+ int i,
+ int length,
+ int* step));
+};
+
+
+const signed char URIUnescape::kHexValue[] = {
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -1, -1, -1, -1, -1, -1,
+ -1, 10, 11, 12, 13, 14, 15, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, 10, 11, 12, 13, 14, 15 };
+
+
+template<typename Char>
+Handle<String> URIUnescape::Unescape(Isolate* isolate, Handle<String> source) {
+ int index;
+ { AssertNoAllocation no_allocation;
+ StringSearch<uint8_t, Char> search(isolate, STATIC_ASCII_VECTOR("%"));
+ index = search.Search(GetCharVector<Char>(source), 0);
+ if (index < 0) return source;
+ }
+ return UnescapeSlow<Char>(isolate, source, index);
+}
+
+
+template <typename Char>
+Handle<String> URIUnescape::UnescapeSlow(
+ Isolate* isolate, Handle<String> string, int start_index) {
+ bool one_byte = true;
+ int length = string->length();
+
+ int unescaped_length = 0;
+ { AssertNoAllocation no_allocation;
+ Vector<const Char> vector = GetCharVector<Char>(string);
+ for (int i = start_index; i < length; unescaped_length++) {
+ int step;
+ if (UnescapeChar(vector, i, length, &step) >
+ String::kMaxOneByteCharCode) {
+ one_byte = false;
+ }
+ i += step;
+ }
+ }
+
+ ASSERT(start_index < length);
+ Handle<String> first_part =
+ isolate->factory()->NewProperSubString(string, 0, start_index);
+
+ int dest_position = 0;
+ Handle<String> second_part;
+ if (one_byte) {
+ Handle<SeqOneByteString> dest =
+ isolate->factory()->NewRawOneByteString(unescaped_length);
+ AssertNoAllocation no_allocation;
+ Vector<const Char> vector = GetCharVector<Char>(string);
+ for (int i = start_index; i < length; dest_position++) {
+ int step;
+ dest->SeqOneByteStringSet(dest_position,
+ UnescapeChar(vector, i, length, &step));
+ i += step;
+ }
+ second_part = dest;
+ } else {
+ Handle<SeqTwoByteString> dest =
+ isolate->factory()->NewRawTwoByteString(unescaped_length);
+ AssertNoAllocation no_allocation;
+ Vector<const Char> vector = GetCharVector<Char>(string);
+ for (int i = start_index; i < length; dest_position++) {
+ int step;
+ dest->SeqTwoByteStringSet(dest_position,
+ UnescapeChar(vector, i, length, &step));
+ i += step;
+ }
+ second_part = dest;
+ }
+ return isolate->factory()->NewConsString(first_part, second_part);
+}
+
+
+int URIUnescape::TwoDigitHex(uint16_t character1, uint16_t character2) {
+ if (character1 > 'f') return -1;
+ int hi = kHexValue[character1];
+ if (hi == -1) return -1;
+ if (character2 > 'f') return -1;
+ int lo = kHexValue[character2];
+ if (lo == -1) return -1;
+ return (hi << 4) + lo;
+}
+
+
+template <typename Char>
+int URIUnescape::UnescapeChar(Vector<const Char> vector,
+ int i,
+ int length,
+ int* step) {
+ uint16_t character = vector[i];
+ int32_t hi = 0;
+ int32_t lo = 0;
+ if (character == '%' &&
+ i <= length - 6 &&
+ vector[i + 1] == 'u' &&
+ (hi = TwoDigitHex(vector[i + 2],
+ vector[i + 3])) != -1 &&
+ (lo = TwoDigitHex(vector[i + 4],
+ vector[i + 5])) != -1) {
+ *step = 6;
+ return (hi << 8) + lo;
+ } else if (character == '%' &&
+ i <= length - 3 &&
+ (lo = TwoDigitHex(vector[i + 1],
+ vector[i + 2])) != -1) {
+ *step = 3;
+ return lo;
+ } else {
+ *step = 1;
+ return character;
+ }
+}
+
+
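UnescapeChar recognizes two escape shapes at position i: a six-character "%uXXXX" (value (hi << 8) + lo) and a three-character "%XX" (value lo); anything else passes through with step 1. The same arithmetic re-derived standalone, with HexValue standing in for the kHexValue table:

    #include <cstdio>

    static int HexValue(char c) {  // stand-in for the kHexValue lookup
      if (c >= '0' && c <= '9') return c - '0';
      if (c >= 'A' && c <= 'F') return c - 'A' + 10;
      if (c >= 'a' && c <= 'f') return c - 'a' + 10;
      return -1;
    }

    static int TwoDigitHex(char a, char b) {
      int hi = HexValue(a), lo = HexValue(b);
      return (hi < 0 || lo < 0) ? -1 : (hi << 4) + lo;
    }

    int main() {
      printf("%d\n", TwoDigitHex('4', '1'));  // "%41": 3-char step, 65 ('A')
      printf("%d\n", (TwoDigitHex('0', '0') << 8) + TwoDigitHex('4', '1'));
      // "%u0041": 6-char step, also 65
    }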
+class URIEscape : public AllStatic {
+ public:
+ template<typename Char>
+ static Handle<String> Escape(Isolate* isolate, Handle<String> string);
+
+ private:
+ static const char kHexChars[17];
+ static const char kNotEscaped[256];
+
+ static bool IsNotEscaped(uint16_t c) { return kNotEscaped[c] != 0; }
+};
+
+
+const char URIEscape::kHexChars[] = "0123456789ABCDEF";
+
+
+// kNotEscaped is generated by the following:
+//
+// #!/bin/perl
+// for (my $i = 0; $i < 256; $i++) {
+// print "\n" if $i % 16 == 0;
+// my $c = chr($i);
+// my $escaped = 1;
+// $escaped = 0 if $c =~ m#[A-Za-z0-9@*_+./-]#;
+// print $escaped ? "0, " : "1, ";
+// }
+
+const char URIEscape::kNotEscaped[] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1,
+ 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+
+
+template<typename Char>
+Handle<String> URIEscape::Escape(Isolate* isolate, Handle<String> string) {
+ ASSERT(string->IsFlat());
+ int escaped_length = 0;
+ int length = string->length();
+
+ { AssertNoAllocation no_allocation;
+ Vector<const Char> vector = GetCharVector<Char>(string);
+ for (int i = 0; i < length; i++) {
+ uint16_t c = vector[i];
+ if (c >= 256) {
+ escaped_length += 6;
+ } else if (IsNotEscaped(c)) {
+ escaped_length++;
+ } else {
+ escaped_length += 3;
+ }
+
+ // We don't allow strings longer than the maximal length.
+ ASSERT(String::kMaxLength < 0x7fffffff - 6); // Cannot overflow.
+ if (escaped_length > String::kMaxLength) {
+ isolate->context()->mark_out_of_memory();
+ return Handle<String>::null();
+ }
+ }
+ }
+
+ // If the length is unchanged, no character needed escaping; return the original.
+ if (escaped_length == length) return string;
+
+ Handle<SeqOneByteString> dest =
+ isolate->factory()->NewRawOneByteString(escaped_length);
+ int dest_position = 0;
+
+ { AssertNoAllocation no_allocation;
+ Vector<const Char> vector = GetCharVector<Char>(string);
+ for (int i = 0; i < length; i++) {
+ uint16_t c = vector[i];
+ if (c >= 256) {
+ dest->SeqOneByteStringSet(dest_position, '%');
+ dest->SeqOneByteStringSet(dest_position+1, 'u');
+ dest->SeqOneByteStringSet(dest_position+2, kHexChars[c >> 12]);
+ dest->SeqOneByteStringSet(dest_position+3, kHexChars[(c >> 8) & 0xf]);
+ dest->SeqOneByteStringSet(dest_position+4, kHexChars[(c >> 4) & 0xf]);
+ dest->SeqOneByteStringSet(dest_position+5, kHexChars[c & 0xf]);
+ dest_position += 6;
+ } else if (IsNotEscaped(c)) {
+ dest->SeqOneByteStringSet(dest_position, c);
+ dest_position++;
+ } else {
+ dest->SeqOneByteStringSet(dest_position, '%');
+ dest->SeqOneByteStringSet(dest_position+1, kHexChars[c >> 4]);
+ dest->SeqOneByteStringSet(dest_position+2, kHexChars[c & 0xf]);
+ dest_position += 3;
+ }
+ }
+ }
+
+ return dest;
+}
+
+} } // namespace v8::internal
+
+#endif // V8_URI_H_
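Escape's first pass is pure length accounting: code units at or above 256 cost six output characters ("%uXXXX"), characters in kNotEscaped cost one, and everything else costs three ("%XX"). A standalone rendition of that rule, with IsNotEscaped simplified to alphanumerics for illustration:

    #include <cstdint>
    #include <cstdio>

    static bool IsNotEscaped(uint16_t c) {  // simplified kNotEscaped stand-in
      return (c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z') ||
             (c >= '0' && c <= '9');
    }

    int main() {
      const uint16_t input[] = {'a', ' ', 0x20ac};  // "a €"
      int escaped_length = 0;
      for (uint16_t c : input)
        escaped_length += (c >= 256) ? 6 : (IsNotEscaped(c) ? 1 : 3);
      printf("%d\n", escaped_length);  // 1 + 3 + 6 = 10, as in "a%20%u20AC"
    }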
diff --git a/src/3rdparty/v8/src/uri.js b/src/3rdparty/v8/src/uri.js
index b195f3d..1de22f8 100644
--- a/src/3rdparty/v8/src/uri.js
+++ b/src/3rdparty/v8/src/uri.js
@@ -165,11 +165,11 @@ function URIDecodeOctets(octets, result, index) {
throw new $URIError("URI malformed");
}
if (value < 0x10000) {
- result[index++] = value;
+ %_TwoByteSeqStringSetChar(result, index++, value);
return index;
} else {
- result[index++] = (value >> 10) + 0xd7c0;
- result[index++] = (value & 0x3ff) + 0xdc00;
+ %_TwoByteSeqStringSetChar(result, index++, (value >> 10) + 0xd7c0);
+ %_TwoByteSeqStringSetChar(result, index++, (value & 0x3ff) + 0xdc00);
return index;
}
}
@@ -178,43 +178,72 @@ function URIDecodeOctets(octets, result, index) {
// ECMA-262, section 15.1.3
function Encode(uri, unescape) {
var uriLength = uri.length;
- // We are going to pass result to %StringFromCharCodeArray
- // which does not expect any getters/setters installed
- // on the incoming array.
- var result = new InternalArray(uriLength);
+ var array = new InternalArray(uriLength);
var index = 0;
for (var k = 0; k < uriLength; k++) {
var cc1 = uri.charCodeAt(k);
if (unescape(cc1)) {
- result[index++] = cc1;
+ array[index++] = cc1;
} else {
if (cc1 >= 0xDC00 && cc1 <= 0xDFFF) throw new $URIError("URI malformed");
if (cc1 < 0xD800 || cc1 > 0xDBFF) {
- index = URIEncodeSingle(cc1, result, index);
+ index = URIEncodeSingle(cc1, array, index);
} else {
k++;
if (k == uriLength) throw new $URIError("URI malformed");
var cc2 = uri.charCodeAt(k);
if (cc2 < 0xDC00 || cc2 > 0xDFFF) throw new $URIError("URI malformed");
- index = URIEncodePair(cc1, cc2, result, index);
+ index = URIEncodePair(cc1, cc2, array, index);
}
}
}
- return %StringFromCharCodeArray(result);
+
+ var result = %NewString(array.length, NEW_ONE_BYTE_STRING);
+ for (var i = 0; i < array.length; i++) {
+ %_OneByteSeqStringSetChar(result, i, array[i]);
+ }
+ return result;
}
// ECMA-262, section 15.1.3
function Decode(uri, reserved) {
var uriLength = uri.length;
- // We are going to pass result to %StringFromCharCodeArray
- // which does not expect any getters/setters installed
- // on the incoming array.
- var result = new InternalArray(uriLength);
+ var one_byte = %NewString(uriLength, NEW_ONE_BYTE_STRING);
var index = 0;
- for (var k = 0; k < uriLength; k++) {
- var ch = uri.charAt(k);
- if (ch == '%') {
+ var k = 0;
+
+ // Optimistically assume an ASCII string.
+ for ( ; k < uriLength; k++) {
+ var code = uri.charCodeAt(k);
+ if (code == 37) { // '%'
+ if (k + 2 >= uriLength) throw new $URIError("URI malformed");
+ var cc = URIHexCharsToCharCode(uri.charCodeAt(k+1), uri.charCodeAt(k+2));
+ if (cc >> 7) break; // Assumption wrong, two byte string.
+ if (reserved(cc)) {
+ %_OneByteSeqStringSetChar(one_byte, index++, 37); // '%'.
+ %_OneByteSeqStringSetChar(one_byte, index++, uri.charCodeAt(k+1));
+ %_OneByteSeqStringSetChar(one_byte, index++, uri.charCodeAt(k+2));
+ } else {
+ %_OneByteSeqStringSetChar(one_byte, index++, cc);
+ }
+ k += 2;
+ } else {
+ if (code > 0x7f) break; // Assumption wrong, two byte string.
+ %_OneByteSeqStringSetChar(one_byte, index++, code);
+ }
+ }
+
+ one_byte = %TruncateString(one_byte, index);
+ if (k == uriLength) return one_byte;
+
+ // Write into two byte string.
+ var two_byte = %NewString(uriLength - k, NEW_TWO_BYTE_STRING);
+ index = 0;
+
+ for ( ; k < uriLength; k++) {
+ var code = uri.charCodeAt(k);
+ if (code == 37) { // '%'
if (k + 2 >= uriLength) throw new $URIError("URI malformed");
var cc = URIHexCharsToCharCode(uri.charCodeAt(++k), uri.charCodeAt(++k));
if (cc >> 7) {
@@ -229,22 +258,21 @@ function Decode(uri, reserved) {
octets[i] = URIHexCharsToCharCode(uri.charCodeAt(++k),
uri.charCodeAt(++k));
}
- index = URIDecodeOctets(octets, result, index);
+ index = URIDecodeOctets(octets, two_byte, index);
+ } else if (reserved(cc)) {
+ %_TwoByteSeqStringSetChar(two_byte, index++, 37); // '%'.
+ %_TwoByteSeqStringSetChar(two_byte, index++, uri.charCodeAt(k - 1));
+ %_TwoByteSeqStringSetChar(two_byte, index++, uri.charCodeAt(k));
} else {
- if (reserved(cc)) {
- result[index++] = 37; // Char code of '%'.
- result[index++] = uri.charCodeAt(k - 1);
- result[index++] = uri.charCodeAt(k);
- } else {
- result[index++] = cc;
- }
+ %_TwoByteSeqStringSetChar(two_byte, index++, cc);
}
} else {
- result[index++] = ch.charCodeAt(0);
+ %_TwoByteSeqStringSetChar(two_byte, index++, code);
}
}
- result.length = index;
- return %StringFromCharCodeArray(result);
+
+ two_byte = %TruncateString(two_byte, index);
+ return one_byte + two_byte;
}
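Decode now works in two phases: an optimistic pass writes into a one-byte string until it meets a code unit above 0x7f, then a second pass finishes into a two-byte string and the two halves are concatenated. A C++ sketch of the same split, with the percent-escape handling elided and `units` standing for already-decoded code units (both simplifications are the editor's, not the patch's):

    #include <string>
    #include <utility>

    std::pair<std::string, std::u16string> SplitDecode(const std::u16string& units) {
      std::string one_byte;     // phase 1: optimistic one-byte output
      std::u16string two_byte;  // phase 2: everything after the assumption fails
      size_t k = 0;
      for (; k < units.size(); k++) {
        if (units[k] > 0x7f) break;  // assumption wrong: switch to two-byte
        one_byte.push_back(static_cast<char>(units[k]));
      }
      for (; k < units.size(); k++) two_byte.push_back(units[k]);
      return {one_byte, two_byte};
    }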
diff --git a/src/3rdparty/v8/src/utils.h b/src/3rdparty/v8/src/utils.h
index e03f96f..c391b9c 100644
--- a/src/3rdparty/v8/src/utils.h
+++ b/src/3rdparty/v8/src/utils.h
@@ -249,6 +249,7 @@ class BitField {
// using a shift count of 32.
static const uint32_t kMask = ((1U << shift) << size) - (1U << shift);
static const uint32_t kShift = shift;
+ static const uint32_t kSize = size;
// Value for the field with all bits set.
static const T kMax = static_cast<T>((1U << size) - 1);
@@ -304,7 +305,7 @@ inline uint32_t ComputeLongHash(uint64_t key) {
hash = hash ^ (hash >> 11);
hash = hash + (hash << 6);
hash = hash ^ (hash >> 22);
- return (uint32_t) hash;
+ return static_cast<uint32_t>(hash);
}
@@ -522,11 +523,22 @@ class ScopedVector : public Vector<T> {
DISALLOW_IMPLICIT_CONSTRUCTORS(ScopedVector);
};
+#define STATIC_ASCII_VECTOR(x) \
+ v8::internal::Vector<const uint8_t>(reinterpret_cast<const uint8_t*>(x), \
+ ARRAY_SIZE(x)-1)
inline Vector<const char> CStrVector(const char* data) {
return Vector<const char>(data, StrLength(data));
}
+inline Vector<const uint8_t> OneByteVector(const char* data, int length) {
+ return Vector<const uint8_t>(reinterpret_cast<const uint8_t*>(data), length);
+}
+
+inline Vector<const uint8_t> OneByteVector(const char* data) {
+ return OneByteVector(data, StrLength(data));
+}
+
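A note on the macro's "- 1": sizeof on a string literal counts the trailing NUL, so ARRAY_SIZE(x) - 1 is the character count. A quick standalone check (ARRAY_SIZE_DEMO is a local stand-in for V8's ARRAY_SIZE):

    #include <cstdio>

    #define ARRAY_SIZE_DEMO(a) (sizeof(a) / sizeof((a)[0]))

    int main() {
      printf("%zu\n", ARRAY_SIZE_DEMO("%"));      // 2: '%' plus the NUL
      printf("%zu\n", ARRAY_SIZE_DEMO("%") - 1);  // 1: the vector length
    }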
inline Vector<char> MutableCStrVector(char* data) {
return Vector<char>(data, StrLength(data));
}
@@ -765,7 +777,9 @@ class SequenceCollector : public Collector<T, growth_factor, max_growth> {
// Compare ASCII/16bit chars to ASCII/16bit chars.
template <typename lchar, typename rchar>
-inline int CompareChars(const lchar* lhs, const rchar* rhs, int chars) {
+inline int CompareCharsUnsigned(const lchar* lhs,
+ const rchar* rhs,
+ int chars) {
const lchar* limit = lhs + chars;
#ifdef V8_HOST_CAN_READ_UNALIGNED
if (sizeof(*lhs) == sizeof(*rhs)) {
@@ -790,6 +804,33 @@ inline int CompareChars(const lchar* lhs, const rchar* rhs, int chars) {
return 0;
}
+template<typename lchar, typename rchar>
+inline int CompareChars(const lchar* lhs, const rchar* rhs, int chars) {
+ ASSERT(sizeof(lchar) <= 2);
+ ASSERT(sizeof(rchar) <= 2);
+ if (sizeof(lchar) == 1) {
+ if (sizeof(rchar) == 1) {
+ return CompareCharsUnsigned(reinterpret_cast<const uint8_t*>(lhs),
+ reinterpret_cast<const uint8_t*>(rhs),
+ chars);
+ } else {
+ return CompareCharsUnsigned(reinterpret_cast<const uint8_t*>(lhs),
+ reinterpret_cast<const uint16_t*>(rhs),
+ chars);
+ }
+ } else {
+ if (sizeof(rchar) == 1) {
+ return CompareCharsUnsigned(reinterpret_cast<const uint16_t*>(lhs),
+ reinterpret_cast<const uint8_t*>(rhs),
+ chars);
+ } else {
+ return CompareCharsUnsigned(reinterpret_cast<const uint16_t*>(lhs),
+ reinterpret_cast<const uint16_t*>(rhs),
+ chars);
+ }
+ }
+}
+
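The reason CompareChars routes everything through unsigned types: a Latin-1 byte above 0x7f stored in a plain char is negative on signed-char targets and would compare below its own UTF-16 value. A standalone illustration:

    #include <cstdint>
    #include <cstdio>

    int main() {
      char a = static_cast<char>(0xe9);  // 'é' as a Latin-1 byte
      uint16_t b = 0x00e9;               // the same character as a UTF-16 unit
      // With signed char, a promotes to -23 and the naive comparison fails;
      // going through uint8_t restores equality.
      printf("%d %d\n", a == b, static_cast<uint8_t>(a) == b);  // 0 1
    }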
// Calculate 10^exponent.
inline int TenToThe(int exponent) {
@@ -1015,6 +1056,7 @@ class BailoutId {
static BailoutId FunctionEntry() { return BailoutId(kFunctionEntryId); }
static BailoutId Declarations() { return BailoutId(kDeclarationsId); }
static BailoutId FirstUsable() { return BailoutId(kFirstUsableId); }
+ static BailoutId StubEntry() { return BailoutId(kStubEntryId); }
bool IsNone() const { return id_ == kNoneId; }
bool operator==(const BailoutId& other) const { return id_ == other.id_; }
@@ -1030,9 +1072,12 @@ class BailoutId {
// code (function declarations).
static const int kDeclarationsId = 3;
- // Ever FunctionState starts with this id.
+ // Every FunctionState starts with this id.
static const int kFirstUsableId = 4;
+ // Every compiled stub starts with this id.
+ static const int kStubEntryId = 5;
+
int id_;
};
diff --git a/src/3rdparty/v8/src/v8-counters.cc b/src/3rdparty/v8/src/v8-counters.cc
index 3f83dff..4107dd3 100644
--- a/src/3rdparty/v8/src/v8-counters.cc
+++ b/src/3rdparty/v8/src/v8-counters.cc
@@ -86,17 +86,6 @@ Counters::Counters() {
size_of_FIXED_ARRAY_##name##_ = size_of_FIXED_ARRAY_##name;
FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(SC)
#undef SC
-
- StatsCounter state_counters[] = {
-#define COUNTER_NAME(name) \
- { "c:V8.State" #name, NULL, false },
- STATE_TAG_LIST(COUNTER_NAME)
-#undef COUNTER_NAME
- };
-
- for (int i = 0; i < kSlidingStateWindowCounterCount; ++i) {
- state_counters_[i] = state_counters[i];
- }
}
void Counters::ResetHistograms() {
diff --git a/src/3rdparty/v8/src/v8-counters.h b/src/3rdparty/v8/src/v8-counters.h
index fad3454..374ebbc 100644
--- a/src/3rdparty/v8/src/v8-counters.h
+++ b/src/3rdparty/v8/src/v8-counters.h
@@ -50,7 +50,6 @@ namespace internal {
HT(compile_eval, V8.CompileEval) \
HT(compile_lazy, V8.CompileLazy)
-
#define HISTOGRAM_PERCENTAGE_LIST(HP) \
HP(external_fragmentation_total, \
V8.MemoryExternalFragmentationTotal) \
@@ -99,7 +98,7 @@ namespace internal {
SC(alive_after_last_gc, V8.AliveAfterLastGC) \
SC(objs_since_last_young, V8.ObjsSinceLastYoung) \
SC(objs_since_last_full, V8.ObjsSinceLastFull) \
- SC(symbol_table_capacity, V8.SymbolTableCapacity) \
+ SC(string_table_capacity, V8.StringTableCapacity) \
SC(number_of_symbols, V8.NumberOfSymbols) \
SC(script_wrappers, V8.ScriptWrappers) \
SC(call_initialize_stubs, V8.CallInitializeStubs) \
@@ -374,16 +373,9 @@ class Counters {
kSizeOfFIXED_ARRAY__##name,
FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(COUNTER_ID)
#undef COUNTER_ID
-#define COUNTER_ID(name) k_##name,
- STATE_TAG_LIST(COUNTER_ID)
-#undef COUNTER_ID
stats_counter_count
};
- StatsCounter* state_counters(StateTag state) {
- return &state_counters_[state];
- }
-
void ResetHistograms();
private:
@@ -426,15 +418,6 @@ class Counters {
FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(SC)
#undef SC
- enum {
-#define COUNTER_ID(name) __##name,
- STATE_TAG_LIST(COUNTER_ID)
-#undef COUNTER_ID
- kSlidingStateWindowCounterCount
- };
-
- // Sliding state window counters.
- StatsCounter state_counters_[kSlidingStateWindowCounterCount];
friend class Isolate;
DISALLOW_IMPLICIT_CONSTRUCTORS(Counters);
diff --git a/src/3rdparty/v8/src/v8.cc b/src/3rdparty/v8/src/v8.cc
index 7d01582..1753650 100644
--- a/src/3rdparty/v8/src/v8.cc
+++ b/src/3rdparty/v8/src/v8.cc
@@ -63,8 +63,6 @@ static EntropySource entropy_source;
bool V8::Initialize(Deserializer* des) {
- FlagList::EnforceFlagImplications();
-
InitializeOncePerProcess();
// The current thread may not yet had entered an isolate to run.
@@ -115,7 +113,9 @@ void V8::TearDown() {
ElementsAccessor::TearDown();
LOperand::TearDownCaches();
+ ExternalReference::TearDownMathExpData();
RegisteredExtension::UnregisterAll();
+ Isolate::GlobalTearDown();
is_running_ = false;
has_been_disposed_ = true;
@@ -262,31 +262,20 @@ Object* V8::FillHeapNumberWithRandom(Object* heap_number,
}
void V8::InitializeOncePerProcessImpl() {
- OS::SetUp();
-
- use_crankshaft_ = FLAG_crankshaft;
-
- if (Serializer::enabled()) {
- use_crankshaft_ = false;
- }
-
- CPU::SetUp();
- if (!CPU::SupportsCrankshaft()) {
- use_crankshaft_ = false;
- }
-
- OS::PostSetUp();
-
- RuntimeProfiler::GlobalSetUp();
-
- ElementsAccessor::InitializeOncePerProcess();
-
+ FlagList::EnforceFlagImplications();
if (FLAG_stress_compaction) {
FLAG_force_marking_deque_overflows = true;
FLAG_gc_global = true;
FLAG_max_new_space_size = (1 << (kPageSizeBits - 10)) * 2;
}
-
+ OS::SetUp();
+ CPU::SetUp();
+ use_crankshaft_ = FLAG_crankshaft
+ && !Serializer::enabled()
+ && CPU::SupportsCrankshaft();
+ OS::PostSetUp();
+ RuntimeProfiler::GlobalSetUp();
+ ElementsAccessor::InitializeOncePerProcess();
LOperand::SetUpCaches();
SetUpJSCallerSavedCodeData();
SamplerRegistry::SetUp();
diff --git a/src/3rdparty/v8/src/v8.h b/src/3rdparty/v8/src/v8.h
index 201cdf2..e9c3d40 100644
--- a/src/3rdparty/v8/src/v8.h
+++ b/src/3rdparty/v8/src/v8.h
@@ -70,6 +70,7 @@
#include "log-inl.h"
#include "cpu-profiler-inl.h"
#include "handles-inl.h"
+#include "heap-snapshot-generator-inl.h"
#include "zone-inl.h"
namespace v8 {
diff --git a/src/3rdparty/v8/src/v8conversions.cc b/src/3rdparty/v8/src/v8conversions.cc
index bf175e5..900b62d 100644
--- a/src/3rdparty/v8/src/v8conversions.cc
+++ b/src/3rdparty/v8/src/v8conversions.cc
@@ -41,40 +41,40 @@ namespace internal {
namespace {
-// C++-style iterator adaptor for StringInputBuffer
+// C++-style iterator adaptor for StringCharacterStream
// (unlike C++ iterators the end-marker has different type).
-class StringInputBufferIterator {
+class StringCharacterStreamIterator {
public:
class EndMarker {};
- explicit StringInputBufferIterator(StringInputBuffer* buffer);
+ explicit StringCharacterStreamIterator(StringCharacterStream* stream);
- int operator*() const;
+ uint16_t operator*() const;
void operator++();
bool operator==(EndMarker const&) const { return end_; }
bool operator!=(EndMarker const& m) const { return !end_; }
private:
- StringInputBuffer* const buffer_;
- int current_;
+ StringCharacterStream* const stream_;
+ uint16_t current_;
bool end_;
};
-StringInputBufferIterator::StringInputBufferIterator(
- StringInputBuffer* buffer) : buffer_(buffer) {
+StringCharacterStreamIterator::StringCharacterStreamIterator(
+ StringCharacterStream* stream) : stream_(stream) {
++(*this);
}
-int StringInputBufferIterator::operator*() const {
+uint16_t StringCharacterStreamIterator::operator*() const {
return current_;
}
-void StringInputBufferIterator::operator++() {
- end_ = !buffer_->has_more();
+void StringCharacterStreamIterator::operator++() {
+ end_ = !stream_->HasMore();
if (!end_) {
- current_ = buffer_->GetNext();
+ current_ = stream_->GetNext();
}
}
} // End anonymous namespace.
@@ -83,9 +83,10 @@ void StringInputBufferIterator::operator++() {
double StringToDouble(UnicodeCache* unicode_cache,
String* str, int flags, double empty_string_val) {
StringShape shape(str);
+ // TODO(dcarney): Use a Visitor here.
if (shape.IsSequentialAscii()) {
- const char* begin = SeqAsciiString::cast(str)->GetChars();
- const char* end = begin + str->length();
+ const uint8_t* begin = SeqOneByteString::cast(str)->GetChars();
+ const uint8_t* end = begin + str->length();
return InternalStringToDouble(unicode_cache, begin, end, flags,
empty_string_val);
} else if (shape.IsSequentialTwoByte()) {
@@ -94,10 +95,11 @@ double StringToDouble(UnicodeCache* unicode_cache,
return InternalStringToDouble(unicode_cache, begin, end, flags,
empty_string_val);
} else {
- StringInputBuffer buffer(str);
+ ConsStringIteratorOp op;
+ StringCharacterStream stream(str, &op);
return InternalStringToDouble(unicode_cache,
- StringInputBufferIterator(&buffer),
- StringInputBufferIterator::EndMarker(),
+ StringCharacterStreamIterator(&stream),
+ StringCharacterStreamIterator::EndMarker(),
flags,
empty_string_val);
}
@@ -108,19 +110,21 @@ double StringToInt(UnicodeCache* unicode_cache,
String* str,
int radix) {
StringShape shape(str);
+ // TODO(dcarney): Use a Visitor here.
if (shape.IsSequentialAscii()) {
- const char* begin = SeqAsciiString::cast(str)->GetChars();
- const char* end = begin + str->length();
+ const uint8_t* begin = SeqOneByteString::cast(str)->GetChars();
+ const uint8_t* end = begin + str->length();
return InternalStringToInt(unicode_cache, begin, end, radix);
} else if (shape.IsSequentialTwoByte()) {
const uc16* begin = SeqTwoByteString::cast(str)->GetChars();
const uc16* end = begin + str->length();
return InternalStringToInt(unicode_cache, begin, end, radix);
} else {
- StringInputBuffer buffer(str);
+ ConsStringIteratorOp op;
+ StringCharacterStream stream(str, &op);
return InternalStringToInt(unicode_cache,
- StringInputBufferIterator(&buffer),
- StringInputBufferIterator::EndMarker(),
+ StringCharacterStreamIterator(&stream),
+ StringCharacterStreamIterator::EndMarker(),
radix);
}
}
diff --git a/src/3rdparty/v8/src/v8globals.h b/src/3rdparty/v8/src/v8globals.h
index 95390ad..072c365 100644
--- a/src/3rdparty/v8/src/v8globals.h
+++ b/src/3rdparty/v8/src/v8globals.h
@@ -71,6 +71,8 @@ const Address kZapValue =
reinterpret_cast<Address>(V8_UINT64_C(0xdeadbeedbeadbeef));
const Address kHandleZapValue =
reinterpret_cast<Address>(V8_UINT64_C(0x1baddead0baddeaf));
+const Address kGlobalHandleZapValue =
+ reinterpret_cast<Address>(V8_UINT64_C(0x1baffed00baffedf));
const Address kFromSpaceZapValue =
reinterpret_cast<Address>(V8_UINT64_C(0x1beefdad0beefdaf));
const uint64_t kDebugZapValue = V8_UINT64_C(0xbadbaddbbadbaddb);
@@ -79,6 +81,7 @@ const uint64_t kFreeListZapValue = 0xfeed1eaffeed1eaf;
#else
const Address kZapValue = reinterpret_cast<Address>(0xdeadbeef);
const Address kHandleZapValue = reinterpret_cast<Address>(0xbaddeaf);
+const Address kGlobalHandleZapValue = reinterpret_cast<Address>(0xbaffedf);
const Address kFromSpaceZapValue = reinterpret_cast<Address>(0xbeefdaf);
const uint32_t kSlotsZapValue = 0xbeefdeef;
const uint32_t kDebugZapValue = 0xbadbaddb;
@@ -260,16 +263,20 @@ enum InlineCacheState {
// Like MONOMORPHIC but check failed due to prototype.
MONOMORPHIC_PROTOTYPE_FAILURE,
// Multiple receiver types have been seen.
+ POLYMORPHIC,
+ // Many receiver types have been seen.
MEGAMORPHIC,
- // Special states for debug break or step in prepare stubs.
- DEBUG_BREAK,
- DEBUG_PREPARE_STEP_IN
+ // A generic handler is installed and no extra typefeedback is recorded.
+ GENERIC,
+ // Special state for debug break or step in prepare stubs.
+ DEBUG_STUB
};
enum CheckType {
RECEIVER_MAP_CHECK,
STRING_CHECK,
+ SYMBOL_CHECK,
NUMBER_CHECK,
BOOLEAN_CHECK
};
@@ -287,7 +294,7 @@ enum CallFunctionFlags {
enum InlineCacheHolderFlag {
OWN_MAP, // For fast properties objects.
- PROTOTYPE_MAP // For slow properties objects (except GlobalObjects).
+ DELEGATE_MAP // For slow properties objects (except GlobalObjects).
};
@@ -351,20 +358,13 @@ struct AccessorDescriptor {
// VMState object leaves a state by popping the current state from the
// stack.
-#define STATE_TAG_LIST(V) \
- V(JS) \
- V(GC) \
- V(COMPILER) \
- V(PARALLEL_COMPILER_PROLOGUE) \
- V(OTHER) \
- V(EXTERNAL)
-
enum StateTag {
-#define DEF_STATE_TAG(name) name,
- STATE_TAG_LIST(DEF_STATE_TAG)
-#undef DEF_STATE_TAG
- // Pseudo-types.
- state_tag_count
+ JS,
+ GC,
+ COMPILER,
+ PARALLEL_COMPILER,
+ OTHER,
+ EXTERNAL
};
@@ -435,6 +435,7 @@ enum CpuFeature { SSE4_1 = 32 + 19, // x86
SUDIV = 4, // ARM
UNALIGNED_ACCESSES = 5, // ARM
MOVW_MOVT_IMMEDIATE_LOADS = 6, // ARM
+ VFP32DREGS = 7, // ARM
SAHF = 0, // x86
FPU = 1}; // MIPS
@@ -483,11 +484,19 @@ enum VariableMode {
CONST, // declared via 'const' declarations
- LET, // declared via 'let' declarations
+ LET, // declared via 'let' declarations (first lexical)
CONST_HARMONY, // declared via 'const' declarations in harmony mode
+ MODULE, // declared via 'module' declaration (last lexical)
+
// Variables introduced by the compiler:
+ INTERNAL, // like VAR, but not user-visible (may or may not
+ // be in a context)
+
+ TEMPORARY, // temporary variables (not user-visible), never
+ // in a context
+
DYNAMIC, // always require dynamic lookup (we don't know
// the declaration)
@@ -495,16 +504,10 @@ enum VariableMode {
// variable is global unless it has been shadowed
// by an eval-introduced variable
- DYNAMIC_LOCAL, // requires dynamic lookup, but we know that the
+ DYNAMIC_LOCAL // requires dynamic lookup, but we know that the
// variable is local and where it is unless it
// has been shadowed by an eval-introduced
// variable
-
- INTERNAL, // like VAR, but not user-visible (may or may not
- // be in a context)
-
- TEMPORARY // temporary variables (not user-visible), never
- // in a context
};
@@ -514,17 +517,17 @@ inline bool IsDynamicVariableMode(VariableMode mode) {
inline bool IsDeclaredVariableMode(VariableMode mode) {
- return mode >= VAR && mode <= CONST_HARMONY;
+ return mode >= VAR && mode <= MODULE;
}
inline bool IsLexicalVariableMode(VariableMode mode) {
- return mode >= LET && mode <= CONST_HARMONY;
+ return mode >= LET && mode <= MODULE;
}
inline bool IsImmutableVariableMode(VariableMode mode) {
- return mode == CONST || mode == CONST_HARMONY;
+ return mode == CONST || (mode >= CONST_HARMONY && mode <= MODULE);
}
diff --git a/src/3rdparty/v8/src/v8natives.js b/src/3rdparty/v8/src/v8natives.js
index 20fc74d..356ce88 100644
--- a/src/3rdparty/v8/src/v8natives.js
+++ b/src/3rdparty/v8/src/v8natives.js
@@ -231,10 +231,9 @@ $Object.prototype.constructor = $Object;
// ECMA-262 - 15.2.4.2
function ObjectToString() {
- if (IS_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- return '[object Undefined]';
- }
- if (IS_NULL(this)) return '[object Null]';
+ if (IS_UNDEFINED(this) && !IS_UNDETECTABLE(this)) return "[object Undefined]";
+ if (IS_NULL(this)) return "[object Null]";
+ if (IS_SYMBOL(this)) return "[object Symbol]";
return "[object " + %_ClassOf(ToObject(this)) + "]";
}
@@ -893,16 +892,35 @@ function DefineArrayProperty(obj, p, desc, should_throw) {
}
// Make sure the below call to DefineObjectProperty() doesn't overwrite
// any magic "length" property by removing the value.
+ // TODO(mstarzinger): This hack should be removed once we have addressed the
+ // respective TODO in Runtime_DefineOrRedefineDataProperty.
+ // For the time being, we need a hack to prevent Object.observe from
+ // generating two change records.
+ var isObserved = %IsObserved(obj);
+ if (isObserved) %SetIsObserved(obj, false);
obj.length = new_length;
desc.value_ = void 0;
desc.hasValue_ = false;
- if (!DefineObjectProperty(obj, "length", desc, should_throw) || threw) {
+ threw = !DefineObjectProperty(obj, "length", desc, should_throw) || threw;
+ if (isObserved) %SetIsObserved(obj, true);
+ if (threw) {
if (should_throw) {
throw MakeTypeError("redefine_disallowed", [p]);
} else {
return false;
}
}
+ if (isObserved) {
+ var new_desc = GetOwnProperty(obj, "length");
+ var updated = length_desc.value_ !== new_desc.value_;
+ var reconfigured = length_desc.writable_ !== new_desc.writable_ ||
+ length_desc.configurable_ !== new_desc.configurable_ ||
+ length_desc.enumerable_ !== new_desc.enumerable_;
+ if (updated || reconfigured) {
+ NotifyChange(reconfigured ? "reconfigured" : "updated",
+ obj, "length", length_desc.value_);
+ }
+ }
return true;
}
@@ -970,7 +988,7 @@ function ToStringArray(obj, trap) {
}
var n = ToUint32(obj.length);
var array = new $Array(n);
- var names = {}; // TODO(rossberg): use sets once they are ready.
+ var names = { __proto__: null }; // TODO(rossberg): use sets once ready.
for (var index = 0; index < n; index++) {
var s = ToString(obj[index]);
if (%HasLocalProperty(names, s)) {
@@ -999,9 +1017,13 @@ function ObjectGetOwnPropertyNames(obj) {
// Get the local element names.
var propertyNames = %GetLocalElementNames(obj);
+ for (var i = 0; i < propertyNames.length; ++i) {
+ propertyNames[i] = %_NumberToString(propertyNames[i]);
+ }
// Get names for indexed interceptor properties.
- if (%GetInterceptorInfo(obj) & 1) {
+ var interceptorInfo = %GetInterceptorInfo(obj);
+ if ((interceptorInfo & 1) != 0) {
var indexedInterceptorNames =
%GetIndexedInterceptorElementNames(obj);
if (indexedInterceptorNames) {
@@ -1015,8 +1037,7 @@ function ObjectGetOwnPropertyNames(obj) {
propertyNames = propertyNames.concat(%GetLocalPropertyNames(obj));
// Get names for named interceptor properties if any.
-
- if (%GetInterceptorInfo(obj) & 2) {
+ if ((interceptorInfo & 2) != 0) {
var namedInterceptorNames =
%GetNamedInterceptorPropertyNames(obj);
if (namedInterceptorNames) {
@@ -1024,21 +1045,24 @@ function ObjectGetOwnPropertyNames(obj) {
}
}
- // Property names are expected to be unique strings.
- var propertySet = {};
- var j = 0;
- for (var i = 0; i < propertyNames.length; ++i) {
- var name = ToString(propertyNames[i]);
- // We need to check for the exact property value since for intrinsic
- // properties like toString if(propertySet["toString"]) will always
- // succeed.
- if (propertySet[name] === true) {
- continue;
+ // Property names are expected to be unique strings,
+ // but interceptors can interfere with that assumption.
+ if (interceptorInfo != 0) {
+ var propertySet = { __proto__: null };
+ var j = 0;
+ for (var i = 0; i < propertyNames.length; ++i) {
+ var name = ToString(propertyNames[i]);
+ // We need to check for the exact property value since for intrinsic
+ // properties like toString if(propertySet["toString"]) will always
+ // succeed.
+ if (propertySet[name] === true) {
+ continue;
+ }
+ propertySet[name] = true;
+ propertyNames[j++] = name;
}
- propertySet[name] = true;
- propertyNames[j++] = name;
+ propertyNames.length = j;
}
- propertyNames.length = j;
return propertyNames;
}
@@ -1066,7 +1090,7 @@ function ObjectDefineProperty(obj, p, attributes) {
// Clone the attributes object for protection.
// TODO(rossberg): not spec'ed yet, so not sure if this should involve
// non-own properties as it does (or non-enumerable ones, as it doesn't?).
- var attributesClone = {};
+ var attributesClone = { __proto__: null };
for (var a in attributes) {
attributesClone[a] = attributes[a];
}
@@ -1221,16 +1245,16 @@ function ObjectIsSealed(obj) {
if (%IsJSProxy(obj)) {
return false;
}
+ if (%IsExtensible(obj)) {
+ return false;
+ }
var names = ObjectGetOwnPropertyNames(obj);
for (var i = 0; i < names.length; i++) {
var name = names[i];
var desc = GetOwnProperty(obj, name);
if (desc.isConfigurable()) return false;
}
- if (!ObjectIsExtensible(obj)) {
- return true;
- }
- return false;
+ return true;
}
@@ -1242,6 +1266,9 @@ function ObjectIsFrozen(obj) {
if (%IsJSProxy(obj)) {
return false;
}
+ if (%IsExtensible(obj)) {
+ return false;
+ }
var names = ObjectGetOwnPropertyNames(obj);
for (var i = 0; i < names.length; i++) {
var name = names[i];
@@ -1249,10 +1276,7 @@ function ObjectIsFrozen(obj) {
if (IsDataDescriptor(desc) && desc.isWritable()) return false;
if (desc.isConfigurable()) return false;
}
- if (!ObjectIsExtensible(obj)) {
- return true;
- }
- return false;
+ return true;
}
@@ -1413,11 +1437,7 @@ function NumberToString(radix) {
// ECMA-262 section 15.7.4.3
function NumberToLocaleString() {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["Number.prototype.toLocaleString"]);
- }
- return this.toString();
+ return %_CallFunction(this, NumberToString);
}
@@ -1434,50 +1454,76 @@ function NumberValueOf() {
// ECMA-262 section 15.7.4.5
function NumberToFixed(fractionDigits) {
+ var x = this;
+ if (!IS_NUMBER(this)) {
+ if (!IS_NUMBER_WRAPPER(this)) {
+ throw MakeTypeError("incompatible_method_receiver",
+ ["Number.prototype.toFixed", this]);
+ }
+ // Get the value of this number in case it's an object.
+ x = %_ValueOf(this);
+ }
var f = TO_INTEGER(fractionDigits);
+
if (f < 0 || f > 20) {
throw new $RangeError("toFixed() digits argument must be between 0 and 20");
}
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["Number.prototype.toFixed"]);
- }
- var x = ToNumber(this);
+
+ if (NUMBER_IS_NAN(x)) return "NaN";
+ if (x == 1/0) return "Infinity";
+ if (x == -1/0) return "-Infinity";
+
return %NumberToFixed(x, f);
}
// ECMA-262 section 15.7.4.6
function NumberToExponential(fractionDigits) {
- var f = -1;
- if (!IS_UNDEFINED(fractionDigits)) {
- f = TO_INTEGER(fractionDigits);
- if (f < 0 || f > 20) {
- throw new $RangeError(
- "toExponential() argument must be between 0 and 20");
+ var x = this;
+ if (!IS_NUMBER(this)) {
+ if (!IS_NUMBER_WRAPPER(this)) {
+ throw MakeTypeError("incompatible_method_receiver",
+ ["Number.prototype.toExponential", this]);
}
+ // Get the value of this number in case it's an object.
+ x = %_ValueOf(this);
}
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["Number.prototype.toExponential"]);
+ var f = IS_UNDEFINED(fractionDigits) ? void 0 : TO_INTEGER(fractionDigits);
+
+ if (NUMBER_IS_NAN(x)) return "NaN";
+ if (x == 1/0) return "Infinity";
+ if (x == -1/0) return "-Infinity";
+
+ if (IS_UNDEFINED(f)) {
+ f = -1; // Signal for runtime function that f is not defined.
+ } else if (f < 0 || f > 20) {
+ throw new $RangeError("toExponential() argument must be between 0 and 20");
}
- var x = ToNumber(this);
return %NumberToExponential(x, f);
}
// ECMA-262 section 15.7.4.7
function NumberToPrecision(precision) {
- if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
- throw MakeTypeError("called_on_null_or_undefined",
- ["Number.prototype.toPrecision"]);
+ var x = this;
+ if (!IS_NUMBER(this)) {
+ if (!IS_NUMBER_WRAPPER(this)) {
+ throw MakeTypeError("incompatible_method_receiver",
+ ["Number.prototype.toPrecision", this]);
+ }
+ // Get the value of this number in case it's an object.
+ x = %_ValueOf(this);
}
if (IS_UNDEFINED(precision)) return ToString(%_ValueOf(this));
var p = TO_INTEGER(precision);
+
+ if (NUMBER_IS_NAN(x)) return "NaN";
+ if (x == 1/0) return "Infinity";
+ if (x == -1/0) return "-Infinity";
+
if (p < 1 || p > 21) {
throw new $RangeError("toPrecision() argument must be between 1 and 21");
}
- var x = ToNumber(this);
return %NumberToPrecision(x, p);
}
diff --git a/src/3rdparty/v8/src/v8threads.cc b/src/3rdparty/v8/src/v8threads.cc
index 32ea5e1..925e198 100644
--- a/src/3rdparty/v8/src/v8threads.cc
+++ b/src/3rdparty/v8/src/v8threads.cc
@@ -42,15 +42,18 @@ namespace v8 {
bool Locker::active_ = false;
-// Constructor for the Locker object. Once the Locker is constructed the
-// current thread will be guaranteed to have the lock for a given isolate.
-Locker::Locker(v8::Isolate* isolate)
- : has_lock_(false),
- top_level_(true),
- isolate_(reinterpret_cast<i::Isolate*>(isolate)) {
- if (isolate_ == NULL) {
- isolate_ = i::Isolate::GetDefaultIsolateForLocking();
- }
+Locker::Locker() {
+ Initialize(i::Isolate::GetDefaultIsolateForLocking());
+}
+
+
+// Once the Locker is initialized, the current thread will be guaranteed to have
+// the lock for a given isolate.
+void Locker::Initialize(v8::Isolate* isolate) {
+ ASSERT(isolate != NULL);
+ has_lock_ = false;
+ top_level_ = true;
+ isolate_ = reinterpret_cast<i::Isolate*>(isolate);
// Record that the Locker has been used at least once.
active_ = true;
// Get the big lock if necessary.
@@ -86,10 +89,8 @@ Locker::Locker(v8::Isolate* isolate)
bool Locker::IsLocked(v8::Isolate* isolate) {
+ ASSERT(isolate != NULL);
i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
- if (internal_isolate == NULL) {
- internal_isolate = i::Isolate::GetDefaultIsolateForLocking();
- }
return internal_isolate->thread_manager()->IsLockedByCurrentThread();
}
@@ -115,11 +116,14 @@ Locker::~Locker() {
}
-Unlocker::Unlocker(v8::Isolate* isolate)
- : isolate_(reinterpret_cast<i::Isolate*>(isolate)) {
- if (isolate_ == NULL) {
- isolate_ = i::Isolate::GetDefaultIsolateForLocking();
- }
+Unlocker::Unlocker() {
+ Initialize(i::Isolate::GetDefaultIsolateForLocking());
+}
+
+
+void Unlocker::Initialize(v8::Isolate* isolate) {
+ ASSERT(isolate != NULL);
+ isolate_ = reinterpret_cast<i::Isolate*>(isolate);
ASSERT(isolate_->thread_manager()->IsLockedByCurrentThread());
if (isolate_->IsDefaultIsolate()) {
isolate_->Exit();
@@ -479,7 +483,7 @@ void ContextSwitcher::Run() {
// Acknowledge the preemption by the receiving thread.
void ContextSwitcher::PreemptionReceived() {
- ASSERT(Locker::IsLocked());
+ ASSERT(Locker::IsLocked(i::Isolate::GetDefaultIsolateForLocking()));
// There is currently no accounting being done for this. But could be in the
// future, which is why we leave this in.
}
diff --git a/src/3rdparty/v8/src/v8utils.cc b/src/3rdparty/v8/src/v8utils.cc
index 4ab97ed..2dfc1ea 100644
--- a/src/3rdparty/v8/src/v8utils.cc
+++ b/src/3rdparty/v8/src/v8utils.cc
@@ -279,97 +279,4 @@ void StringBuilder::AddFormattedList(const char* format, va_list list) {
}
}
-
-MemoryMappedExternalResource::MemoryMappedExternalResource(const char* filename)
- : filename_(NULL),
- data_(NULL),
- length_(0),
- remove_file_on_cleanup_(false) {
- Init(filename);
-}
-
-
-MemoryMappedExternalResource::
- MemoryMappedExternalResource(const char* filename,
- bool remove_file_on_cleanup)
- : filename_(NULL),
- data_(NULL),
- length_(0),
- remove_file_on_cleanup_(remove_file_on_cleanup) {
- Init(filename);
-}
-
-
-MemoryMappedExternalResource::~MemoryMappedExternalResource() {
- // Release the resources if we had successfully acquired them:
- if (file_ != NULL) {
- delete file_;
- if (remove_file_on_cleanup_) {
- OS::Remove(filename_);
- }
- DeleteArray<char>(filename_);
- }
-}
-
-
-void MemoryMappedExternalResource::Init(const char* filename) {
- file_ = OS::MemoryMappedFile::open(filename);
- if (file_ != NULL) {
- filename_ = StrDup(filename);
- data_ = reinterpret_cast<char*>(file_->memory());
- length_ = file_->size();
- }
-}
-
-
-bool MemoryMappedExternalResource::EnsureIsAscii(bool abort_if_failed) const {
- bool is_ascii = true;
-
- int line_no = 1;
- const char* start_of_line = data_;
- const char* end = data_ + length_;
- for (const char* p = data_; p < end; p++) {
- char c = *p;
- if ((c & 0x80) != 0) {
- // Non-ASCII detected:
- is_ascii = false;
-
- // Report the error and abort if appropriate:
- if (abort_if_failed) {
- int char_no = static_cast<int>(p - start_of_line) - 1;
-
- ASSERT(filename_ != NULL);
- PrintF("\n\n\n"
- "Abort: Non-Ascii character 0x%.2x in file %s line %d char %d",
- c, filename_, line_no, char_no);
-
- // Allow for some context up to kNumberOfLeadingContextChars chars
- // before the offending non-ASCII char to help the user see where
- // the offending char is.
- const int kNumberOfLeadingContextChars = 10;
- const char* err_context = p - kNumberOfLeadingContextChars;
- if (err_context < data_) {
- err_context = data_;
- }
- // Compute the length of the error context and print it.
- int err_context_length = static_cast<int>(p - err_context);
- if (err_context_length != 0) {
- PrintF(" after \"%.*s\"", err_context_length, err_context);
- }
- PrintF(".\n\n\n");
- OS::Abort();
- }
-
- break; // Non-ASCII detected. No need to continue scanning.
- }
- if (c == '\n') {
- start_of_line = p;
- line_no++;
- }
- }
-
- return is_ascii;
-}
-
-
} } // namespace v8::internal
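
MemoryMappedExternalResource (deleted above, together with its declaration in v8utils.h below) was an in-tree implementation of v8::String::ExternalAsciiStringResource that no caller used; removing it does not change the public external-string interface. For reference, a minimal implementation of that interface looks roughly like this — StaticAsciiResource is a hypothetical name:

    #include <v8.h>
    #include <cstring>

    // Backs an external V8 string with memory the embedder keeps alive.
    class StaticAsciiResource : public v8::String::ExternalAsciiStringResource {
     public:
      explicit StaticAsciiResource(const char* s)
          : data_(s), length_(std::strlen(s)) {}
      virtual const char* data() const { return data_; }
      virtual size_t length() const { return length_; }
     private:
      const char* data_;
      size_t length_;
    };
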
diff --git a/src/3rdparty/v8/src/v8utils.h b/src/3rdparty/v8/src/v8utils.h
index cb018e6..937e93d 100644
--- a/src/3rdparty/v8/src/v8utils.h
+++ b/src/3rdparty/v8/src/v8utils.h
@@ -202,13 +202,44 @@ Vector<const char> ReadFile(FILE* file,
bool verbose = true);
+template <typename sourcechar, typename sinkchar>
+INLINE(static void CopyCharsUnsigned(sinkchar* dest,
+ const sourcechar* src,
+ int chars));
+
// Copy from ASCII/16bit chars to ASCII/16bit chars.
template <typename sourcechar, typename sinkchar>
INLINE(void CopyChars(sinkchar* dest, const sourcechar* src, int chars));
+template<typename sourcechar, typename sinkchar>
+void CopyChars(sinkchar* dest, const sourcechar* src, int chars) {
+ ASSERT(sizeof(sourcechar) <= 2);
+ ASSERT(sizeof(sinkchar) <= 2);
+ if (sizeof(sinkchar) == 1) {
+ if (sizeof(sourcechar) == 1) {
+ CopyCharsUnsigned(reinterpret_cast<uint8_t*>(dest),
+ reinterpret_cast<const uint8_t*>(src),
+ chars);
+ } else {
+ CopyCharsUnsigned(reinterpret_cast<uint8_t*>(dest),
+ reinterpret_cast<const uint16_t*>(src),
+ chars);
+ }
+ } else {
+ if (sizeof(sourcechar) == 1) {
+ CopyCharsUnsigned(reinterpret_cast<uint16_t*>(dest),
+ reinterpret_cast<const uint8_t*>(src),
+ chars);
+ } else {
+ CopyCharsUnsigned(reinterpret_cast<uint16_t*>(dest),
+ reinterpret_cast<const uint16_t*>(src),
+ chars);
+ }
+ }
+}
template <typename sourcechar, typename sinkchar>
-void CopyChars(sinkchar* dest, const sourcechar* src, int chars) {
+void CopyCharsUnsigned(sinkchar* dest, const sourcechar* src, int chars) {
sinkchar* limit = dest + chars;
#ifdef V8_HOST_CAN_READ_UNALIGNED
if (sizeof(*dest) == sizeof(*src)) {
@@ -218,7 +249,8 @@ void CopyChars(sinkchar* dest, const sourcechar* src, int chars) {
}
// Number of characters in a uintptr_t.
static const int kStepSize = sizeof(uintptr_t) / sizeof(*dest); // NOLINT
- while (dest <= limit - kStepSize) {
+ ASSERT(dest + kStepSize > dest); // Check for overflow.
+ while (dest + kStepSize <= limit) {
*reinterpret_cast<uintptr_t*>(dest) =
*reinterpret_cast<const uintptr_t*>(src);
dest += kStepSize;
@@ -232,37 +264,6 @@ void CopyChars(sinkchar* dest, const sourcechar* src, int chars) {
}
-// A resource for using mmapped files to back external strings that are read
-// from files.
-class MemoryMappedExternalResource: public
- v8::String::ExternalAsciiStringResource {
- public:
- explicit MemoryMappedExternalResource(const char* filename);
- MemoryMappedExternalResource(const char* filename,
- bool remove_file_on_cleanup);
- virtual ~MemoryMappedExternalResource();
-
- virtual const char* data() const { return data_; }
- virtual size_t length() const { return length_; }
-
- bool exists() const { return file_ != NULL; }
- bool is_empty() const { return length_ == 0; }
-
- bool EnsureIsAscii(bool abort_if_failed) const;
- bool EnsureIsAscii() const { return EnsureIsAscii(true); }
- bool IsAscii() const { return EnsureIsAscii(false); }
-
- private:
- void Init(const char* filename);
-
- char* filename_;
- OS::MemoryMappedFile* file_;
-
- const char* data_;
- size_t length_;
- bool remove_file_on_cleanup_;
-};
-
class StringBuilder : public SimpleStringBuilder {
public:
explicit StringBuilder(int size) : SimpleStringBuilder(size) { }
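
Two separate fixes are folded into the v8utils.h hunk above. First, CopyChars becomes a dispatcher that routes every copy through uint8_t/uint16_t, so widening a Latin-1 byte can never sign-extend. Second, the word-at-a-time loop now tests dest + kStepSize <= limit instead of dest <= limit - kStepSize, which for short strings (chars < kStepSize) computed a pointer before the start of the buffer; the new ASSERT guards the addition against wraparound. A self-contained sketch of the sign-extension point, with our own names:

    #include <cassert>
    #include <cstdint>
    #include <type_traits>

    // Route every code unit through its unsigned counterpart, as the
    // uint8_t/uint16_t casts in the new CopyChars do.
    template <typename SinkChar, typename SourceChar>
    void CopyCharsSketch(SinkChar* dest, const SourceChar* src, int chars) {
      typedef typename std::make_unsigned<SourceChar>::type Unsigned;
      for (int i = 0; i < chars; i++) {
        dest[i] = static_cast<SinkChar>(static_cast<Unsigned>(src[i]));
      }
    }

    int main() {
      char latin1[] = { static_cast<char>(0xE9) };  // 'é' in Latin-1
      uint16_t two_byte[1];
      CopyCharsSketch(two_byte, latin1, 1);
      assert(two_byte[0] == 0x00E9);  // 0xFFE9 if the copy sign-extended
      return 0;
    }
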
diff --git a/src/3rdparty/v8/src/variables.cc b/src/3rdparty/v8/src/variables.cc
index 3e735d6..1333ca1 100644
--- a/src/3rdparty/v8/src/variables.cc
+++ b/src/3rdparty/v8/src/variables.cc
@@ -41,8 +41,9 @@ const char* Variable::Mode2String(VariableMode mode) {
switch (mode) {
case VAR: return "VAR";
case CONST: return "CONST";
- case CONST_HARMONY: return "CONST_HARMONY";
case LET: return "LET";
+ case CONST_HARMONY: return "CONST_HARMONY";
+ case MODULE: return "MODULE";
case DYNAMIC: return "DYNAMIC";
case DYNAMIC_GLOBAL: return "DYNAMIC_GLOBAL";
case DYNAMIC_LOCAL: return "DYNAMIC_LOCAL";
@@ -76,7 +77,7 @@ Variable::Variable(Scope* scope,
interface_(interface),
is_qml_global_(false) {
// Names must be canonicalized for fast equality checks.
- ASSERT(name->IsSymbol());
+ ASSERT(name->IsInternalizedString());
// Var declared variables never need initialization.
ASSERT(!(mode == VAR && initialization_flag == kNeedsInitialization));
}
@@ -85,7 +86,8 @@ Variable::Variable(Scope* scope,
bool Variable::IsGlobalObjectProperty() const {
// Temporaries are never global, they must always be allocated in the
// activation frame.
- return mode_ != TEMPORARY && !IsLexicalVariableMode(mode_)
+ return (IsDynamicVariableMode(mode_) ||
+ (IsDeclaredVariableMode(mode_) && !IsLexicalVariableMode(mode_)))
&& scope_ != NULL && scope_->is_global_scope();
}
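
The IsGlobalObjectProperty rewrite replaces a negative test (anything not temporary and not lexical) with an explicit whitelist: dynamic modes, or declared-but-not-lexical modes, i.e. VAR and CONST. A hypothetical mirror of the predicates, assuming the enum follows the ordering of the switch above:

    // Not V8's actual declarations; ordering inferred from Mode2String.
    enum VariableMode { VAR, CONST, LET, CONST_HARMONY, MODULE,
                        INTERNAL, TEMPORARY,
                        DYNAMIC, DYNAMIC_GLOBAL, DYNAMIC_LOCAL };

    inline bool IsDeclaredVariableMode(VariableMode m) { return m >= VAR && m <= MODULE; }
    inline bool IsLexicalVariableMode(VariableMode m)  { return m >= LET && m <= MODULE; }
    inline bool IsDynamicVariableMode(VariableMode m)  { return m >= DYNAMIC && m <= DYNAMIC_LOCAL; }

    // VAR/CONST declarations and dynamic lookups can live on the global
    // object; LET, CONST_HARMONY, MODULE, TEMPORARY and INTERNAL never do.
    inline bool CanBeGlobalObjectProperty(VariableMode m) {
      return IsDynamicVariableMode(m) ||
             (IsDeclaredVariableMode(m) && !IsLexicalVariableMode(m));
    }
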
diff --git a/src/3rdparty/v8/src/variables.h b/src/3rdparty/v8/src/variables.h
index d4e851b..f76da71 100644
--- a/src/3rdparty/v8/src/variables.h
+++ b/src/3rdparty/v8/src/variables.h
@@ -130,8 +130,8 @@ class Variable: public ZoneObject {
bool is_arguments() const { return kind_ == ARGUMENTS; }
// True if the variable is named eval and not known to be shadowed.
- bool is_possibly_eval() const {
- return IsVariable(FACTORY->eval_symbol());
+ bool is_possibly_eval(Isolate* isolate) const {
+ return IsVariable(isolate->factory()->eval_string());
}
Variable* local_if_not_shadowed() const {
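
Making is_possibly_eval take the isolate is part of the broader retreat from the FACTORY macro, which resolved the current isolate through thread-local storage on every call; callers now pass the isolate they already hold. The pattern in isolation, with invented types:

    #include <cstring>

    // Hypothetical stand-ins; the point is the explicit parameter.
    struct IsolateSketch { const char* eval_string; };

    bool IsPossiblyEval(const IsolateSketch* isolate, const char* name) {
      // No hidden TLS lookup: the caller supplies the isolate.
      return std::strcmp(name, isolate->eval_string) == 0;
    }
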
diff --git a/src/3rdparty/v8/src/version.cc b/src/3rdparty/v8/src/version.cc
index 3132b5e..f448e3e 100644
--- a/src/3rdparty/v8/src/version.cc
+++ b/src/3rdparty/v8/src/version.cc
@@ -33,8 +33,8 @@
// NOTE these macros are used by the SCons build script so their names
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 3
-#define MINOR_VERSION 15
-#define BUILD_NUMBER 2
+#define MINOR_VERSION 17
+#define BUILD_NUMBER 7
#define PATCH_LEVEL 0
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
diff --git a/src/3rdparty/v8/src/vm-state-inl.h b/src/3rdparty/v8/src/vm-state-inl.h
index 384940d..fae68eb 100644
--- a/src/3rdparty/v8/src/vm-state-inl.h
+++ b/src/3rdparty/v8/src/vm-state-inl.h
@@ -47,8 +47,8 @@ inline const char* StateToString(StateTag state) {
return "GC";
case COMPILER:
return "COMPILER";
- case PARALLEL_COMPILER_PROLOGUE:
- return "PARALLEL_COMPILER_PROLOGUE";
+ case PARALLEL_COMPILER:
+ return "PARALLEL_COMPILER";
case OTHER:
return "OTHER";
case EXTERNAL:
@@ -67,6 +67,10 @@ VMState::VMState(Isolate* isolate, StateTag tag)
LOG(isolate, UncheckedStringEvent("From", StateToString(previous_tag_)));
}
+ if (FLAG_log_timer_events && previous_tag_ != EXTERNAL && tag == EXTERNAL) {
+ LOG(isolate_, EnterExternal());
+ }
+
isolate_->SetCurrentVMState(tag);
}
@@ -80,6 +84,11 @@ VMState::~VMState() {
UncheckedStringEvent("To", StateToString(previous_tag_)));
}
+ if (FLAG_log_timer_events &&
+ previous_tag_ != EXTERNAL && isolate_->current_vm_state() == EXTERNAL) {
+ LOG(isolate_, LeaveExternal());
+ }
+
isolate_->SetCurrentVMState(previous_tag_);
}
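
With --log-timer-events, the VMState constructor and destructor above now emit one EnterExternal/LeaveExternal pair per excursion into external (embedder) code: the edge is logged only when the tag actually crosses the EXTERNAL boundary, not for every nested state push. A toy version of the transition logic, logging via puts instead of V8's LOG macro:

    #include <cstdio>

    enum StateTag { JS, GC, COMPILER, PARALLEL_COMPILER, OTHER, EXTERNAL };

    struct VMStateSketch {
      explicit VMStateSketch(StateTag tag) : previous_(current_) {
        // Log only the transition into EXTERNAL.
        if (previous_ != EXTERNAL && tag == EXTERNAL) std::puts("enter-external");
        current_ = tag;
      }
      ~VMStateSketch() {
        // Symmetric transition out of EXTERNAL while unwinding.
        if (previous_ != EXTERNAL && current_ == EXTERNAL) std::puts("leave-external");
        current_ = previous_;
      }
      StateTag previous_;
      static StateTag current_;
    };
    StateTag VMStateSketch::current_ = OTHER;
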
diff --git a/src/3rdparty/v8/src/x64/assembler-x64-inl.h b/src/3rdparty/v8/src/x64/assembler-x64-inl.h
index f864174..67acbf0 100644
--- a/src/3rdparty/v8/src/x64/assembler-x64-inl.h
+++ b/src/3rdparty/v8/src/x64/assembler-x64-inl.h
@@ -53,7 +53,7 @@ void Assembler::emitl(uint32_t x) {
void Assembler::emitq(uint64_t x, RelocInfo::Mode rmode) {
Memory::uint64_at(pc_) = x;
- if (rmode != RelocInfo::NONE) {
+ if (!RelocInfo::IsNone(rmode)) {
RecordRelocInfo(rmode, x);
}
pc_ += sizeof(uint64_t);
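
The rmode != RelocInfo::NONE comparisons give way to RelocInfo::IsNone() because this patch splits the no-relocation mode in two; the NONE64 constant used throughout the x64 files below marks 64-bit immediates. The predicate presumably amounts to:

    // Assumed shape; the Mode list here is abbreviated.
    enum Mode { CODE_TARGET, EMBEDDED_OBJECT, EXTERNAL_REFERENCE, NONE32, NONE64 };

    static bool IsNone(Mode mode) { return mode == NONE32 || mode == NONE64; }
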
diff --git a/src/3rdparty/v8/src/x64/assembler-x64.cc b/src/3rdparty/v8/src/x64/assembler-x64.cc
index 1f5bea9..0ac0862 100644
--- a/src/3rdparty/v8/src/x64/assembler-x64.cc
+++ b/src/3rdparty/v8/src/x64/assembler-x64.cc
@@ -46,6 +46,12 @@ uint64_t CpuFeatures::supported_ = CpuFeatures::kDefaultCpuFeatures;
uint64_t CpuFeatures::found_by_runtime_probing_ = 0;
+ExternalReference ExternalReference::cpu_features() {
+ ASSERT(CpuFeatures::initialized_);
+ return ExternalReference(&CpuFeatures::supported_);
+}
+
+
void CpuFeatures::Probe() {
ASSERT(supported_ == CpuFeatures::kDefaultCpuFeatures);
#ifdef DEBUG
@@ -110,7 +116,7 @@ void CpuFeatures::Probe() {
__ or_(rdi, rcx);
// Get the sahf supported flag, from CPUID(0x80000001)
- __ movq(rax, 0x80000001, RelocInfo::NONE);
+ __ movq(rax, 0x80000001, RelocInfo::NONE64);
__ cpuid();
}
supported_ = kDefaultCpuFeatures;
@@ -173,7 +179,7 @@ void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
#endif
// Patch the code.
- patcher.masm()->movq(r10, target, RelocInfo::NONE);
+ patcher.masm()->movq(r10, target, RelocInfo::NONE64);
patcher.masm()->call(r10);
// Check that the size of the code generated is as expected.
@@ -201,7 +207,8 @@ void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
// -----------------------------------------------------------------------------
// Register constants.
-const int Register::kRegisterCodeByAllocationIndex[kNumAllocatableRegisters] = {
+const int
+ Register::kRegisterCodeByAllocationIndex[kMaxNumAllocatableRegisters] = {
// rax, rbx, rdx, rcx, rdi, r8, r9, r11, r14, r15
0, 3, 2, 1, 7, 8, 9, 11, 14, 15
};
@@ -346,48 +353,20 @@ bool Operand::AddressUsesRegister(Register reg) const {
static void InitCoverageLog();
#endif
-Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size)
- : AssemblerBase(arg_isolate),
+Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
+ : AssemblerBase(isolate, buffer, buffer_size),
code_targets_(100),
positions_recorder_(this) {
- if (buffer == NULL) {
- // Do our own buffer management.
- if (buffer_size <= kMinimalBufferSize) {
- buffer_size = kMinimalBufferSize;
-
- if (isolate() != NULL && isolate()->assembler_spare_buffer() != NULL) {
- buffer = isolate()->assembler_spare_buffer();
- isolate()->set_assembler_spare_buffer(NULL);
- }
- }
- if (buffer == NULL) {
- buffer_ = NewArray<byte>(buffer_size);
- } else {
- buffer_ = static_cast<byte*>(buffer);
- }
- buffer_size_ = buffer_size;
- own_buffer_ = true;
- } else {
- // Use externally provided buffer instead.
- ASSERT(buffer_size > 0);
- buffer_ = static_cast<byte*>(buffer);
- buffer_size_ = buffer_size;
- own_buffer_ = false;
- }
-
// Clear the buffer in debug mode unless it was provided by the
// caller, in which case we can't be sure it's okay to overwrite
// existing code in it.
#ifdef DEBUG
if (own_buffer_) {
- memset(buffer_, 0xCC, buffer_size); // int3
+ memset(buffer_, 0xCC, buffer_size_); // int3
}
#endif
- // Set up buffer pointers.
- ASSERT(buffer_ != NULL);
- pc_ = buffer_;
- reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
+ reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
#ifdef GENERATED_CODE_COVERAGE
@@ -396,19 +375,6 @@ Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size)
}
-Assembler::~Assembler() {
- if (own_buffer_) {
- if (isolate() != NULL &&
- isolate()->assembler_spare_buffer() == NULL &&
- buffer_size_ == kMinimalBufferSize) {
- isolate()->set_assembler_spare_buffer(buffer_);
- } else {
- DeleteArray(buffer_);
- }
- }
-}
-
-
void Assembler::GetCode(CodeDesc* desc) {
// Finalize code (at this point overflow() may be true, but the gap ensures
// that we are still not overlapping instructions and relocation info).
@@ -1538,14 +1504,13 @@ void Assembler::movq(Register dst, void* value, RelocInfo::Mode rmode) {
void Assembler::movq(Register dst, int64_t value, RelocInfo::Mode rmode) {
// Non-relocatable values might not need a 64-bit representation.
- if (rmode == RelocInfo::NONE) {
- // Sadly, there is no zero or sign extending move for 8-bit immediates.
- if (is_int32(value)) {
- movq(dst, Immediate(static_cast<int32_t>(value)));
- return;
- } else if (is_uint32(value)) {
+ if (RelocInfo::IsNone(rmode)) {
+ if (is_uint32(value)) {
movl(dst, Immediate(static_cast<int32_t>(value)));
return;
+ } else if (is_int32(value)) {
+ movq(dst, Immediate(static_cast<int32_t>(value)));
+ return;
}
// Value cannot be represented by 32 bits, so do a full 64 bit immediate
// value.
@@ -1598,11 +1563,11 @@ void Assembler::movl(const Operand& dst, Label* src) {
void Assembler::movq(Register dst, Handle<Object> value, RelocInfo::Mode mode) {
// If there is no relocation info, emit the value of the handle efficiently
// (possibly using less that 8 bytes for the value).
- if (mode == RelocInfo::NONE) {
+ if (RelocInfo::IsNone(mode)) {
// There is no possible reason to store a heap pointer without relocation
// info, so it must be a smi.
ASSERT(value->IsSmi());
- movq(dst, reinterpret_cast<int64_t>(*value), RelocInfo::NONE);
+ movq(dst, reinterpret_cast<int64_t>(*value), RelocInfo::NONE64);
} else {
EnsureSpace ensure_space(this);
ASSERT(value->IsHeapObject());
@@ -1686,6 +1651,15 @@ void Assembler::movzxwl(Register dst, const Operand& src) {
}
+void Assembler::movzxwl(Register dst, Register src) {
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0xB7);
+ emit_modrm(dst, src);
+}
+
+
void Assembler::repmovsb() {
EnsureSpace ensure_space(this);
emit(0xF3);
@@ -2848,6 +2822,16 @@ void Assembler::addsd(XMMRegister dst, XMMRegister src) {
}
+void Assembler::addsd(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ emit(0xF2);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x58);
+ emit_sse_operand(dst, src);
+}
+
+
void Assembler::mulsd(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
emit(0xF2);
@@ -2858,6 +2842,16 @@ void Assembler::mulsd(XMMRegister dst, XMMRegister src) {
}
+void Assembler::mulsd(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ emit(0xF2);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x59);
+ emit_sse_operand(dst, src);
+}
+
+
void Assembler::subsd(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
emit(0xF2);
@@ -2972,6 +2966,15 @@ void Assembler::movmskpd(Register dst, XMMRegister src) {
}
+void Assembler::movmskps(Register dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(dst, src);
+ emit(0x0f);
+ emit(0x50);
+ emit_sse_operand(dst, src);
+}
+
+
void Assembler::emit_sse_operand(XMMRegister reg, const Operand& adr) {
Register ireg = { reg.code() };
emit_operand(ireg, adr);
@@ -3006,7 +3009,7 @@ void Assembler::dd(uint32_t data) {
// Relocation information implementations.
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
- ASSERT(rmode != RelocInfo::NONE);
+ ASSERT(!RelocInfo::IsNone(rmode));
// Don't record external references unless the heap will be serialized.
if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
#ifdef DEBUG
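
Two themes in the assembler-x64.cc hunks: buffer ownership (allocation, the spare-buffer cache, and deallocation) moves wholesale into AssemblerBase, which is why the constructor body and the destructor disappear, and movq with a non-relocatable immediate now tries the unsigned-32-bit case first, since movl zero-extends into the full 64-bit register and has the shortest encoding. The reordered selection logic in isolation:

    #include <cstdint>

    bool is_uint32(int64_t v) { return v >= 0 && v <= 0xFFFFFFFFLL; }
    bool is_int32(int64_t v)  { return v >= INT32_MIN && v <= INT32_MAX; }

    enum MoveKind { MOVL_ZERO_EXTEND, MOVQ_SIGN_EXTENDED_IMM32, MOVQ_IMM64 };

    // Prefer the cheapest encoding that reproduces the 64-bit value.
    MoveKind SelectMove(int64_t value) {
      if (is_uint32(value)) return MOVL_ZERO_EXTEND;         // ~5-6 bytes
      if (is_int32(value))  return MOVQ_SIGN_EXTENDED_IMM32; // 7 bytes
      return MOVQ_IMM64;                                     // 10 bytes
    }
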
diff --git a/src/3rdparty/v8/src/x64/assembler-x64.h b/src/3rdparty/v8/src/x64/assembler-x64.h
index 5f9e147..69eeb8e 100644
--- a/src/3rdparty/v8/src/x64/assembler-x64.h
+++ b/src/3rdparty/v8/src/x64/assembler-x64.h
@@ -95,21 +95,24 @@ struct Register {
// r10 - fixed scratch register
// r12 - smi constant register
// r13 - root register
+ static const int kMaxNumAllocatableRegisters = 10;
+ static int NumAllocatableRegisters() {
+ return kMaxNumAllocatableRegisters;
+ }
static const int kNumRegisters = 16;
- static const int kNumAllocatableRegisters = 10;
static int ToAllocationIndex(Register reg) {
return kAllocationIndexByRegisterCode[reg.code()];
}
static Register FromAllocationIndex(int index) {
- ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+ ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
Register result = { kRegisterCodeByAllocationIndex[index] };
return result;
}
static const char* AllocationIndexToString(int index) {
- ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+ ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
const char* const names[] = {
"rax",
"rbx",
@@ -157,7 +160,7 @@ struct Register {
int code_;
private:
- static const int kRegisterCodeByAllocationIndex[kNumAllocatableRegisters];
+ static const int kRegisterCodeByAllocationIndex[kMaxNumAllocatableRegisters];
static const int kAllocationIndexByRegisterCode[kNumRegisters];
};
@@ -199,8 +202,11 @@ const Register no_reg = { kRegister_no_reg_Code };
struct XMMRegister {
- static const int kNumRegisters = 16;
- static const int kNumAllocatableRegisters = 15;
+ static const int kMaxNumRegisters = 16;
+ static const int kMaxNumAllocatableRegisters = 15;
+ static int NumAllocatableRegisters() {
+ return kMaxNumAllocatableRegisters;
+ }
static int ToAllocationIndex(XMMRegister reg) {
ASSERT(reg.code() != 0);
@@ -208,13 +214,13 @@ struct XMMRegister {
}
static XMMRegister FromAllocationIndex(int index) {
- ASSERT(0 <= index && index < kNumAllocatableRegisters);
+ ASSERT(0 <= index && index < kMaxNumAllocatableRegisters);
XMMRegister result = { index + 1 };
return result;
}
static const char* AllocationIndexToString(int index) {
- ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+ ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
const char* const names[] = {
"xmm1",
"xmm2",
@@ -237,11 +243,11 @@ struct XMMRegister {
static XMMRegister from_code(int code) {
ASSERT(code >= 0);
- ASSERT(code < kNumRegisters);
+ ASSERT(code < kMaxNumRegisters);
XMMRegister r = { code };
return r;
}
- bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; }
+ bool is_valid() const { return 0 <= code_ && code_ < kMaxNumRegisters; }
bool is(XMMRegister reg) const { return code_ == reg.code_; }
int code() const {
ASSERT(is_valid());
@@ -524,6 +530,7 @@ class CpuFeatures : public AllStatic {
static uint64_t supported_;
static uint64_t found_by_runtime_probing_;
+ friend class ExternalReference;
DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
};
@@ -556,7 +563,7 @@ class Assembler : public AssemblerBase {
// is too small, a fatal error occurs. No deallocation of the buffer is done
// upon destruction of the assembler.
Assembler(Isolate* isolate, void* buffer, int buffer_size);
- ~Assembler();
+ virtual ~Assembler() { }
// GetCode emits any pending (non-emitted) code and fills the descriptor
// desc. GetCode() is idempotent; it returns the same result if no other
@@ -726,6 +733,7 @@ class Assembler : public AssemblerBase {
void movzxbl(Register dst, const Operand& src);
void movzxwq(Register dst, const Operand& src);
void movzxwl(Register dst, const Operand& src);
+ void movzxwl(Register dst, Register src);
// Repeated moves.
@@ -1363,8 +1371,10 @@ class Assembler : public AssemblerBase {
void cvtsd2siq(Register dst, XMMRegister src);
void addsd(XMMRegister dst, XMMRegister src);
+ void addsd(XMMRegister dst, const Operand& src);
void subsd(XMMRegister dst, XMMRegister src);
void mulsd(XMMRegister dst, XMMRegister src);
+ void mulsd(XMMRegister dst, const Operand& src);
void divsd(XMMRegister dst, XMMRegister src);
void andpd(XMMRegister dst, XMMRegister src);
@@ -1386,6 +1396,7 @@ class Assembler : public AssemblerBase {
void roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode);
void movmskpd(Register dst, XMMRegister src);
+ void movmskps(Register dst, XMMRegister src);
// The first argument is the reg field, the second argument is the r/m field.
void emit_sse_operand(XMMRegister dst, XMMRegister src);
@@ -1416,8 +1427,6 @@ class Assembler : public AssemblerBase {
void db(uint8_t data);
void dd(uint32_t data);
- int pc_offset() const { return static_cast<int>(pc_ - buffer_); }
-
PositionsRecorder* positions_recorder() { return &positions_recorder_; }
// Check if there is less than kGap bytes available in the buffer.
@@ -1436,7 +1445,6 @@ class Assembler : public AssemblerBase {
// Avoid overflows for displacements etc.
static const int kMaximalBufferSize = 512*MB;
- static const int kMinimalBufferSize = 4*KB;
byte byte_at(int pos) { return buffer_[pos]; }
void set_byte_at(int pos, byte value) { buffer_[pos] = value; }
@@ -1628,15 +1636,7 @@ class Assembler : public AssemblerBase {
friend class EnsureSpace;
friend class RegExpMacroAssemblerX64;
- // Code buffer:
- // The buffer into which code and relocation info are generated.
- byte* buffer_;
- int buffer_size_;
- // True if the assembler owns the buffer, false if buffer is external.
- bool own_buffer_;
-
// code generation
- byte* pc_; // the program counter; moves forward
RelocInfoWriter reloc_info_writer;
List< Handle<Code> > code_targets_;
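
Renaming kNumAllocatableRegisters to kMaxNumAllocatableRegisters and pairing it with a NumAllocatableRegisters() function separates the compile-time bound (used to size static tables) from the runtime count. On x64 the two coincide, but the split lets other ports report a count that depends on probed CPU features. The pattern:

    struct RegisterSketch {
      // Compile-time upper bound: sizes kRegisterCodeByAllocationIndex etc.
      static const int kMaxNumAllocatableRegisters = 10;
      // Runtime count: constant here, but free to consult CPU features
      // on architectures where the allocatable set varies.
      static int NumAllocatableRegisters() { return kMaxNumAllocatableRegisters; }
    };
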
diff --git a/src/3rdparty/v8/src/x64/builtins-x64.cc b/src/3rdparty/v8/src/x64/builtins-x64.cc
index ed0ec68..144962b 100644
--- a/src/3rdparty/v8/src/x64/builtins-x64.cc
+++ b/src/3rdparty/v8/src/x64/builtins-x64.cc
@@ -389,6 +389,10 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx);
__ j(above_equal, &exit);
+ // Symbols are "objects".
+ __ CmpInstanceType(rcx, SYMBOL_TYPE);
+ __ j(equal, &exit);
+
// Throw away the result of the constructor invocation and use the
// on-stack receiver as the result.
__ bind(&use_receiver);
@@ -523,6 +527,10 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Invoke the code.
if (is_construct) {
+ // No type feedback cell is available.
+ Handle<Object> undefined_sentinel(
+ masm->isolate()->factory()->undefined_value());
+ __ Move(rbx, undefined_sentinel);
// Expects rdi to hold function pointer.
CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
__ CallStub(&stub);
@@ -646,6 +654,25 @@ CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
+void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
+ // Enter an internal frame.
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+ // Preserve registers across notification, this is important for compiled
+ // stubs that tail call the runtime on deopts passing their parameters in
+ // registers.
+ __ Pushad();
+ __ CallRuntime(Runtime::kNotifyStubFailure, 0);
+ __ Popad();
+ // Tear down internal frame.
+ }
+
+ __ pop(MemOperand(rsp, 0)); // Ignore state offset
+ __ ret(0); // Return to IC Miss stub, continuation still on stack.
+}
+
+
static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
Deoptimizer::BailoutType type) {
// Enter an internal frame.
@@ -660,17 +687,17 @@ static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
}
// Get the full codegen state from the stack and untag it.
- __ SmiToInteger32(rcx, Operand(rsp, 1 * kPointerSize));
+ __ SmiToInteger32(r10, Operand(rsp, 1 * kPointerSize));
// Switch on the state.
Label not_no_registers, not_tos_rax;
- __ cmpq(rcx, Immediate(FullCodeGenerator::NO_REGISTERS));
+ __ cmpq(r10, Immediate(FullCodeGenerator::NO_REGISTERS));
__ j(not_equal, &not_no_registers, Label::kNear);
__ ret(1 * kPointerSize); // Remove state.
__ bind(&not_no_registers);
__ movq(rax, Operand(rsp, 2 * kPointerSize));
- __ cmpq(rcx, Immediate(FullCodeGenerator::TOS_REG));
+ __ cmpq(r10, Immediate(FullCodeGenerator::TOS_REG));
__ j(not_equal, &not_tos_rax, Label::kNear);
__ ret(2 * kPointerSize); // Remove state, rax.
@@ -1484,30 +1511,62 @@ void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) {
// -- rsp[0] : return address
// -- rsp[8] : last argument
// -----------------------------------
- Label generic_constructor;
-
if (FLAG_debug_code) {
// The array construct code is only set for the builtin and internal
// Array functions, which always have a map.
+
// Initial map for the builtin Array function should be a map.
- __ movq(rbx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
+ __ movq(rcx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a NULL and a Smi.
STATIC_ASSERT(kSmiTag == 0);
- Condition not_smi = NegateCondition(masm->CheckSmi(rbx));
+ Condition not_smi = NegateCondition(masm->CheckSmi(rcx));
__ Check(not_smi, "Unexpected initial map for Array function");
- __ CmpObjectType(rbx, MAP_TYPE, rcx);
+ __ CmpObjectType(rcx, MAP_TYPE, rcx);
__ Check(equal, "Unexpected initial map for Array function");
- }
- // Run the native code for the Array function called as constructor.
- ArrayNativeCode(masm, &generic_constructor);
+ if (FLAG_optimize_constructed_arrays) {
+ // We should either have undefined in rbx or a valid JSGlobalPropertyCell.
+ Label okay_here;
+ Handle<Object> undefined_sentinel(
+ masm->isolate()->factory()->undefined_value());
+ Handle<Map> global_property_cell_map(
+ masm->isolate()->heap()->global_property_cell_map());
+ __ Cmp(rbx, undefined_sentinel);
+ __ j(equal, &okay_here);
+ __ Cmp(FieldOperand(rbx, 0), global_property_cell_map);
+ __ Assert(equal, "Expected property cell in register rbx");
+ __ bind(&okay_here);
+ }
+ }
- // Jump to the generic construct code in case the specialized code cannot
- // handle the construction.
- __ bind(&generic_constructor);
- Handle<Code> generic_construct_stub =
- masm->isolate()->builtins()->JSConstructStubGeneric();
- __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
+ if (FLAG_optimize_constructed_arrays) {
+ Label not_zero_case, not_one_case;
+ __ testq(rax, rax);
+ __ j(not_zero, &not_zero_case);
+ ArrayNoArgumentConstructorStub no_argument_stub;
+ __ TailCallStub(&no_argument_stub);
+
+ __ bind(&not_zero_case);
+ __ cmpq(rax, Immediate(1));
+ __ j(greater, &not_one_case);
+ ArraySingleArgumentConstructorStub single_argument_stub;
+ __ TailCallStub(&single_argument_stub);
+
+ __ bind(&not_one_case);
+ ArrayNArgumentsConstructorStub n_argument_stub;
+ __ TailCallStub(&n_argument_stub);
+ } else {
+ Label generic_constructor;
+ // Run the native code for the Array function called as constructor.
+ ArrayNativeCode(masm, &generic_constructor);
+
+ // Jump to the generic construct code in case the specialized code cannot
+ // handle the construction.
+ __ bind(&generic_constructor);
+ Handle<Code> generic_construct_stub =
+ masm->isolate()->builtins()->JSConstructStubGeneric();
+ __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
+ }
}
@@ -1620,7 +1679,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
// Load the empty string into rbx, remove the receiver from the
// stack, and jump back to the case where the argument is a string.
__ bind(&no_arguments);
- __ LoadRoot(rbx, Heap::kEmptyStringRootIndex);
+ __ LoadRoot(rbx, Heap::kempty_stringRootIndex);
__ pop(rcx);
__ lea(rsp, Operand(rsp, kPointerSize));
__ push(rcx);
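
Under --optimize-constructed-arrays, Generate_ArrayConstructCode now dispatches on the argument count to three specialized stubs instead of running the generic native code. The control flow the assembly implements, reduced to C++:

    // Pseudocode-as-C++ for the dispatch above.
    enum ArrayStub { NO_ARGUMENT, SINGLE_ARGUMENT, N_ARGUMENTS };

    ArrayStub SelectArrayStub(int argc) {
      if (argc == 0) return NO_ARGUMENT;      // new Array()
      if (argc == 1) return SINGLE_ARGUMENT;  // new Array(len) or new Array(x)
      return N_ARGUMENTS;                     // new Array(a, b, ...)
    }
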
diff --git a/src/3rdparty/v8/src/x64/code-stubs-x64.cc b/src/3rdparty/v8/src/x64/code-stubs-x64.cc
index 06ce52a..c4dd865 100644
--- a/src/3rdparty/v8/src/x64/code-stubs-x64.cc
+++ b/src/3rdparty/v8/src/x64/code-stubs-x64.cc
@@ -32,10 +32,85 @@
#include "bootstrapper.h"
#include "code-stubs.h"
#include "regexp-macro-assembler.h"
+#include "stub-cache.h"
+#include "runtime.h"
namespace v8 {
namespace internal {
+
+void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { rax, rbx, rcx, rdx };
+ descriptor->register_param_count_ = 4;
+ descriptor->register_params_ = registers;
+ descriptor->stack_parameter_count_ = NULL;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kCreateObjectLiteralShallow)->entry;
+}
+
+
+void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { rdx, rax };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure);
+}
+
+
+void TransitionElementsKindStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { rax, rbx };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kTransitionElementsKind)->entry;
+}
+
+
+static void InitializeArrayConstructorDescriptor(Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ // register state
+ // rdi -- constructor function
+ // rbx -- type info cell with elements kind
+ // rax -- number of arguments to the constructor function
+ static Register registers[] = { rdi, rbx };
+ descriptor->register_param_count_ = 2;
+ // Stack parameter count covers the constructor pointer and a single argument.
+ descriptor->stack_parameter_count_ = &rax;
+ descriptor->register_params_ = registers;
+ descriptor->extra_expression_stack_count_ = 1;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(ArrayConstructor_StubFailure);
+}
+
+
+void ArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ InitializeArrayConstructorDescriptor(isolate, descriptor);
+}
+
+
+void ArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ InitializeArrayConstructorDescriptor(isolate, descriptor);
+}
+
+
+void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ InitializeArrayConstructorDescriptor(isolate, descriptor);
+}
+
+
#define __ ACCESS_MASM(masm)
void ToNumberStub::Generate(MacroAssembler* masm) {
@@ -221,8 +296,10 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
__ movq(Operand(rax, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)), rbx);
// Copy the qmlglobal object from the previous context.
- __ movq(rbx, Operand(rsi, Context::SlotOffset(Context::QML_GLOBAL_OBJECT_INDEX)));
- __ movq(Operand(rax, Context::SlotOffset(Context::QML_GLOBAL_OBJECT_INDEX)), rbx);
+ __ movq(rbx,
+ Operand(rsi, Context::SlotOffset(Context::QML_GLOBAL_OBJECT_INDEX)));
+ __ movq(Operand(rax, Context::SlotOffset(Context::QML_GLOBAL_OBJECT_INDEX)),
+ rbx);
// Initialize the rest of the slots to undefined.
__ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
@@ -312,6 +389,7 @@ static void GenerateFastCloneShallowArrayCommon(
MacroAssembler* masm,
int length,
FastCloneShallowArrayStub::Mode mode,
+ AllocationSiteMode allocation_site_mode,
Label* fail) {
// Registers on entry:
//
@@ -325,11 +403,26 @@ static void GenerateFastCloneShallowArrayCommon(
? FixedDoubleArray::SizeFor(length)
: FixedArray::SizeFor(length);
}
- int size = JSArray::kSize + elements_size;
+ int size = JSArray::kSize;
+ int allocation_info_start = size;
+ if (allocation_site_mode == TRACK_ALLOCATION_SITE) {
+ size += AllocationSiteInfo::kSize;
+ }
+ size += elements_size;
// Allocate both the JS array and the elements array in one big
// allocation. This avoids multiple limit checks.
- __ AllocateInNewSpace(size, rax, rbx, rdx, fail, TAG_OBJECT);
+ AllocationFlags flags = TAG_OBJECT;
+ if (mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS) {
+ flags = static_cast<AllocationFlags>(DOUBLE_ALIGNMENT | flags);
+ }
+ __ AllocateInNewSpace(size, rax, rbx, rdx, fail, flags);
+
+ if (allocation_site_mode == TRACK_ALLOCATION_SITE) {
+ __ LoadRoot(kScratchRegister, Heap::kAllocationSiteInfoMapRootIndex);
+ __ movq(FieldOperand(rax, allocation_info_start), kScratchRegister);
+ __ movq(FieldOperand(rax, allocation_info_start + kPointerSize), rcx);
+ }
// Copy the JS array part.
for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
@@ -343,7 +436,11 @@ static void GenerateFastCloneShallowArrayCommon(
// Get hold of the elements array of the boilerplate and setup the
// elements pointer in the resulting object.
__ movq(rcx, FieldOperand(rcx, JSArray::kElementsOffset));
- __ lea(rdx, Operand(rax, JSArray::kSize));
+ if (allocation_site_mode == TRACK_ALLOCATION_SITE) {
+ __ lea(rdx, Operand(rax, JSArray::kSize + AllocationSiteInfo::kSize));
+ } else {
+ __ lea(rdx, Operand(rax, JSArray::kSize));
+ }
__ movq(FieldOperand(rax, JSArray::kElementsOffset), rdx);
// Copy the elements array.
@@ -396,16 +493,18 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
__ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
factory->fixed_cow_array_map());
__ j(not_equal, &check_fast_elements);
- GenerateFastCloneShallowArrayCommon(masm, 0,
- COPY_ON_WRITE_ELEMENTS, &slow_case);
+ GenerateFastCloneShallowArrayCommon(masm, 0, COPY_ON_WRITE_ELEMENTS,
+ allocation_site_mode_,
+ &slow_case);
__ ret(3 * kPointerSize);
__ bind(&check_fast_elements);
__ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
factory->fixed_array_map());
__ j(not_equal, &double_elements);
- GenerateFastCloneShallowArrayCommon(masm, length_,
- CLONE_ELEMENTS, &slow_case);
+ GenerateFastCloneShallowArrayCommon(masm, length_, CLONE_ELEMENTS,
+ allocation_site_mode_,
+ &slow_case);
__ ret(3 * kPointerSize);
__ bind(&double_elements);
@@ -435,7 +534,9 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
__ pop(rcx);
}
- GenerateFastCloneShallowArrayCommon(masm, length_, mode, &slow_case);
+ GenerateFastCloneShallowArrayCommon(masm, length_, mode,
+ allocation_site_mode_,
+ &slow_case);
__ ret(3 * kPointerSize);
__ bind(&slow_case);
@@ -443,49 +544,6 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
}
-void FastCloneShallowObjectStub::Generate(MacroAssembler* masm) {
- // Stack layout on entry:
- //
- // [rsp + kPointerSize]: object literal flags.
- // [rsp + (2 * kPointerSize)]: constant properties.
- // [rsp + (3 * kPointerSize)]: literal index.
- // [rsp + (4 * kPointerSize)]: literals array.
-
- // Load boilerplate object into ecx and check if we need to create a
- // boilerplate.
- Label slow_case;
- __ movq(rcx, Operand(rsp, 4 * kPointerSize));
- __ movq(rax, Operand(rsp, 3 * kPointerSize));
- SmiIndex index = masm->SmiToIndex(rax, rax, kPointerSizeLog2);
- __ movq(rcx,
- FieldOperand(rcx, index.reg, index.scale, FixedArray::kHeaderSize));
- __ CompareRoot(rcx, Heap::kUndefinedValueRootIndex);
- __ j(equal, &slow_case);
-
- // Check that the boilerplate contains only fast properties and we can
- // statically determine the instance size.
- int size = JSObject::kHeaderSize + length_ * kPointerSize;
- __ movq(rax, FieldOperand(rcx, HeapObject::kMapOffset));
- __ movzxbq(rax, FieldOperand(rax, Map::kInstanceSizeOffset));
- __ cmpq(rax, Immediate(size >> kPointerSizeLog2));
- __ j(not_equal, &slow_case);
-
- // Allocate the JS object and copy header together with all in-object
- // properties from the boilerplate.
- __ AllocateInNewSpace(size, rax, rbx, rdx, &slow_case, TAG_OBJECT);
- for (int i = 0; i < size; i += kPointerSize) {
- __ movq(rbx, FieldOperand(rcx, i));
- __ movq(FieldOperand(rax, i), rbx);
- }
-
- // Return and remove the on-stack parameters.
- __ ret(4 * kPointerSize);
-
- __ bind(&slow_case);
- __ TailCallRuntime(Runtime::kCreateObjectLiteralShallow, 4, 1);
-}
-
-
// The stub expects its argument on the stack and returns its result in tos_:
// zero for false, and a non-zero value for true.
void ToBooleanStub::Generate(MacroAssembler* masm) {
@@ -645,6 +703,10 @@ void ToBooleanStub::GenerateTypeTransition(MacroAssembler* masm) {
class FloatingPointHelper : public AllStatic {
public:
+ enum ConvertUndefined {
+ CONVERT_UNDEFINED_TO_ZERO,
+ BAILOUT_ON_UNDEFINED
+ };
// Load the operands from rdx and rax into xmm0 and xmm1, as doubles.
// If the operands are not both numbers, jump to not_numbers.
// Leaves rdx and rax unchanged. SmiOperands assumes both are smis.
@@ -680,7 +742,8 @@ class FloatingPointHelper : public AllStatic {
Register scratch2,
Register scratch3,
Label* on_success,
- Label* on_not_smis);
+ Label* on_not_smis,
+ ConvertUndefined convert_undefined);
};
@@ -760,8 +823,8 @@ void UnaryOpStub::Generate(MacroAssembler* masm) {
case UnaryOpIC::SMI:
GenerateSmiStub(masm);
break;
- case UnaryOpIC::HEAP_NUMBER:
- GenerateHeapNumberStub(masm);
+ case UnaryOpIC::NUMBER:
+ GenerateNumberStub(masm);
break;
case UnaryOpIC::GENERIC:
GenerateGenericStub(masm);
@@ -842,13 +905,13 @@ void UnaryOpStub::GenerateSmiCodeBitNot(MacroAssembler* masm,
// TODO(svenpanne): Use virtual functions instead of switch.
-void UnaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
+void UnaryOpStub::GenerateNumberStub(MacroAssembler* masm) {
switch (op_) {
case Token::SUB:
- GenerateHeapNumberStubSub(masm);
+ GenerateNumberStubSub(masm);
break;
case Token::BIT_NOT:
- GenerateHeapNumberStubBitNot(masm);
+ GenerateNumberStubBitNot(masm);
break;
default:
UNREACHABLE();
@@ -856,7 +919,7 @@ void UnaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
}
-void UnaryOpStub::GenerateHeapNumberStubSub(MacroAssembler* masm) {
+void UnaryOpStub::GenerateNumberStubSub(MacroAssembler* masm) {
Label non_smi, slow, call_builtin;
GenerateSmiCodeSub(masm, &non_smi, &call_builtin, Label::kNear);
__ bind(&non_smi);
@@ -868,7 +931,7 @@ void UnaryOpStub::GenerateHeapNumberStubSub(MacroAssembler* masm) {
}
-void UnaryOpStub::GenerateHeapNumberStubBitNot(
+void UnaryOpStub::GenerateNumberStubBitNot(
MacroAssembler* masm) {
Label non_smi, slow;
GenerateSmiCodeBitNot(masm, &non_smi, Label::kNear);
@@ -1005,16 +1068,15 @@ void UnaryOpStub::PrintName(StringStream* stream) {
}
+void BinaryOpStub::Initialize() {}
+
+
void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
__ pop(rcx); // Save return address.
__ push(rdx);
__ push(rax);
// Left and right arguments are now on top.
- // Push this stub's key. Although the operation and the type info are
- // encoded into the key, the encoding is opaque, so push them too.
__ Push(Smi::FromInt(MinorKey()));
- __ Push(Smi::FromInt(op_));
- __ Push(Smi::FromInt(operands_type_));
__ push(rcx); // Push return address.
@@ -1023,69 +1085,16 @@ void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
__ TailCallExternalReference(
ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
masm->isolate()),
- 5,
+ 3,
1);
}
-void BinaryOpStub::Generate(MacroAssembler* masm) {
- // Explicitly allow generation of nested stubs. It is safe here because
- // generation code does not use any raw pointers.
- AllowStubCallsScope allow_stub_calls(masm, true);
-
- switch (operands_type_) {
- case BinaryOpIC::UNINITIALIZED:
- GenerateTypeTransition(masm);
- break;
- case BinaryOpIC::SMI:
- GenerateSmiStub(masm);
- break;
- case BinaryOpIC::INT32:
- UNREACHABLE();
- // The int32 case is identical to the Smi case. We avoid creating this
- // ic state on x64.
- break;
- case BinaryOpIC::HEAP_NUMBER:
- GenerateHeapNumberStub(masm);
- break;
- case BinaryOpIC::ODDBALL:
- GenerateOddballStub(masm);
- break;
- case BinaryOpIC::BOTH_STRING:
- GenerateBothStringStub(masm);
- break;
- case BinaryOpIC::STRING:
- GenerateStringStub(masm);
- break;
- case BinaryOpIC::GENERIC:
- GenerateGeneric(masm);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void BinaryOpStub::PrintName(StringStream* stream) {
- const char* op_name = Token::Name(op_);
- const char* overwrite_name;
- switch (mode_) {
- case NO_OVERWRITE: overwrite_name = "Alloc"; break;
- case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
- case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
- default: overwrite_name = "UnknownOverwrite"; break;
- }
- stream->Add("BinaryOpStub_%s_%s_%s",
- op_name,
- overwrite_name,
- BinaryOpIC::GetName(operands_type_));
-}
-
-
-void BinaryOpStub::GenerateSmiCode(
+static void BinaryOpStub_GenerateSmiCode(
MacroAssembler* masm,
Label* slow,
- SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
+ BinaryOpStub::SmiCodeGenerateHeapNumberResults allow_heapnumber_results,
+ Token::Value op) {
// Arguments to BinaryOpStub are in rdx and rax.
const Register left = rdx;
@@ -1094,9 +1103,9 @@ void BinaryOpStub::GenerateSmiCode(
// We only generate heapnumber answers for overflowing calculations
// for the four basic arithmetic operations and logical right shift by 0.
bool generate_inline_heapnumber_results =
- (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) &&
- (op_ == Token::ADD || op_ == Token::SUB ||
- op_ == Token::MUL || op_ == Token::DIV || op_ == Token::SHR);
+ (allow_heapnumber_results == BinaryOpStub::ALLOW_HEAPNUMBER_RESULTS) &&
+ (op == Token::ADD || op == Token::SUB ||
+ op == Token::MUL || op == Token::DIV || op == Token::SHR);
// Smi check of both operands. If op is BIT_OR, the check is delayed
// until after the OR operation.
@@ -1104,7 +1113,7 @@ void BinaryOpStub::GenerateSmiCode(
Label use_fp_on_smis;
Label fail;
- if (op_ != Token::BIT_OR) {
+ if (op != Token::BIT_OR) {
Comment smi_check_comment(masm, "-- Smi check arguments");
__ JumpIfNotBothSmi(left, right, &not_smis);
}
@@ -1113,7 +1122,7 @@ void BinaryOpStub::GenerateSmiCode(
__ bind(&smi_values);
// Perform the operation.
Comment perform_smi(masm, "-- Perform smi operation");
- switch (op_) {
+ switch (op) {
case Token::ADD:
ASSERT(right.is(rax));
__ SmiAdd(right, right, left, &use_fp_on_smis); // ADD is commutative.
@@ -1185,7 +1194,7 @@ void BinaryOpStub::GenerateSmiCode(
// operations on known smis (e.g., if the result of the operation
// overflowed the smi range).
__ bind(&use_fp_on_smis);
- if (op_ == Token::DIV || op_ == Token::MOD) {
+ if (op == Token::DIV || op == Token::MOD) {
// Restore left and right to rdx and rax.
__ movq(rdx, rcx);
__ movq(rax, rbx);
@@ -1194,12 +1203,12 @@ void BinaryOpStub::GenerateSmiCode(
if (generate_inline_heapnumber_results) {
__ AllocateHeapNumber(rcx, rbx, slow);
Comment perform_float(masm, "-- Perform float operation on smis");
- if (op_ == Token::SHR) {
+ if (op == Token::SHR) {
__ SmiToInteger32(left, left);
__ cvtqsi2sd(xmm0, left);
} else {
FloatingPointHelper::LoadSSE2SmiOperands(masm);
- switch (op_) {
+ switch (op) {
case Token::ADD: __ addsd(xmm0, xmm1); break;
case Token::SUB: __ subsd(xmm0, xmm1); break;
case Token::MUL: __ mulsd(xmm0, xmm1); break;
@@ -1222,31 +1231,50 @@ void BinaryOpStub::GenerateSmiCode(
// values that could be smi.
__ bind(&not_smis);
Comment done_comment(masm, "-- Enter non-smi code");
+ FloatingPointHelper::ConvertUndefined convert_undefined =
+ FloatingPointHelper::BAILOUT_ON_UNDEFINED;
+ // This list must be in sync with BinaryOpPatch() behavior in ic.cc.
+ if (op == Token::BIT_AND ||
+ op == Token::BIT_OR ||
+ op == Token::BIT_XOR ||
+ op == Token::SAR ||
+ op == Token::SHL ||
+ op == Token::SHR) {
+ convert_undefined = FloatingPointHelper::CONVERT_UNDEFINED_TO_ZERO;
+ }
FloatingPointHelper::NumbersToSmis(masm, left, right, rbx, rdi, rcx,
- &smi_values, &fail);
+ &smi_values, &fail, convert_undefined);
__ jmp(&smi_values);
__ bind(&fail);
}
-void BinaryOpStub::GenerateFloatingPointCode(MacroAssembler* masm,
- Label* allocation_failure,
- Label* non_numeric_failure) {
- switch (op_) {
+static void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
+ Label* alloc_failure,
+ OverwriteMode mode);
+
+
+static void BinaryOpStub_GenerateFloatingPointCode(MacroAssembler* masm,
+ Label* allocation_failure,
+ Label* non_numeric_failure,
+ Token::Value op,
+ OverwriteMode mode) {
+ switch (op) {
case Token::ADD:
case Token::SUB:
case Token::MUL:
case Token::DIV: {
FloatingPointHelper::LoadSSE2UnknownOperands(masm, non_numeric_failure);
- switch (op_) {
+ switch (op) {
case Token::ADD: __ addsd(xmm0, xmm1); break;
case Token::SUB: __ subsd(xmm0, xmm1); break;
case Token::MUL: __ mulsd(xmm0, xmm1); break;
case Token::DIV: __ divsd(xmm0, xmm1); break;
default: UNREACHABLE();
}
- GenerateHeapResultAllocation(masm, allocation_failure);
+ BinaryOpStub_GenerateHeapResultAllocation(
+ masm, allocation_failure, mode);
__ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
__ ret(0);
break;
@@ -1267,7 +1295,7 @@ void BinaryOpStub::GenerateFloatingPointCode(MacroAssembler* masm,
__ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
FloatingPointHelper::LoadAsIntegers(masm, non_numeric_failure,
heap_number_map);
- switch (op_) {
+ switch (op) {
case Token::BIT_OR: __ orl(rax, rcx); break;
case Token::BIT_AND: __ andl(rax, rcx); break;
case Token::BIT_XOR: __ xorl(rax, rcx); break;
@@ -1291,7 +1319,7 @@ void BinaryOpStub::GenerateFloatingPointCode(MacroAssembler* masm,
// Logical shift right can produce an unsigned int32 that is not
// an int32, and so is not in the smi range. Allocate a heap number
// in that case.
- if (op_ == Token::SHR) {
+ if (op == Token::SHR) {
__ bind(&non_smi_shr_result);
Label allocation_failed;
__ movl(rbx, rax); // rbx holds result value (uint32 value as int64).
@@ -1328,12 +1356,12 @@ void BinaryOpStub::GenerateFloatingPointCode(MacroAssembler* masm,
// No fall-through from this generated code.
if (FLAG_debug_code) {
__ Abort("Unexpected fall-through in "
- "BinaryStub::GenerateFloatingPointCode.");
+ "BinaryStub_GenerateFloatingPointCode.");
}
}
-void BinaryOpStub::GenerateStringAddCode(MacroAssembler* masm) {
+void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
ASSERT(op_ == Token::ADD);
Label left_not_string, call_runtime;
@@ -1364,58 +1392,17 @@ void BinaryOpStub::GenerateStringAddCode(MacroAssembler* masm) {
}
-void BinaryOpStub::GenerateCallRuntimeCode(MacroAssembler* masm) {
- GenerateRegisterArgsPush(masm);
- switch (op_) {
- case Token::ADD:
- __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
- break;
- case Token::SUB:
- __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
- break;
- case Token::MUL:
- __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
- break;
- case Token::DIV:
- __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
- break;
- case Token::MOD:
- __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
- break;
- case Token::BIT_OR:
- __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
- break;
- case Token::BIT_AND:
- __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
- break;
- case Token::BIT_XOR:
- __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
- break;
- case Token::SAR:
- __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
- break;
- case Token::SHL:
- __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
- break;
- case Token::SHR:
- __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
Label call_runtime;
if (result_type_ == BinaryOpIC::UNINITIALIZED ||
result_type_ == BinaryOpIC::SMI) {
// Only allow smi results.
- GenerateSmiCode(masm, NULL, NO_HEAPNUMBER_RESULTS);
+ BinaryOpStub_GenerateSmiCode(masm, NULL, NO_HEAPNUMBER_RESULTS, op_);
} else {
// Allow heap number result and don't make a transition if a heap number
// cannot be allocated.
- GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
+ BinaryOpStub_GenerateSmiCode(
+ masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS, op_);
}
// Code falls through if the result is not returned as either a smi or heap
@@ -1424,24 +1411,22 @@ void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
if (call_runtime.is_linked()) {
__ bind(&call_runtime);
- GenerateCallRuntimeCode(masm);
+ GenerateRegisterArgsPush(masm);
+ GenerateCallRuntime(masm);
}
}
-void BinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
- ASSERT(operands_type_ == BinaryOpIC::STRING);
- ASSERT(op_ == Token::ADD);
- GenerateStringAddCode(masm);
- // Try to add arguments as strings, otherwise, transition to the generic
- // BinaryOpIC type.
- GenerateTypeTransition(masm);
+void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
+ // The int32 case is identical to the Smi case. We avoid creating this
+ // ic state on x64.
+ UNREACHABLE();
}
void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
Label call_runtime;
- ASSERT(operands_type_ == BinaryOpIC::BOTH_STRING);
+ ASSERT(left_type_ == BinaryOpIC::STRING && right_type_ == BinaryOpIC::STRING);
ASSERT(op_ == Token::ADD);
// If both arguments are strings, call the string add stub.
// Otherwise, do a transition.
@@ -1475,7 +1460,7 @@ void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
if (op_ == Token::ADD) {
// Handle string addition here, because it is the only operation
// that does not do a ToNumber conversion on the operands.
- GenerateStringAddCode(masm);
+ GenerateAddStrings(masm);
}
// Convert oddball arguments to numbers.
@@ -1498,43 +1483,83 @@ void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
}
__ bind(&done);
- GenerateHeapNumberStub(masm);
+ GenerateNumberStub(masm);
+}
+
+
+static void BinaryOpStub_CheckSmiInput(MacroAssembler* masm,
+ Register input,
+ Label* fail) {
+ Label ok;
+ __ JumpIfSmi(input, &ok, Label::kNear);
+ Register heap_number_map = r8;
+ Register scratch1 = r9;
+ Register scratch2 = r10;
+ // HeapNumbers containing 32-bit integer values are also allowed.
+ __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+ __ cmpq(FieldOperand(input, HeapObject::kMapOffset), heap_number_map);
+ __ j(not_equal, fail);
+ __ movsd(xmm0, FieldOperand(input, HeapNumber::kValueOffset));
+ // Convert, convert back, and compare the two doubles' bits.
+ __ cvttsd2siq(scratch2, xmm0);
+ __ cvtlsi2sd(xmm1, scratch2);
+ __ movq(scratch1, xmm0);
+ __ movq(scratch2, xmm1);
+ __ cmpq(scratch1, scratch2);
+ __ j(not_equal, fail);
+ __ bind(&ok);
}
-void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
+void BinaryOpStub::GenerateNumberStub(MacroAssembler* masm) {
Label gc_required, not_number;
- GenerateFloatingPointCode(masm, &gc_required, &not_number);
+
+ // It could be that only SMIs have been seen at either the left
+ // or the right operand. For precise type feedback, patch the IC
+ // again if this changes.
+ if (left_type_ == BinaryOpIC::SMI) {
+ BinaryOpStub_CheckSmiInput(masm, rdx, &not_number);
+ }
+ if (right_type_ == BinaryOpIC::SMI) {
+ BinaryOpStub_CheckSmiInput(masm, rax, &not_number);
+ }
+
+ BinaryOpStub_GenerateFloatingPointCode(
+ masm, &gc_required, &not_number, op_, mode_);
__ bind(&not_number);
GenerateTypeTransition(masm);
__ bind(&gc_required);
- GenerateCallRuntimeCode(masm);
+ GenerateRegisterArgsPush(masm);
+ GenerateCallRuntime(masm);
}
void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
Label call_runtime, call_string_add_or_runtime;
- GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
+ BinaryOpStub_GenerateSmiCode(
+ masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS, op_);
- GenerateFloatingPointCode(masm, &call_runtime, &call_string_add_or_runtime);
+ BinaryOpStub_GenerateFloatingPointCode(
+ masm, &call_runtime, &call_string_add_or_runtime, op_, mode_);
__ bind(&call_string_add_or_runtime);
if (op_ == Token::ADD) {
- GenerateStringAddCode(masm);
+ GenerateAddStrings(masm);
}
__ bind(&call_runtime);
- GenerateCallRuntimeCode(masm);
+ GenerateRegisterArgsPush(masm);
+ GenerateCallRuntime(masm);
}
-void BinaryOpStub::GenerateHeapResultAllocation(MacroAssembler* masm,
- Label* alloc_failure) {
+static void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
+ Label* alloc_failure,
+ OverwriteMode mode) {
Label skip_allocation;
- OverwriteMode mode = mode_;
switch (mode) {
case OVERWRITE_LEFT: {
// If the argument in rdx is already an object, we skip the
@@ -2030,17 +2055,21 @@ void FloatingPointHelper::NumbersToSmis(MacroAssembler* masm,
Register scratch2,
Register scratch3,
Label* on_success,
- Label* on_not_smis) {
+ Label* on_not_smis,
+ ConvertUndefined convert_undefined) {
Register heap_number_map = scratch3;
Register smi_result = scratch1;
- Label done;
+ Label done, maybe_undefined_first, maybe_undefined_second, first_done;
__ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
Label first_smi;
__ JumpIfSmi(first, &first_smi, Label::kNear);
__ cmpq(FieldOperand(first, HeapObject::kMapOffset), heap_number_map);
- __ j(not_equal, on_not_smis);
+ __ j(not_equal,
+ (convert_undefined == CONVERT_UNDEFINED_TO_ZERO)
+ ? &maybe_undefined_first
+ : on_not_smis);
// Convert HeapNumber to smi if possible.
__ movsd(xmm0, FieldOperand(first, HeapNumber::kValueOffset));
__ movq(scratch2, xmm0);
@@ -2053,11 +2082,15 @@ void FloatingPointHelper::NumbersToSmis(MacroAssembler* masm,
__ j(not_equal, on_not_smis);
__ Integer32ToSmi(first, smi_result);
+ __ bind(&first_done);
__ JumpIfSmi(second, (on_success != NULL) ? on_success : &done);
__ bind(&first_smi);
__ AssertNotSmi(second);
__ cmpq(FieldOperand(second, HeapObject::kMapOffset), heap_number_map);
- __ j(not_equal, on_not_smis);
+ __ j(not_equal,
+ (convert_undefined == CONVERT_UNDEFINED_TO_ZERO)
+ ? &maybe_undefined_second
+ : on_not_smis);
// Convert second to smi, if possible.
__ movsd(xmm0, FieldOperand(second, HeapNumber::kValueOffset));
__ movq(scratch2, xmm0);
@@ -2070,8 +2103,25 @@ void FloatingPointHelper::NumbersToSmis(MacroAssembler* masm,
if (on_success != NULL) {
__ jmp(on_success);
} else {
- __ bind(&done);
+ __ jmp(&done);
+ }
+
+ __ bind(&maybe_undefined_first);
+ __ CompareRoot(first, Heap::kUndefinedValueRootIndex);
+ __ j(not_equal, on_not_smis);
+ __ xor_(first, first);
+ __ jmp(&first_done);
+
+ __ bind(&maybe_undefined_second);
+ __ CompareRoot(second, Heap::kUndefinedValueRootIndex);
+ __ j(not_equal, on_not_smis);
+ __ xor_(second, second);
+ if (on_success != NULL) {
+ __ jmp(on_success);
}
+ // Else: fall through.
+
+ __ bind(&done);
}
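
The ConvertUndefined plumbing added to NumbersToSmis encodes a JavaScript rule: bitwise and shift operators run their operands through ToInt32, under which undefined becomes 0, while for arithmetic operators undefined becomes NaN and the stub must bail out. The token-level decision, restated:

    // Mirrors the list kept in sync with BinaryOpPatch() in ic.cc.
    enum Token { ADD, SUB, MUL, DIV, MOD,
                 BIT_AND, BIT_OR, BIT_XOR, SAR, SHL, SHR };

    bool ConvertsUndefinedToZero(Token op) {
      switch (op) {
        case BIT_AND: case BIT_OR: case BIT_XOR:
        case SAR: case SHL: case SHR:
          return true;   // ToInt32(undefined) == 0
        default:
          return false;  // ToNumber(undefined) == NaN: fall back to runtime
      }
    }
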
@@ -2152,7 +2202,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
Label continue_sqrt, continue_rsqrt, not_plus_half;
// Test for 0.5.
// Load double_scratch with 0.5.
- __ movq(scratch, V8_UINT64_C(0x3FE0000000000000), RelocInfo::NONE);
+ __ movq(scratch, V8_UINT64_C(0x3FE0000000000000), RelocInfo::NONE64);
__ movq(double_scratch, scratch);
// Already ruled out NaNs for exponent.
__ ucomisd(double_scratch, double_exponent);
@@ -2162,7 +2212,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
// Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
// According to IEEE-754, double-precision -Infinity has the highest
// 12 bits set and the lowest 52 bits cleared.
- __ movq(scratch, V8_UINT64_C(0xFFF0000000000000), RelocInfo::NONE);
+ __ movq(scratch, V8_UINT64_C(0xFFF0000000000000), RelocInfo::NONE64);
__ movq(double_scratch, scratch);
__ ucomisd(double_scratch, double_base);
// Comparing -Infinity with NaN results in "unordered", which sets the
@@ -2194,7 +2244,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
// case of Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
// According to IEEE-754, double-precision -Infinity has the highest
// 12 bits set and the lowest 52 bits cleared.
- __ movq(scratch, V8_UINT64_C(0xFFF0000000000000), RelocInfo::NONE);
+ __ movq(scratch, V8_UINT64_C(0xFFF0000000000000), RelocInfo::NONE64);
__ movq(double_scratch, scratch);
__ ucomisd(double_scratch, double_base);
// Comparing -Infinity with NaN results in "unordered", which sets the
@@ -2237,7 +2287,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
// F2XM1 calculates 2^st(0) - 1 for -1 < st(0) < 1
__ f2xm1(); // 2^(X-rnd(X)) - 1, rnd(X)
__ fld1(); // 1, 2^(X-rnd(X)) - 1, rnd(X)
- __ faddp(1); // 1, 2^(X-rnd(X)), rnd(X)
+ __ faddp(1); // 2^(X-rnd(X)), rnd(X)
// FSCALE calculates st(0) * 2^st(1)
__ fscale(); // 2^X, rnd(X)
__ fstp(1);
@@ -2339,6 +2389,156 @@ void MathPowStub::Generate(MacroAssembler* masm) {
}
+void ArrayLengthStub::Generate(MacroAssembler* masm) {
+ Label miss;
+ Register receiver;
+ if (kind() == Code::KEYED_LOAD_IC) {
+ // ----------- S t a t e -------------
+ // -- rax : key
+ // -- rdx : receiver
+ // -- rsp[0] : return address
+ // -----------------------------------
+ __ Cmp(rax, masm->isolate()->factory()->length_string());
+ receiver = rdx;
+ } else {
+ ASSERT(kind() == Code::LOAD_IC);
+ // ----------- S t a t e -------------
+ // -- rax : receiver
+ // -- rcx : name
+ // -- rsp[0] : return address
+ // -----------------------------------
+ receiver = rax;
+ }
+
+ StubCompiler::GenerateLoadArrayLength(masm, receiver, r8, &miss);
+ __ bind(&miss);
+ StubCompiler::GenerateLoadMiss(masm, kind());
+}
+
+
+void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
+ Label miss;
+ Register receiver;
+ if (kind() == Code::KEYED_LOAD_IC) {
+ // ----------- S t a t e -------------
+ // -- rax : key
+ // -- rdx : receiver
+ // -- rsp[0] : return address
+ // -----------------------------------
+ __ Cmp(rax, masm->isolate()->factory()->prototype_string());
+ receiver = rdx;
+ } else {
+ ASSERT(kind() == Code::LOAD_IC);
+ // ----------- S t a t e -------------
+ // -- rax : receiver
+ // -- rcx : name
+ // -- rsp[0] : return address
+ // -----------------------------------
+ receiver = rax;
+ }
+
+ StubCompiler::GenerateLoadFunctionPrototype(masm, receiver, r8, r9, &miss);
+ __ bind(&miss);
+ StubCompiler::GenerateLoadMiss(masm, kind());
+}
+
+
+void StringLengthStub::Generate(MacroAssembler* masm) {
+ Label miss;
+ Register receiver;
+ if (kind() == Code::KEYED_LOAD_IC) {
+ // ----------- S t a t e -------------
+ // -- rax : key
+ // -- rdx : receiver
+ // -- rsp[0] : return address
+ // -----------------------------------
+ __ Cmp(rax, masm->isolate()->factory()->length_string());
+ receiver = rdx;
+ } else {
+ ASSERT(kind() == Code::LOAD_IC);
+ // ----------- S t a t e -------------
+ // -- rax : receiver
+ // -- rcx : name
+ // -- rsp[0] : return address
+ // -----------------------------------
+ receiver = rax;
+ }
+
+ StubCompiler::GenerateLoadStringLength(masm, receiver, r8, r9, &miss,
+ support_wrapper_);
+ __ bind(&miss);
+ StubCompiler::GenerateLoadMiss(masm, kind());
+}
+
+
+void StoreArrayLengthStub::Generate(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rax : value
+ // -- rcx : key
+ // -- rdx : receiver
+ // -- rsp[0] : return address
+ // -----------------------------------
+ //
+ // This accepts as a receiver anything JSArray::SetElementsLength accepts
+  // (currently anything except for external arrays, which means anything with
+ // elements of FixedArray type). Value must be a number, but only smis are
+ // accepted as the most common case.
+
+ Label miss;
+
+ Register receiver = rdx;
+ Register value = rax;
+ Register scratch = rbx;
+ if (kind() == Code::KEYED_STORE_IC) {
+ __ Cmp(rcx, masm->isolate()->factory()->length_string());
+ }
+
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(receiver, &miss);
+
+ // Check that the object is a JS array.
+ __ CmpObjectType(receiver, JS_ARRAY_TYPE, scratch);
+ __ j(not_equal, &miss);
+
+ // Check that elements are FixedArray.
+ // We rely on StoreIC_ArrayLength below to deal with all types of
+ // fast elements (including COW).
+ __ movq(scratch, FieldOperand(receiver, JSArray::kElementsOffset));
+ __ CmpObjectType(scratch, FIXED_ARRAY_TYPE, scratch);
+ __ j(not_equal, &miss);
+
+ // Check that the array has fast properties, otherwise the length
+ // property might have been redefined.
+ __ movq(scratch, FieldOperand(receiver, JSArray::kPropertiesOffset));
+ __ CompareRoot(FieldOperand(scratch, FixedArray::kMapOffset),
+ Heap::kHashTableMapRootIndex);
+ __ j(equal, &miss);
+
+ // Check that value is a smi.
+ __ JumpIfNotSmi(value, &miss);
+
+ // Prepare tail call to StoreIC_ArrayLength.
+ __ pop(scratch);
+ __ push(receiver);
+ __ push(value);
+ __ push(scratch); // return address
+
+ ExternalReference ref =
+ ExternalReference(IC_Utility(IC::kStoreIC_ArrayLength), masm->isolate());
+ __ TailCallExternalReference(ref, 2, 1);
+
+ __ bind(&miss);
+
+ StubCompiler::GenerateStoreMiss(masm, kind());
+}
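The pop/push sequence above rewrites the top of the stack so the two runtime-call arguments sit under the preserved return address, matching TailCallExternalReference(ref, 2, 1). A standalone sketch of the reshuffle, separate from the patch, with the stack modeled as a vector:

    #include <cstdio>
    #include <string>
    #include <vector>

    int main() {
      // back() plays the role of rsp[0].
      std::vector<std::string> stack = {"caller frame", "return address"};
      std::string scratch = stack.back();  // __ pop(scratch)
      stack.pop_back();
      stack.push_back("receiver");         // __ push(receiver)
      stack.push_back("value");            // __ push(value)
      stack.push_back(scratch);            // __ push(scratch): return address back on top
      for (auto it = stack.rbegin(); it != stack.rend(); ++it)
        std::printf("%s\n", it->c_str());
      return 0;
    }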
+
+
+void LoadFieldStub::Generate(MacroAssembler* masm) {
+ StubCompiler::DoGenerateFastPropertyLoad(masm, rax, reg_, inobject_, index_);
+ __ ret(0);
+}
+
+
void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
// The key is in rdx and the parameter count is in rax.
@@ -2778,6 +2978,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ JumpIfSmi(rax, &runtime);
__ CmpObjectType(rax, JS_REGEXP_TYPE, kScratchRegister);
__ j(not_equal, &runtime);
+
// Check that the RegExp has been compiled (data contains a fixed array).
__ movq(rax, FieldOperand(rax, JSRegExp::kDataOffset));
if (FLAG_debug_code) {
@@ -2798,149 +2999,121 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Check that the number of captures fit in the static offsets vector buffer.
__ SmiToInteger32(rdx,
FieldOperand(rax, JSRegExp::kIrregexpCaptureCountOffset));
- // Calculate number of capture registers (number_of_captures + 1) * 2.
- __ leal(rdx, Operand(rdx, rdx, times_1, 2));
- // Check that the static offsets vector buffer is large enough.
- __ cmpl(rdx, Immediate(Isolate::kJSRegexpStaticOffsetsVectorSize));
+ // Check (number_of_captures + 1) * 2 <= offsets vector size
+ // Or number_of_captures <= offsets vector size / 2 - 1
+ STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2);
+ __ cmpl(rdx, Immediate(Isolate::kJSRegexpStaticOffsetsVectorSize / 2 - 1));
__ j(above, &runtime);
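The rewritten bound avoids materializing (number_of_captures + 1) * 2 in a register: for non-negative n and an even vector size of at least 2 (the STATIC_ASSERT pins the lower bound), (n + 1) * 2 <= size is equivalent to n <= size / 2 - 1. A standalone check, separate from the patch, using an illustrative size rather than the real kJSRegexpStaticOffsetsVectorSize:

    #include <cassert>

    int main() {
      const int size = 1024;  // illustrative even size, >= 2
      for (int n = 0; n < size; ++n)
        assert(((n + 1) * 2 <= size) == (n <= size / 2 - 1));
      return 0;
    }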
- // rax: RegExp data (FixedArray)
- // rdx: Number of capture registers
- // Check that the second argument is a string.
- __ movq(rdi, Operand(rsp, kSubjectOffset));
- __ JumpIfSmi(rdi, &runtime);
- Condition is_string = masm->IsObjectStringType(rdi, rbx, rbx);
- __ j(NegateCondition(is_string), &runtime);
-
- // rdi: Subject string.
- // rax: RegExp data (FixedArray).
- // rdx: Number of capture registers.
- // Check that the third argument is a positive smi less than the string
- // length. A negative value will be greater (unsigned comparison).
- __ movq(rbx, Operand(rsp, kPreviousIndexOffset));
- __ JumpIfNotSmi(rbx, &runtime);
- __ SmiCompare(rbx, FieldOperand(rdi, String::kLengthOffset));
- __ j(above_equal, &runtime);
-
- // rax: RegExp data (FixedArray)
- // rdx: Number of capture registers
- // Check that the fourth object is a JSArray object.
- __ movq(rdi, Operand(rsp, kLastMatchInfoOffset));
- __ JumpIfSmi(rdi, &runtime);
- __ CmpObjectType(rdi, JS_ARRAY_TYPE, kScratchRegister);
- __ j(not_equal, &runtime);
- // Check that the JSArray is in fast case.
- __ movq(rbx, FieldOperand(rdi, JSArray::kElementsOffset));
- __ movq(rdi, FieldOperand(rbx, HeapObject::kMapOffset));
- __ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
- Heap::kFixedArrayMapRootIndex);
- __ j(not_equal, &runtime);
- // Check that the last match info has space for the capture registers and the
- // additional information. Ensure no overflow in add.
- STATIC_ASSERT(FixedArray::kMaxLength < kMaxInt - FixedArray::kLengthOffset);
- __ SmiToInteger32(rdi, FieldOperand(rbx, FixedArray::kLengthOffset));
- __ addl(rdx, Immediate(RegExpImpl::kLastMatchOverhead));
- __ cmpl(rdx, rdi);
- __ j(greater, &runtime);
-
// Reset offset for possibly sliced string.
__ Set(r14, 0);
- // rax: RegExp data (FixedArray)
- // Check the representation and encoding of the subject string.
- Label seq_ascii_string, seq_two_byte_string, check_code;
__ movq(rdi, Operand(rsp, kSubjectOffset));
- // Make a copy of the original subject string.
- __ movq(r15, rdi);
+ __ JumpIfSmi(rdi, &runtime);
+ __ movq(r15, rdi); // Make a copy of the original subject string.
__ movq(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
__ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
- // First check for flat two byte string.
+ // rax: RegExp data (FixedArray)
+ // rdi: subject string
+ // r15: subject string
+ // Handle subject string according to its encoding and representation:
+ // (1) Sequential two byte? If yes, go to (9).
+ // (2) Sequential one byte? If yes, go to (6).
+ // (3) Anything but sequential or cons? If yes, go to (7).
+ // (4) Cons string. If the string is flat, replace subject with first string.
+  //     Otherwise bail out.
+ // (5a) Is subject sequential two byte? If yes, go to (9).
+ // (5b) Is subject external? If yes, go to (8).
+ // (6) One byte sequential. Load regexp code for one byte.
+ // (E) Carry on.
+ /// [...]
+
+ // Deferred code at the end of the stub:
+ // (7) Not a long external string? If yes, go to (10).
+ // (8) External string. Make it, offset-wise, look like a sequential string.
+ // (8a) Is the external string one byte? If yes, go to (6).
+  // (9) Two byte sequential.  Load regexp code for two byte. Go to (E).
+ // (10) Short external string or not a string? If yes, bail out to runtime.
+ // (11) Sliced string. Replace subject with parent. Go to (5a).
+
+ Label seq_one_byte_string /* 6 */, seq_two_byte_string /* 9 */,
+ external_string /* 8 */, check_underlying /* 5a */,
+ not_seq_nor_cons /* 7 */, check_code /* E */,
+ not_long_external /* 10 */;
+
+ // (1) Sequential two byte? If yes, go to (9).
__ andb(rbx, Immediate(kIsNotStringMask |
kStringRepresentationMask |
kStringEncodingMask |
kShortExternalStringMask));
STATIC_ASSERT((kStringTag | kSeqStringTag | kTwoByteStringTag) == 0);
- __ j(zero, &seq_two_byte_string, Label::kNear);
- // Any other flat string must be a flat ASCII string. None of the following
- // string type tests will succeed if subject is not a string or a short
- // external string.
+ __ j(zero, &seq_two_byte_string); // Go to (9).
+
+ // (2) Sequential one byte? If yes, go to (6).
+ // Any other sequential string must be one byte.
__ andb(rbx, Immediate(kIsNotStringMask |
kStringRepresentationMask |
kShortExternalStringMask));
- __ j(zero, &seq_ascii_string, Label::kNear);
-
- // rbx: whether subject is a string and if yes, its string representation
- // Check for flat cons string or sliced string.
- // A flat cons string is a cons string where the second part is the empty
- // string. In that case the subject string is just the first part of the cons
- // string. Also in this case the first part of the cons string is known to be
- // a sequential string or an external string.
- // In the case of a sliced string its offset has to be taken into account.
- Label cons_string, external_string, check_encoding;
+ __ j(zero, &seq_one_byte_string, Label::kNear); // Go to (6).
+
+ // (3) Anything but sequential or cons? If yes, go to (7).
+ // We check whether the subject string is a cons, since sequential strings
+ // have already been covered.
STATIC_ASSERT(kConsStringTag < kExternalStringTag);
STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
__ cmpq(rbx, Immediate(kExternalStringTag));
- __ j(less, &cons_string, Label::kNear);
- __ j(equal, &external_string);
-
- // Catch non-string subject or short external string.
- STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag !=0);
- __ testb(rbx, Immediate(kIsNotStringMask | kShortExternalStringMask));
- __ j(not_zero, &runtime);
+ __ j(greater_equal, &not_seq_nor_cons); // Go to (7).
- // String is sliced.
- __ SmiToInteger32(r14, FieldOperand(rdi, SlicedString::kOffsetOffset));
- __ movq(rdi, FieldOperand(rdi, SlicedString::kParentOffset));
- // r14: slice offset
- // r15: original subject string
- // rdi: parent string
- __ jmp(&check_encoding, Label::kNear);
- // String is a cons string, check whether it is flat.
- __ bind(&cons_string);
+ // (4) Cons string. Check that it's flat.
+ // Replace subject with first string and reload instance type.
__ CompareRoot(FieldOperand(rdi, ConsString::kSecondOffset),
- Heap::kEmptyStringRootIndex);
+ Heap::kempty_stringRootIndex);
__ j(not_equal, &runtime);
__ movq(rdi, FieldOperand(rdi, ConsString::kFirstOffset));
- // rdi: first part of cons string or parent of sliced string.
- // rbx: map of first part of cons string or map of parent of sliced string.
- // Is first part of cons or parent of slice a flat two byte string?
- __ bind(&check_encoding);
+ __ bind(&check_underlying);
__ movq(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
- __ testb(FieldOperand(rbx, Map::kInstanceTypeOffset),
- Immediate(kStringRepresentationMask | kStringEncodingMask));
+ __ movq(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
+
+ // (5a) Is subject sequential two byte? If yes, go to (9).
+ __ testb(rbx, Immediate(kStringRepresentationMask | kStringEncodingMask));
STATIC_ASSERT((kSeqStringTag | kTwoByteStringTag) == 0);
- __ j(zero, &seq_two_byte_string, Label::kNear);
- // Any other flat string must be sequential ASCII or external.
- __ testb(FieldOperand(rbx, Map::kInstanceTypeOffset),
- Immediate(kStringRepresentationMask));
- __ j(not_zero, &external_string);
-
- __ bind(&seq_ascii_string);
- // rdi: subject string (sequential ASCII)
+ __ j(zero, &seq_two_byte_string); // Go to (9).
+ // (5b) Is subject external? If yes, go to (8).
+ __ testb(rbx, Immediate(kStringRepresentationMask));
+ // The underlying external string is never a short external string.
+ STATIC_CHECK(ExternalString::kMaxShortLength < ConsString::kMinLength);
+ STATIC_CHECK(ExternalString::kMaxShortLength < SlicedString::kMinLength);
+  __ j(not_zero, &external_string);  // Go to (8).
+
+ // (6) One byte sequential. Load regexp code for one byte.
+ __ bind(&seq_one_byte_string);
// rax: RegExp data (FixedArray)
__ movq(r11, FieldOperand(rax, JSRegExp::kDataAsciiCodeOffset));
- __ Set(rcx, 1); // Type is ASCII.
- __ jmp(&check_code, Label::kNear);
-
- __ bind(&seq_two_byte_string);
- // rdi: subject string (flat two-byte)
- // rax: RegExp data (FixedArray)
- __ movq(r11, FieldOperand(rax, JSRegExp::kDataUC16CodeOffset));
- __ Set(rcx, 0); // Type is two byte.
+ __ Set(rcx, 1); // Type is one byte.
+ // (E) Carry on. String handling is done.
__ bind(&check_code);
+ // r11: irregexp code
// Check that the irregexp code has been generated for the actual string
   // encoding. If it has, the field contains a code object; otherwise it
   // contains a smi (code flushing support).
__ JumpIfSmi(r11, &runtime);
- // rdi: subject string
+ // rdi: sequential subject string (or look-alike, external string)
+ // r15: original subject string
// rcx: encoding of subject string (1 if ASCII, 0 if two_byte);
// r11: code
// Load used arguments before starting to push arguments for call to native
// RegExp code to avoid handling changing stack height.
- __ SmiToInteger64(rbx, Operand(rsp, kPreviousIndexOffset));
+ // We have to use r15 instead of rdi to load the length because rdi might
+ // have been only made to look like a sequential string when it actually
+ // is an external string.
+ __ movq(rbx, Operand(rsp, kPreviousIndexOffset));
+ __ JumpIfNotSmi(rbx, &runtime);
+ __ SmiCompare(rbx, FieldOperand(r15, String::kLengthOffset));
+ __ j(above_equal, &runtime);
+ __ SmiToInteger64(rbx, rbx);
// rdi: subject string
// rbx: previous index
@@ -3030,8 +3203,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// r15: original subject string
__ testb(rcx, rcx); // Last use of rcx as encoding of subject string.
__ j(zero, &setup_two_byte, Label::kNear);
- __ lea(arg4, FieldOperand(rdi, r14, times_1, SeqAsciiString::kHeaderSize));
- __ lea(arg3, FieldOperand(rdi, rbx, times_1, SeqAsciiString::kHeaderSize));
+ __ lea(arg4, FieldOperand(rdi, r14, times_1, SeqOneByteString::kHeaderSize));
+ __ lea(arg3, FieldOperand(rdi, rbx, times_1, SeqOneByteString::kHeaderSize));
__ jmp(&setup_rest, Label::kNear);
__ bind(&setup_two_byte);
__ lea(arg4, FieldOperand(rdi, r14, times_2, SeqTwoByteString::kHeaderSize));
@@ -3079,9 +3252,23 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ leal(rdx, Operand(rax, rax, times_1, 2));
// rdx: Number of capture registers
- // Load last_match_info which is still known to be a fast case JSArray.
- __ movq(rax, Operand(rsp, kLastMatchInfoOffset));
- __ movq(rbx, FieldOperand(rax, JSArray::kElementsOffset));
+ // Check that the fourth object is a JSArray object.
+ __ movq(r15, Operand(rsp, kLastMatchInfoOffset));
+ __ JumpIfSmi(r15, &runtime);
+ __ CmpObjectType(r15, JS_ARRAY_TYPE, kScratchRegister);
+ __ j(not_equal, &runtime);
+ // Check that the JSArray is in fast case.
+ __ movq(rbx, FieldOperand(r15, JSArray::kElementsOffset));
+ __ movq(rax, FieldOperand(rbx, HeapObject::kMapOffset));
+ __ CompareRoot(rax, Heap::kFixedArrayMapRootIndex);
+ __ j(not_equal, &runtime);
+ // Check that the last match info has space for the capture registers and the
+ // additional information. Ensure no overflow in add.
+ STATIC_ASSERT(FixedArray::kMaxLength < kMaxInt - FixedArray::kLengthOffset);
+ __ SmiToInteger32(rax, FieldOperand(rbx, FixedArray::kLengthOffset));
+ __ subl(rax, Immediate(RegExpImpl::kLastMatchOverhead));
+ __ cmpl(rdx, rax);
+ __ j(greater, &runtime);
// rbx: last_match_info backing store (FixedArray)
// rdx: number of capture registers
@@ -3092,12 +3279,13 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Store last subject and last input.
__ movq(rax, Operand(rsp, kSubjectOffset));
__ movq(FieldOperand(rbx, RegExpImpl::kLastSubjectOffset), rax);
+ __ movq(rcx, rax);
__ RecordWriteField(rbx,
RegExpImpl::kLastSubjectOffset,
rax,
rdi,
kDontSaveFPRegs);
- __ movq(rax, Operand(rsp, kSubjectOffset));
+ __ movq(rax, rcx);
__ movq(FieldOperand(rbx, RegExpImpl::kLastInputOffset), rax);
__ RecordWriteField(rbx,
RegExpImpl::kLastInputOffset,
@@ -3131,7 +3319,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ bind(&done);
// Return last match info.
- __ movq(rax, Operand(rsp, kLastMatchInfoOffset));
+ __ movq(rax, r15);
__ ret(4 * kPointerSize);
__ bind(&exception);
@@ -3157,9 +3345,17 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ bind(&termination_exception);
__ ThrowUncatchable(rax);
- // External string. Short external strings have already been ruled out.
- // rdi: subject string (expected to be external)
- // rbx: scratch
+ // Do the runtime call to execute the regexp.
+ __ bind(&runtime);
+ __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+
+ // Deferred code for string handling.
+ // (7) Not a long external string? If yes, go to (10).
+ __ bind(&not_seq_nor_cons);
+ // Compare flags are still set from (3).
+ __ j(greater, &not_long_external, Label::kNear); // Go to (10).
+
+ // (8) External string. Short external strings have been ruled out.
__ bind(&external_string);
__ movq(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
__ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
@@ -3171,16 +3367,33 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
}
__ movq(rdi, FieldOperand(rdi, ExternalString::kResourceDataOffset));
// Move the pointer so that offset-wise, it looks like a sequential string.
- STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
__ subq(rdi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
STATIC_ASSERT(kTwoByteStringTag == 0);
+ // (8a) Is the external string one byte? If yes, go to (6).
__ testb(rbx, Immediate(kStringEncodingMask));
- __ j(not_zero, &seq_ascii_string);
- __ jmp(&seq_two_byte_string);
+  __ j(not_zero, &seq_one_byte_string);  // Go to (6).
- // Do the runtime call to execute the regexp.
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
+ // rdi: subject string (flat two-byte)
+ // rax: RegExp data (FixedArray)
+  // (9) Two byte sequential.  Load regexp code for two byte. Go to (E).
+ __ bind(&seq_two_byte_string);
+ __ movq(r11, FieldOperand(rax, JSRegExp::kDataUC16CodeOffset));
+ __ Set(rcx, 0); // Type is two byte.
+ __ jmp(&check_code); // Go to (E).
+
+ // (10) Not a string or a short external string? If yes, bail out to runtime.
+ __ bind(&not_long_external);
+ // Catch non-string subject or short external string.
+  STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag != 0);
+ __ testb(rbx, Immediate(kIsNotStringMask | kShortExternalStringMask));
+ __ j(not_zero, &runtime);
+
+ // (11) Sliced string. Replace subject with parent. Go to (5a).
+ // Load offset into r14 and replace subject string with parent.
+ __ SmiToInteger32(r14, FieldOperand(rdi, SlicedString::kOffsetOffset));
+ __ movq(rdi, FieldOperand(rdi, SlicedString::kParentOffset));
+ __ jmp(&check_underlying);
#endif // V8_INTERPRETED_REGEXP
}
@@ -3386,30 +3599,59 @@ static int NegativeComparisonResult(Condition cc) {
}
-void CompareStub::Generate(MacroAssembler* masm) {
- ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
+static void CheckInputType(MacroAssembler* masm,
+ Register input,
+ CompareIC::State expected,
+ Label* fail) {
+ Label ok;
+ if (expected == CompareIC::SMI) {
+ __ JumpIfNotSmi(input, fail);
+ } else if (expected == CompareIC::NUMBER) {
+ __ JumpIfSmi(input, &ok);
+ __ CompareMap(input, masm->isolate()->factory()->heap_number_map(), NULL);
+ __ j(not_equal, fail);
+ }
+ // We could be strict about internalized/non-internalized here, but as long as
+ // hydrogen doesn't care, the stub doesn't have to care either.
+ __ bind(&ok);
+}
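CheckInputType only enforces the two states it can check cheaply: SMI requires a smi, NUMBER accepts a smi or a heap number, and everything else falls through to the generic comparison. A standalone sketch of that gating, separate from the patch, with illustrative enums standing in for the real CompareIC::State:

    #include <cassert>

    enum State { SMI, NUMBER, GENERIC };             // illustrative subset
    enum Kind { IS_SMI, IS_HEAP_NUMBER, IS_OTHER };

    bool PassesCheck(State expected, Kind input) {
      if (expected == SMI) return input == IS_SMI;
      if (expected == NUMBER) return input == IS_SMI || input == IS_HEAP_NUMBER;
      return true;  // other states: no up-front check, the stub sorts it out
    }

    int main() {
      assert(PassesCheck(NUMBER, IS_SMI));        // smis are valid NUMBER inputs
      assert(!PassesCheck(SMI, IS_HEAP_NUMBER));  // heap numbers miss the SMI state
      return 0;
    }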
+
+static void BranchIfNotInternalizedString(MacroAssembler* masm,
+ Label* label,
+ Register object,
+ Register scratch) {
+ __ JumpIfSmi(object, label);
+ __ movq(scratch, FieldOperand(object, HeapObject::kMapOffset));
+ __ movzxbq(scratch,
+ FieldOperand(scratch, Map::kInstanceTypeOffset));
+ // Ensure that no non-strings have the internalized bit set.
+ STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsInternalizedMask);
+ STATIC_ASSERT(kInternalizedTag != 0);
+ __ testb(scratch, Immediate(kIsInternalizedMask));
+ __ j(zero, label);
+}
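The two STATIC_ASSERTs are what make the single testb safe: non-string instance types start at kNotStringTag, and as long as LAST_TYPE stays below kNotStringTag + kIsInternalizedMask, no non-string type can carry the internalized bit. A standalone sketch, separate from the patch, with illustrative stand-in constants rather than the real V8 values:

    #include <cassert>

    int main() {
      const int kNotStringTag = 0x80;        // illustrative
      const int kIsInternalizedMask = 0x40;  // illustrative
      const int LAST_TYPE = 0xbf;            // illustrative, < 0x80 + 0x40
      for (int type = kNotStringTag; type <= LAST_TYPE; ++type)
        assert((type & kIsInternalizedMask) == 0);  // no non-string passes the testb
      return 0;
    }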
+
+
+void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
Label check_unequal_objects, done;
+ Condition cc = GetCondition();
Factory* factory = masm->isolate()->factory();
- // Compare two smis if required.
- if (include_smi_compare_) {
- Label non_smi, smi_done;
- __ JumpIfNotBothSmi(rax, rdx, &non_smi);
- __ subq(rdx, rax);
- __ j(no_overflow, &smi_done);
- __ not_(rdx); // Correct sign in case of overflow. rdx cannot be 0 here.
- __ bind(&smi_done);
- __ movq(rax, rdx);
- __ ret(0);
- __ bind(&non_smi);
- } else if (FLAG_debug_code) {
- Label ok;
- __ JumpIfNotSmi(rdx, &ok);
- __ JumpIfNotSmi(rax, &ok);
- __ Abort("CompareStub: smi operands");
- __ bind(&ok);
- }
+ Label miss;
+ CheckInputType(masm, rdx, left_, &miss);
+ CheckInputType(masm, rax, right_, &miss);
+
+ // Compare two smis.
+ Label non_smi, smi_done;
+ __ JumpIfNotBothSmi(rax, rdx, &non_smi);
+ __ subq(rdx, rax);
+ __ j(no_overflow, &smi_done);
+ __ not_(rdx); // Correct sign in case of overflow. rdx cannot be 0 here.
+ __ bind(&smi_done);
+ __ movq(rax, rdx);
+ __ ret(0);
+ __ bind(&non_smi);
// The compare stub returns a positive, negative, or zero 64-bit integer
// value in rax, corresponding to result of comparing the two inputs.
@@ -3438,7 +3680,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
__ bind(&user_equal);
- __ pop(rbx); // Return address.
+ __ pop(rbx); // Return address.
__ push(rax);
__ push(rdx);
__ push(rbx);
@@ -3453,66 +3695,58 @@ void CompareStub::Generate(MacroAssembler* masm) {
__ cmpq(rax, rdx);
__ j(not_equal, &not_identical, Label::kNear);
- if (cc_ != equal) {
+ if (cc != equal) {
// Check for undefined. undefined OP undefined is false even though
// undefined == undefined.
Label check_for_nan;
__ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
__ j(not_equal, &check_for_nan, Label::kNear);
- __ Set(rax, NegativeComparisonResult(cc_));
+ __ Set(rax, NegativeComparisonResult(cc));
__ ret(0);
__ bind(&check_for_nan);
}
// Test for NaN. Sadly, we can't just compare to FACTORY->nan_value(),
// so we do the second best thing - test it ourselves.
- // Note: if cc_ != equal, never_nan_nan_ is not used.
- // We cannot set rax to EQUAL until just before return because
- // rax must be unchanged on jump to not_identical.
- if (never_nan_nan_ && (cc_ == equal)) {
- __ Set(rax, EQUAL);
- __ ret(0);
- } else {
- Label heap_number;
- // If it's not a heap number, then return equal for (in)equality operator.
- __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset),
- factory->heap_number_map());
- __ j(equal, &heap_number, Label::kNear);
- if (cc_ != equal) {
- // Call runtime on identical objects. Otherwise return equal.
- __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx);
- __ j(above_equal, &not_identical, Label::kNear);
- }
- __ Set(rax, EQUAL);
- __ ret(0);
+ Label heap_number;
+ // If it's not a heap number, then return equal for (in)equality operator.
+ __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset),
+ factory->heap_number_map());
+ __ j(equal, &heap_number, Label::kNear);
+ if (cc != equal) {
+ // Call runtime on identical objects. Otherwise return equal.
+ __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx);
+ __ j(above_equal, &not_identical, Label::kNear);
+ }
+ __ Set(rax, EQUAL);
+ __ ret(0);
- __ bind(&heap_number);
- // It is a heap number, so return equal if it's not NaN.
- // For NaN, return 1 for every condition except greater and
- // greater-equal. Return -1 for them, so the comparison yields
- // false for all conditions except not-equal.
- __ Set(rax, EQUAL);
- __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
- __ ucomisd(xmm0, xmm0);
- __ setcc(parity_even, rax);
- // rax is 0 for equal non-NaN heapnumbers, 1 for NaNs.
- if (cc_ == greater_equal || cc_ == greater) {
- __ neg(rax);
- }
- __ ret(0);
+ __ bind(&heap_number);
+ // It is a heap number, so return equal if it's not NaN.
+ // For NaN, return 1 for every condition except greater and
+ // greater-equal. Return -1 for them, so the comparison yields
+ // false for all conditions except not-equal.
+ __ Set(rax, EQUAL);
+ __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
+ __ ucomisd(xmm0, xmm0);
+ __ setcc(parity_even, rax);
+ // rax is 0 for equal non-NaN heapnumbers, 1 for NaNs.
+ if (cc == greater_equal || cc == greater) {
+ __ neg(rax);
}
+ __ ret(0);
__ bind(&not_identical);
}
- if (cc_ == equal) { // Both strict and non-strict.
+ if (cc == equal) { // Both strict and non-strict.
Label slow; // Fallthrough label.
// If we're doing a strict equality comparison, we don't have to do
// type conversion, so we generate code to do fast comparison for objects
// and oddballs. Non-smi numbers and strings still go through the usual
// slow-case code.
- if (strict_) {
+ if (strict()) {
// If either is a Smi (we know that not both are), then they can only
// be equal if the other is a HeapNumber. If so, use the slow case.
{
@@ -3564,46 +3798,46 @@ void CompareStub::Generate(MacroAssembler* masm) {
}
// Generate the number comparison code.
- if (include_number_compare_) {
- Label non_number_comparison;
- Label unordered;
- FloatingPointHelper::LoadSSE2UnknownOperands(masm, &non_number_comparison);
- __ xorl(rax, rax);
- __ xorl(rcx, rcx);
- __ ucomisd(xmm0, xmm1);
-
- // Don't base result on EFLAGS when a NaN is involved.
- __ j(parity_even, &unordered, Label::kNear);
- // Return a result of -1, 0, or 1, based on EFLAGS.
- __ setcc(above, rax);
- __ setcc(below, rcx);
- __ subq(rax, rcx);
- __ ret(0);
+ Label non_number_comparison;
+ Label unordered;
+ FloatingPointHelper::LoadSSE2UnknownOperands(masm, &non_number_comparison);
+ __ xorl(rax, rax);
+ __ xorl(rcx, rcx);
+ __ ucomisd(xmm0, xmm1);
- // If one of the numbers was NaN, then the result is always false.
- // The cc is never not-equal.
- __ bind(&unordered);
- ASSERT(cc_ != not_equal);
- if (cc_ == less || cc_ == less_equal) {
- __ Set(rax, 1);
- } else {
- __ Set(rax, -1);
- }
- __ ret(0);
+ // Don't base result on EFLAGS when a NaN is involved.
+ __ j(parity_even, &unordered, Label::kNear);
+ // Return a result of -1, 0, or 1, based on EFLAGS.
+ __ setcc(above, rax);
+ __ setcc(below, rcx);
+ __ subq(rax, rcx);
+ __ ret(0);
- // The number comparison code did not provide a valid result.
- __ bind(&non_number_comparison);
+ // If one of the numbers was NaN, then the result is always false.
+ // The cc is never not-equal.
+ __ bind(&unordered);
+ ASSERT(cc != not_equal);
+ if (cc == less || cc == less_equal) {
+ __ Set(rax, 1);
+ } else {
+ __ Set(rax, -1);
}
+ __ ret(0);
- // Fast negative check for symbol-to-symbol equality.
- Label check_for_strings;
- if (cc_ == equal) {
- BranchIfNonSymbol(masm, &check_for_strings, rax, kScratchRegister);
- BranchIfNonSymbol(masm, &check_for_strings, rdx, kScratchRegister);
+ // The number comparison code did not provide a valid result.
+ __ bind(&non_number_comparison);
- // We've already checked for object identity, so if both operands
- // are symbols they aren't equal. Register eax (not rax) already holds a
- // non-zero value, which indicates not equal, so just return.
+ // Fast negative check for internalized-to-internalized equality.
+ Label check_for_strings;
+ if (cc == equal) {
+ BranchIfNotInternalizedString(
+ masm, &check_for_strings, rax, kScratchRegister);
+ BranchIfNotInternalizedString(
+ masm, &check_for_strings, rdx, kScratchRegister);
+
+ // We've already checked for object identity, so if both operands are
+ // internalized strings they aren't equal. Register eax (not rax) already
+ // holds a non-zero value, which indicates not equal, so just return.
__ ret(0);
}
@@ -3613,7 +3847,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
rdx, rax, rcx, rbx, &check_unequal_objects);
// Inline comparison of ASCII strings.
- if (cc_ == equal) {
+ if (cc == equal) {
StringCompareStub::GenerateFlatAsciiStringEquals(masm,
rdx,
rax,
@@ -3634,7 +3868,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
#endif
__ bind(&check_unequal_objects);
- if (cc_ == equal && !strict_) {
+ if (cc == equal && !strict()) {
// Not strict equality. Objects are unequal if
// they are both JSObjects and not undetectable,
// and their pointers are different.
@@ -3674,11 +3908,11 @@ void CompareStub::Generate(MacroAssembler* masm) {
// Figure out which native to call and setup the arguments.
Builtins::JavaScript builtin;
- if (cc_ == equal) {
- builtin = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
+ if (cc == equal) {
+ builtin = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
} else {
builtin = Builtins::COMPARE;
- __ Push(Smi::FromInt(NegativeComparisonResult(cc_)));
+ __ Push(Smi::FromInt(NegativeComparisonResult(cc)));
}
// Restore return address on the stack.
@@ -3687,22 +3921,9 @@ void CompareStub::Generate(MacroAssembler* masm) {
// Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
__ InvokeBuiltin(builtin, JUMP_FUNCTION);
-}
-
-void CompareStub::BranchIfNonSymbol(MacroAssembler* masm,
- Label* label,
- Register object,
- Register scratch) {
- __ JumpIfSmi(object, label);
- __ movq(scratch, FieldOperand(object, HeapObject::kMapOffset));
- __ movzxbq(scratch,
- FieldOperand(scratch, Map::kInstanceTypeOffset));
- // Ensure that no non-strings have the symbol bit set.
- STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsSymbolMask);
- STATIC_ASSERT(kSymbolTag != 0);
- __ testb(scratch, Immediate(kIsSymbolMask));
- __ j(zero, label);
+ __ bind(&miss);
+ GenerateMiss(masm);
}
@@ -3716,12 +3937,13 @@ void InterruptStub::Generate(MacroAssembler* masm) {
}
-static void GenerateRecordCallTarget(MacroAssembler* masm) {
+static void GenerateRecordCallTargetNoArray(MacroAssembler* masm) {
// Cache the called function in a global property cell. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
// megamorphic.
// rbx : cache cell for call target
// rdi : the function to call
+ ASSERT(!FLAG_optimize_constructed_arrays);
Isolate* isolate = masm->isolate();
Label initialize, done;
@@ -3754,6 +3976,79 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
}
+static void GenerateRecordCallTarget(MacroAssembler* masm) {
+ // Cache the called function in a global property cell. Cache states
+ // are uninitialized, monomorphic (indicated by a JSFunction), and
+ // megamorphic.
+ // rbx : cache cell for call target
+ // rdi : the function to call
+ ASSERT(FLAG_optimize_constructed_arrays);
+ Isolate* isolate = masm->isolate();
+ Label initialize, done, miss, megamorphic, not_array_function;
+
+ // Load the cache state into rcx.
+ __ movq(rcx, FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset));
+
+ // A monomorphic cache hit or an already megamorphic state: invoke the
+ // function without changing the state.
+ __ cmpq(rcx, rdi);
+ __ j(equal, &done);
+ __ Cmp(rcx, TypeFeedbackCells::MegamorphicSentinel(isolate));
+ __ j(equal, &done);
+
+ // Special handling of the Array() function, which caches not only the
+ // monomorphic Array function but the initial ElementsKind with special
+  // sentinels.
+ Handle<Object> terminal_kind_sentinel =
+ TypeFeedbackCells::MonomorphicArraySentinel(isolate,
+ LAST_FAST_ELEMENTS_KIND);
+ __ Cmp(rcx, terminal_kind_sentinel);
+ __ j(not_equal, &miss);
+  // Make sure the function is the Array() function.
+ __ LoadArrayFunction(rcx);
+ __ cmpq(rdi, rcx);
+ __ j(not_equal, &megamorphic);
+ __ jmp(&done);
+
+ __ bind(&miss);
+
+  // A monomorphic miss (i.e., here the cache is not uninitialized) goes
+ // megamorphic.
+ __ Cmp(rcx, TypeFeedbackCells::UninitializedSentinel(isolate));
+ __ j(equal, &initialize);
+ // MegamorphicSentinel is an immortal immovable object (undefined) so no
+ // write-barrier is needed.
+ __ bind(&megamorphic);
+ __ Move(FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset),
+ TypeFeedbackCells::MegamorphicSentinel(isolate));
+ __ jmp(&done, Label::kNear);
+
+ // An uninitialized cache is patched with the function or sentinel to
+  // indicate the ElementsKind if the function is the Array constructor.
+ __ bind(&initialize);
+  // Make sure the function is the Array() function.
+ __ LoadArrayFunction(rcx);
+ __ cmpq(rdi, rcx);
+ __ j(not_equal, &not_array_function);
+
+ // The target function is the Array constructor, install a sentinel value in
+ // the constructor's type info cell that will track the initial ElementsKind
+  // that should be used for the array when it is constructed.
+ Handle<Object> initial_kind_sentinel =
+ TypeFeedbackCells::MonomorphicArraySentinel(isolate,
+ GetInitialFastElementsKind());
+ __ Move(FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset),
+ initial_kind_sentinel);
+ __ jmp(&done);
+
+ __ bind(&not_array_function);
+ __ movq(FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset), rdi);
+ // No need for a write barrier here - cells are rescanned.
+
+ __ bind(&done);
+}
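The cache cell therefore moves through a small one-way state machine: uninitialized, then either a concrete function or an Array-kind sentinel, then megamorphic on any miss. A standalone model of those transitions, separate from the patch; the names are illustrative, since the real states are distinguished by heap values rather than an enum:

    #include <cassert>

    enum CellState { UNINITIALIZED, MONOMORPHIC, ARRAY_SENTINEL, MEGAMORPHIC };

    CellState Record(CellState s, bool same_function, bool is_array_function) {
      switch (s) {
        case UNINITIALIZED:   // first call: remember the target
          return is_array_function ? ARRAY_SENTINEL : MONOMORPHIC;
        case ARRAY_SENTINEL:  // sentinel only stays valid for Array() itself
          return is_array_function ? ARRAY_SENTINEL : MEGAMORPHIC;
        case MONOMORPHIC:
          return same_function ? MONOMORPHIC : MEGAMORPHIC;
        case MEGAMORPHIC:
          return MEGAMORPHIC;  // terminal: never goes back
      }
      return MEGAMORPHIC;
    }

    int main() {
      assert(Record(UNINITIALIZED, false, true) == ARRAY_SENTINEL);
      assert(Record(ARRAY_SENTINEL, false, false) == MEGAMORPHIC);
      return 0;
    }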
+
+
void CallFunctionStub::Generate(MacroAssembler* masm) {
// rbx : cache cell for call target
// rdi : the function to call
@@ -3785,7 +4080,11 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
__ j(not_equal, &slow);
if (RecordCallTarget()) {
- GenerateRecordCallTarget(masm);
+ if (FLAG_optimize_constructed_arrays) {
+ GenerateRecordCallTarget(masm);
+ } else {
+ GenerateRecordCallTargetNoArray(masm);
+ }
}
// Fast-case: Just invoke the function.
@@ -3860,14 +4159,20 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
__ j(not_equal, &slow);
if (RecordCallTarget()) {
- GenerateRecordCallTarget(masm);
+ if (FLAG_optimize_constructed_arrays) {
+ GenerateRecordCallTarget(masm);
+ } else {
+ GenerateRecordCallTargetNoArray(masm);
+ }
}
// Jump to the function-specific construct stub.
- __ movq(rbx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ movq(rbx, FieldOperand(rbx, SharedFunctionInfo::kConstructStubOffset));
- __ lea(rbx, FieldOperand(rbx, Code::kHeaderSize));
- __ jmp(rbx);
+ Register jmp_reg = FLAG_optimize_constructed_arrays ? rcx : rbx;
+ __ movq(jmp_reg, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ movq(jmp_reg, FieldOperand(jmp_reg,
+ SharedFunctionInfo::kConstructStubOffset));
+ __ lea(jmp_reg, FieldOperand(jmp_reg, Code::kHeaderSize));
+ __ jmp(jmp_reg);
// rdi: called object
// rax: number of arguments
@@ -3904,23 +4209,36 @@ bool CEntryStub::IsPregenerated() {
}
-void CodeStub::GenerateStubsAheadOfTime() {
- CEntryStub::GenerateAheadOfTime();
- StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime();
+void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
+ CEntryStub::GenerateAheadOfTime(isolate);
+ StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
// It is important that the store buffer overflow stubs are generated first.
- RecordWriteStub::GenerateFixedRegStubsAheadOfTime();
+ RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate);
}
-void CodeStub::GenerateFPStubs() {
+void CodeStub::GenerateFPStubs(Isolate* isolate) {
}
-void CEntryStub::GenerateAheadOfTime() {
+void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
CEntryStub stub(1, kDontSaveFPRegs);
- stub.GetCode()->set_is_pregenerated(true);
+ stub.GetCode(isolate)->set_is_pregenerated(true);
CEntryStub save_doubles(1, kSaveFPRegs);
- save_doubles.GetCode()->set_is_pregenerated(true);
+ save_doubles.GetCode(isolate)->set_is_pregenerated(true);
+}
+
+
+static void JumpIfOOM(MacroAssembler* masm,
+ Register value,
+ Register scratch,
+ Label* oom_label) {
+ __ movq(scratch, value);
+ STATIC_ASSERT(Failure::OUT_OF_MEMORY_EXCEPTION == 3);
+ STATIC_ASSERT(kFailureTag == 3);
+ __ and_(scratch, Immediate(0xf));
+ __ cmpq(scratch, Immediate(0xf));
+ __ j(equal, oom_label);
}
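The low-nibble test works because of the two STATIC_ASSERTs: a Failure carries kFailureTag (3, i.e. 0b11) in its lowest two bits and its type in the bits above, so with OUT_OF_MEMORY_EXCEPTION == 3 the low four bits come out as 0b1111 == 0xf. A standalone sketch of the arithmetic, separate from the patch (the field widths are an assumption based on the asserts, not the full Failure encoding):

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint64_t kFailureTag = 3;              // 0b11, per STATIC_ASSERT
      const uint64_t kFailureTagSize = 2;          // assumed width of the tag
      const uint64_t OUT_OF_MEMORY_EXCEPTION = 3;  // per STATIC_ASSERT
      uint64_t oom = (OUT_OF_MEMORY_EXCEPTION << kFailureTagSize) | kFailureTag;
      assert((oom & 0xf) == 0xf);  // exactly the comparison JumpIfOOM emits
      return 0;
    }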
@@ -3959,8 +4277,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
__ movq(rdi, rax);
#endif
__ movq(kScratchRegister,
- FUNCTION_ADDR(Runtime::PerformGC),
- RelocInfo::RUNTIME_ENTRY);
+ ExternalReference::perform_gc_function(masm->isolate()));
__ call(kScratchRegister);
}
@@ -4038,9 +4355,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
__ j(zero, &retry, Label::kNear);
// Special handling of out of memory exceptions.
- __ movq(kScratchRegister, Failure::OutOfMemoryException(), RelocInfo::NONE);
- __ cmpq(rax, kScratchRegister);
- __ j(equal, throw_out_of_memory_exception);
+ JumpIfOOM(masm, rax, kScratchRegister, throw_out_of_memory_exception);
// Retrieve the pending exception and clear the variable.
ExternalReference pending_exception_address(
@@ -4118,7 +4433,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// Do full GC and retry runtime call one final time.
Failure* failure = Failure::InternalError();
- __ movq(rax, failure, RelocInfo::NONE);
+ __ movq(rax, failure, RelocInfo::NONE64);
GenerateCore(masm,
&throw_normal_exception,
&throw_termination_exception,
@@ -4137,7 +4452,10 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// Set pending exception and rax to out of memory exception.
ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
isolate);
- __ movq(rax, Failure::OutOfMemoryException(), RelocInfo::NONE);
+ Label already_have_failure;
+ JumpIfOOM(masm, rax, kScratchRegister, &already_have_failure);
+ __ movq(rax, Failure::OutOfMemoryException(0x1), RelocInfo::NONE64);
+ __ bind(&already_have_failure);
__ Store(pending_exception, rax);
// Fall through to the next label.
@@ -4165,7 +4483,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// Cannot use smi-register for loading yet.
__ movq(kScratchRegister,
reinterpret_cast<uint64_t>(Smi::FromInt(marker)),
- RelocInfo::NONE);
+ RelocInfo::NONE64);
__ push(kScratchRegister); // context slot
__ push(kScratchRegister); // function slot
// Save callee-saved registers (X64/Win64 calling conventions).
@@ -4220,7 +4538,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
isolate);
__ Store(pending_exception, rax);
- __ movq(rax, Failure::Exception(), RelocInfo::NONE);
+ __ movq(rax, Failure::Exception(), RelocInfo::NONE64);
__ jmp(&exit);
// Invoke: Link this frame into the handler chain. There's only one
@@ -4460,44 +4778,6 @@ Register InstanceofStub::left() { return no_reg; }
Register InstanceofStub::right() { return no_reg; }
-int CompareStub::MinorKey() {
- // Encode the three parameters in a unique 16 bit value. To avoid duplicate
- // stubs the never NaN NaN condition is only taken into account if the
- // condition is equals.
- ASSERT(static_cast<unsigned>(cc_) < (1 << 12));
- ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
- return ConditionField::encode(static_cast<unsigned>(cc_))
- | RegisterField::encode(false) // lhs_ and rhs_ are not used
- | StrictField::encode(strict_)
- | NeverNanNanField::encode(cc_ == equal ? never_nan_nan_ : false)
- | IncludeNumberCompareField::encode(include_number_compare_)
- | IncludeSmiCompareField::encode(include_smi_compare_);
-}
-
-
-// Unfortunately you have to run without snapshots to see most of these
-// names in the profile since most compare stubs end up in the snapshot.
-void CompareStub::PrintName(StringStream* stream) {
- ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
- const char* cc_name;
- switch (cc_) {
- case less: cc_name = "LT"; break;
- case greater: cc_name = "GT"; break;
- case less_equal: cc_name = "LE"; break;
- case greater_equal: cc_name = "GE"; break;
- case equal: cc_name = "EQ"; break;
- case not_equal: cc_name = "NE"; break;
- default: cc_name = "UnknownCondition"; break;
- }
- bool is_equality = cc_ == equal || cc_ == not_equal;
- stream->Add("CompareStub_%s", cc_name);
- if (strict_ && is_equality) stream->Add("_STRICT");
- if (never_nan_nan_ && is_equality) stream->Add("_NO_NAN");
- if (!include_number_compare_) stream->Add("_NO_NUMBER");
- if (!include_smi_compare_) stream->Add("_NO_SMI");
-}
-
-
// -------------------------------------------------------------------------
// StringCharCodeAtGenerator
@@ -4598,7 +4878,7 @@ void StringCharCodeAtGenerator::GenerateSlow(
void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
// Fast case of Heap::LookupSingleCharacterStringFromCode.
__ JumpIfNotSmi(code_, &slow_case_);
- __ SmiCompare(code_, Smi::FromInt(String::kMaxAsciiCharCode));
+ __ SmiCompare(code_, Smi::FromInt(String::kMaxOneByteCharCode));
__ j(above, &slow_case_);
__ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
@@ -4630,23 +4910,6 @@ void StringCharFromCodeGenerator::GenerateSlow(
}
-// -------------------------------------------------------------------------
-// StringCharAtGenerator
-
-void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) {
- char_code_at_generator_.GenerateFast(masm);
- char_from_code_generator_.GenerateFast(masm);
-}
-
-
-void StringCharAtGenerator::GenerateSlow(
- MacroAssembler* masm,
- const RuntimeCallHelper& call_helper) {
- char_code_at_generator_.GenerateSlow(masm, call_helper);
- char_from_code_generator_.GenerateSlow(masm, call_helper);
-}
-
-
void StringAddStub::Generate(MacroAssembler* masm) {
Label call_runtime, call_builtin;
Builtins::JavaScript builtin_id = Builtins::ADD;
@@ -4725,8 +4988,8 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// Look at the length of the result of adding the two strings.
STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue / 2);
__ SmiAdd(rbx, rbx, rcx);
- // Use the symbol table when adding two one character strings, as it
- // helps later optimizations to return a symbol here.
+ // Use the string table when adding two one character strings, as it
+ // helps later optimizations to return an internalized string here.
__ SmiCompare(rbx, Smi::FromInt(2));
__ j(not_equal, &longer_than_two);
@@ -4735,13 +4998,13 @@ void StringAddStub::Generate(MacroAssembler* masm) {
&call_runtime);
// Get the two characters forming the sub string.
- __ movzxbq(rbx, FieldOperand(rax, SeqAsciiString::kHeaderSize));
- __ movzxbq(rcx, FieldOperand(rdx, SeqAsciiString::kHeaderSize));
+ __ movzxbq(rbx, FieldOperand(rax, SeqOneByteString::kHeaderSize));
+ __ movzxbq(rcx, FieldOperand(rdx, SeqOneByteString::kHeaderSize));
- // Try to lookup two character string in symbol table. If it is not found
+ // Try to lookup two character string in string table. If it is not found
// just allocate a new one.
Label make_two_character_string, make_flat_ascii_string;
- StringHelper::GenerateTwoCharacterSymbolTableProbe(
+ StringHelper::GenerateTwoCharacterStringTableProbe(
masm, rbx, rcx, r14, r11, rdi, r15, &make_two_character_string);
__ IncrementCounter(counters->string_add_native(), 1);
__ ret(2 * kPointerSize);
@@ -4752,11 +5015,11 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// rbx - first byte: first character
// rbx - second byte: *maybe* second character
// Make sure that the second byte of rbx contains the second character.
- __ movzxbq(rcx, FieldOperand(rdx, SeqAsciiString::kHeaderSize));
+ __ movzxbq(rcx, FieldOperand(rdx, SeqOneByteString::kHeaderSize));
__ shll(rcx, Immediate(kBitsPerByte));
__ orl(rbx, rcx);
// Write both characters to the new string.
- __ movw(FieldOperand(rax, SeqAsciiString::kHeaderSize), rbx);
+ __ movw(FieldOperand(rax, SeqOneByteString::kHeaderSize), rbx);
__ IncrementCounter(counters->string_add_native(), 1);
__ ret(2 * kPointerSize);
@@ -4838,8 +5101,8 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ movq(rcx, FieldOperand(rax, ExternalString::kResourceDataOffset));
__ jmp(&first_prepared, Label::kNear);
__ bind(&first_is_sequential);
- STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
- __ lea(rcx, FieldOperand(rax, SeqAsciiString::kHeaderSize));
+ STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
+ __ lea(rcx, FieldOperand(rax, SeqOneByteString::kHeaderSize));
__ bind(&first_prepared);
// Check whether both strings have same encoding.
@@ -4859,8 +5122,8 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ movq(rdx, FieldOperand(rdx, ExternalString::kResourceDataOffset));
__ jmp(&second_prepared, Label::kNear);
__ bind(&second_is_sequential);
- STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
- __ lea(rdx, FieldOperand(rdx, SeqAsciiString::kHeaderSize));
+ STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
+ __ lea(rdx, FieldOperand(rdx, SeqOneByteString::kHeaderSize));
__ bind(&second_prepared);
Label non_ascii_string_add_flat_result;
@@ -4876,7 +5139,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ AllocateAsciiString(rax, rbx, rdi, r8, r9, &call_runtime);
// rax: result string
// Locate first character of result.
- __ lea(rbx, FieldOperand(rax, SeqAsciiString::kHeaderSize));
+ __ lea(rbx, FieldOperand(rax, SeqOneByteString::kHeaderSize));
// rcx: first char of first string
// rbx: first character of result
// r14: length of first string
@@ -5040,7 +5303,7 @@ void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm,
__ bind(&done);
}
-void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
+void StringHelper::GenerateTwoCharacterStringTableProbe(MacroAssembler* masm,
Register c1,
Register c2,
Register scratch1,
@@ -5052,7 +5315,7 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
Register scratch = scratch3;
   // Make sure that both characters are not digits, as such strings have a
- // different hash algorithm. Don't try to look for these in the symbol table.
+ // different hash algorithm. Don't try to look for these in the string table.
Label not_array_index;
__ leal(scratch, Operand(c1, -'0'));
__ cmpl(scratch, Immediate(static_cast<int>('9' - '0')));
@@ -5076,14 +5339,14 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
// chars: two character string, char 1 in byte 0 and char 2 in byte 1.
// hash: hash of two character string.
- // Load the symbol table.
- Register symbol_table = c2;
- __ LoadRoot(symbol_table, Heap::kSymbolTableRootIndex);
+ // Load the string table.
+ Register string_table = c2;
+ __ LoadRoot(string_table, Heap::kStringTableRootIndex);
- // Calculate capacity mask from the symbol table capacity.
+ // Calculate capacity mask from the string table capacity.
Register mask = scratch2;
__ SmiToInteger32(mask,
- FieldOperand(symbol_table, SymbolTable::kCapacityOffset));
+ FieldOperand(string_table, StringTable::kCapacityOffset));
__ decl(mask);
Register map = scratch4;
@@ -5091,31 +5354,31 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
// Registers
// chars: two character string, char 1 in byte 0 and char 2 in byte 1.
// hash: hash of two character string (32-bit int)
- // symbol_table: symbol table
+ // string_table: string table
// mask: capacity mask (32-bit int)
// map: -
// scratch: -
- // Perform a number of probes in the symbol table.
+ // Perform a number of probes in the string table.
static const int kProbes = 4;
- Label found_in_symbol_table;
+ Label found_in_string_table;
Label next_probe[kProbes];
Register candidate = scratch; // Scratch register contains candidate.
for (int i = 0; i < kProbes; i++) {
- // Calculate entry in symbol table.
+ // Calculate entry in string table.
__ movl(scratch, hash);
if (i > 0) {
- __ addl(scratch, Immediate(SymbolTable::GetProbeOffset(i)));
+ __ addl(scratch, Immediate(StringTable::GetProbeOffset(i)));
}
__ andl(scratch, mask);
- // Load the entry from the symbol table.
- STATIC_ASSERT(SymbolTable::kEntrySize == 1);
+ // Load the entry from the string table.
+ STATIC_ASSERT(StringTable::kEntrySize == 1);
__ movq(candidate,
- FieldOperand(symbol_table,
+ FieldOperand(string_table,
scratch,
times_pointer_size,
- SymbolTable::kElementsStartOffset));
+ StringTable::kElementsStartOffset));
// If entry is undefined no string with this hash can be found.
Label is_string;
@@ -5128,7 +5391,7 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
if (FLAG_debug_code) {
__ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
__ cmpq(kScratchRegister, candidate);
- __ Assert(equal, "oddball in symbol table is not undefined or the hole");
+ __ Assert(equal, "oddball in string table is not undefined or the hole");
}
__ jmp(&next_probe[i]);
@@ -5149,10 +5412,10 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
temp, temp, &next_probe[i]);
// Check if the two characters match.
- __ movl(temp, FieldOperand(candidate, SeqAsciiString::kHeaderSize));
+ __ movl(temp, FieldOperand(candidate, SeqOneByteString::kHeaderSize));
__ andl(temp, Immediate(0x0000ffff));
__ cmpl(chars, temp);
- __ j(equal, &found_in_symbol_table);
+ __ j(equal, &found_in_string_table);
__ bind(&next_probe[i]);
}
@@ -5161,7 +5424,7 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
// Scratch register contains result when we fall through to here.
Register result = candidate;
- __ bind(&found_in_symbol_table);
+ __ bind(&found_in_string_table);
if (!result.is(rax)) {
__ movq(rax, result);
}
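The probe loop above is a bounded open-addressing lookup: start at hash & mask, re-probe with growing offsets up to kProbes times, and give up (fall through to allocation) if an undefined slot is hit first. A standalone sketch, separate from the patch; the triangular probe offsets are an illustration of what StringTable::GetProbeOffset(i) provides:

    #include <cstdio>
    #include <string>
    #include <vector>

    int main() {
      const int kProbes = 4;
      std::vector<std::string> table(8);  // capacity is a power of two
      const unsigned mask = static_cast<unsigned>(table.size()) - 1;
      table[5] = "ab";                    // pretend "ab" hashed to bucket 5
      unsigned hash = 13;                 // 13 & 7 == 5
      for (int i = 0; i < kProbes; ++i) {
        unsigned entry = (hash + i * (i + 1) / 2) & mask;  // illustrative offsets
        if (table[entry] == "ab") { std::printf("hit at probe %d\n", i); return 0; }
        if (table[entry].empty()) break;  // undefined slot: string not in table
      }
      std::printf("miss: allocate a new string\n");
      return 0;
    }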
@@ -5267,6 +5530,11 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ IncrementCounter(counters->sub_string_native(), 1);
__ ret(kArgumentsSize);
__ bind(&not_original_string);
+
+ Label single_char;
+ __ SmiCompare(rcx, Smi::FromInt(1));
+ __ j(equal, &single_char);
+
__ SmiToInteger32(rcx, rcx);
// rax: string
@@ -5287,7 +5555,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// Cons string. Check whether it is flat, then fetch first part.
// Flat cons strings have an empty second part.
__ CompareRoot(FieldOperand(rax, ConsString::kSecondOffset),
- Heap::kEmptyStringRootIndex);
+ Heap::kempty_stringRootIndex);
__ j(not_equal, &runtime);
__ movq(rdi, FieldOperand(rax, ConsString::kFirstOffset));
// Update instance type.
@@ -5367,7 +5635,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ j(not_zero, &runtime);
__ movq(rdi, FieldOperand(rdi, ExternalString::kResourceDataOffset));
// Move the pointer so that offset-wise, it looks like a sequential string.
- STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
__ subq(rdi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
__ bind(&sequential_string);
@@ -5384,10 +5652,10 @@ void SubStringStub::Generate(MacroAssembler* masm) {
{ // Locate character of sub string start.
SmiIndex smi_as_index = masm->SmiToIndex(rdx, rdx, times_1);
__ lea(rsi, Operand(rdi, smi_as_index.reg, smi_as_index.scale,
- SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ SeqOneByteString::kHeaderSize - kHeapObjectTag));
}
// Locate first character of result.
- __ lea(rdi, FieldOperand(rax, SeqAsciiString::kHeaderSize));
+ __ lea(rdi, FieldOperand(rax, SeqOneByteString::kHeaderSize));
// rax: result string
// rcx: result length
@@ -5409,7 +5677,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
{ // Locate character of sub string start.
SmiIndex smi_as_index = masm->SmiToIndex(rdx, rdx, times_2);
__ lea(rsi, Operand(rdi, smi_as_index.reg, smi_as_index.scale,
- SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ SeqOneByteString::kHeaderSize - kHeapObjectTag));
}
// Locate first character of result.
__ lea(rdi, FieldOperand(rax, SeqTwoByteString::kHeaderSize));
@@ -5427,6 +5695,17 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// Just jump to runtime to create the sub string.
__ bind(&runtime);
__ TailCallRuntime(Runtime::kSubString, 3, 1);
+
+ __ bind(&single_char);
+ // rax: string
+ // rbx: instance type
+ // rcx: sub string length (smi)
+ // rdx: from index (smi)
+ StringCharAtGenerator generator(
+ rax, rdx, rcx, rax, &runtime, &runtime, &runtime, STRING_INDEX_IS_NUMBER);
+ generator.GenerateFast(masm);
+ __ ret(kArgumentsSize);
+ generator.SkipSlow(masm, &runtime);
}
@@ -5514,16 +5793,32 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
// Compare lengths (precomputed).
__ bind(&compare_lengths);
__ SmiTest(length_difference);
+#ifndef ENABLE_LATIN_1
__ j(not_zero, &result_not_equal, Label::kNear);
+#else
+ Label length_not_equal;
+ __ j(not_zero, &length_not_equal, Label::kNear);
+#endif
// Result is EQUAL.
__ Move(rax, Smi::FromInt(EQUAL));
__ ret(0);
Label result_greater;
+#ifdef ENABLE_LATIN_1
+ Label result_less;
+ __ bind(&length_not_equal);
+ __ j(greater, &result_greater, Label::kNear);
+ __ jmp(&result_less, Label::kNear);
+#endif
__ bind(&result_not_equal);
// Unequal comparison of left to right, either character or length.
+#ifndef ENABLE_LATIN_1
__ j(greater, &result_greater, Label::kNear);
+#else
+ __ j(above, &result_greater, Label::kNear);
+ __ bind(&result_less);
+#endif
// Result is LESS.
__ Move(rax, Smi::FromInt(LESS));
@@ -5549,9 +5844,9 @@ void StringCompareStub::GenerateAsciiCharsCompareLoop(
// doesn't need an additional compare.
__ SmiToInteger32(length, length);
__ lea(left,
- FieldOperand(left, length, times_1, SeqAsciiString::kHeaderSize));
+ FieldOperand(left, length, times_1, SeqOneByteString::kHeaderSize));
__ lea(right,
- FieldOperand(right, length, times_1, SeqAsciiString::kHeaderSize));
+ FieldOperand(right, length, times_1, SeqOneByteString::kHeaderSize));
__ neg(length);
Register index = length; // index = -length;
@@ -5607,7 +5902,7 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::SMIS);
+ ASSERT(state_ == CompareIC::SMI);
Label miss;
__ JumpIfNotBothSmi(rdx, rax, &miss, Label::kNear);
@@ -5619,7 +5914,7 @@ void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
__ subq(rdx, rax);
__ j(no_overflow, &done, Label::kNear);
// Correct sign of result in case of overflow.
- __ SmiNot(rdx, rdx);
+ __ not_(rdx);
__ bind(&done);
__ movq(rax, rdx);
}
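The fix-up above relies on a property of two's-complement subtraction: when rdx - rax overflows, the truncated result has the wrong sign, and since overflow implies the operands differ, bitwise NOT (~r == -r - 1) both restores the correct sign and can never produce zero. A standalone demonstration, separate from the patch:

    #include <cassert>
    #include <cstdint>

    int main() {
      int64_t a = INT64_MIN / 2 - 1;  // "rdx": the smaller operand
      int64_t b = INT64_MAX / 2 + 1;  // "rax": a - b overflows int64
      uint64_t r = (uint64_t)a - (uint64_t)b;         // wrapped subtraction
      bool overflow = ((a < 0) != (b < 0)) &&         // operand signs differ and
                      (((int64_t)r < 0) != (a < 0));  // result sign is wrong
      if (overflow) r = ~r;                           // the __ not_(rdx) fix-up
      assert((int64_t)r < 0);                         // correct outcome: a < b
      return 0;
    }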
@@ -5630,24 +5925,42 @@ void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
}
-void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::HEAP_NUMBERS);
+void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
+ ASSERT(state_ == CompareIC::NUMBER);
Label generic_stub;
Label unordered, maybe_undefined1, maybe_undefined2;
Label miss;
- Condition either_smi = masm->CheckEitherSmi(rax, rdx);
- __ j(either_smi, &generic_stub, Label::kNear);
- __ CmpObjectType(rax, HEAP_NUMBER_TYPE, rcx);
+ if (left_ == CompareIC::SMI) {
+ __ JumpIfNotSmi(rdx, &miss);
+ }
+ if (right_ == CompareIC::SMI) {
+ __ JumpIfNotSmi(rax, &miss);
+ }
+
+ // Load left and right operand.
+ Label done, left, left_smi, right_smi;
+ __ JumpIfSmi(rax, &right_smi, Label::kNear);
+ __ CompareMap(rax, masm->isolate()->factory()->heap_number_map(), NULL);
__ j(not_equal, &maybe_undefined1, Label::kNear);
- __ CmpObjectType(rdx, HEAP_NUMBER_TYPE, rcx);
+ __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
+ __ jmp(&left, Label::kNear);
+ __ bind(&right_smi);
+ __ SmiToInteger32(rcx, rax); // Can't clobber rax yet.
+ __ cvtlsi2sd(xmm1, rcx);
+
+ __ bind(&left);
+ __ JumpIfSmi(rdx, &left_smi, Label::kNear);
+ __ CompareMap(rdx, masm->isolate()->factory()->heap_number_map(), NULL);
__ j(not_equal, &maybe_undefined2, Label::kNear);
-
- // Load left and right operand
__ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
- __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
+ __ jmp(&done);
+ __ bind(&left_smi);
+ __ SmiToInteger32(rcx, rdx); // Can't clobber rdx yet.
+ __ cvtlsi2sd(xmm0, rcx);
+ __ bind(&done);
// Compare operands
__ ucomisd(xmm0, xmm1);
@@ -5663,14 +5976,16 @@ void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
__ ret(0);
__ bind(&unordered);
- CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS);
__ bind(&generic_stub);
- __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
+ ICCompareStub stub(op_, CompareIC::GENERIC, CompareIC::GENERIC,
+ CompareIC::GENERIC);
+ __ jmp(stub.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
__ bind(&maybe_undefined1);
if (Token::IsOrderedRelationalCompareOp(op_)) {
__ Cmp(rax, masm->isolate()->factory()->undefined_value());
__ j(not_equal, &miss);
+ __ JumpIfSmi(rdx, &unordered);
__ CmpObjectType(rdx, HEAP_NUMBER_TYPE, rcx);
__ j(not_equal, &maybe_undefined2, Label::kNear);
__ jmp(&unordered);
@@ -5687,8 +6002,8 @@ void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
}
-void ICCompareStub::GenerateSymbols(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::SYMBOLS);
+void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) {
+ ASSERT(state_ == CompareIC::INTERNALIZED_STRING);
ASSERT(GetCondition() == equal);
// Registers containing left and right operands respectively.
@@ -5702,17 +6017,72 @@ void ICCompareStub::GenerateSymbols(MacroAssembler* masm) {
Condition cond = masm->CheckEitherSmi(left, right, tmp1);
__ j(cond, &miss, Label::kNear);
- // Check that both operands are symbols.
+ // Check that both operands are internalized strings.
__ movq(tmp1, FieldOperand(left, HeapObject::kMapOffset));
__ movq(tmp2, FieldOperand(right, HeapObject::kMapOffset));
__ movzxbq(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
__ movzxbq(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
- STATIC_ASSERT(kSymbolTag != 0);
+ STATIC_ASSERT(kInternalizedTag != 0);
__ and_(tmp1, tmp2);
- __ testb(tmp1, Immediate(kIsSymbolMask));
+ __ testb(tmp1, Immediate(kIsInternalizedMask));
__ j(zero, &miss, Label::kNear);
- // Symbols are compared by identity.
+ // Internalized strings are compared by identity.
+ Label done;
+ __ cmpq(left, right);
+ // Make sure rax is non-zero. At this point input operands are
+ // guaranteed to be non-zero.
+ ASSERT(right.is(rax));
+ __ j(not_equal, &done, Label::kNear);
+ STATIC_ASSERT(EQUAL == 0);
+ STATIC_ASSERT(kSmiTag == 0);
+ __ Move(rax, Smi::FromInt(EQUAL));
+ __ bind(&done);
+ __ ret(0);
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+}
+
+
+void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) {
+ ASSERT(state_ == CompareIC::UNIQUE_NAME);
+ ASSERT(GetCondition() == equal);
+
+ // Registers containing left and right operands respectively.
+ Register left = rdx;
+ Register right = rax;
+ Register tmp1 = rcx;
+ Register tmp2 = rbx;
+
+ // Check that both operands are heap objects.
+ Label miss;
+ Condition cond = masm->CheckEitherSmi(left, right, tmp1);
+ __ j(cond, &miss, Label::kNear);
+
+ // Check that both operands are unique names. This leaves the instance
+ // types loaded in tmp1 and tmp2.
+ STATIC_ASSERT(kInternalizedTag != 0);
+ __ movq(tmp1, FieldOperand(left, HeapObject::kMapOffset));
+ __ movq(tmp2, FieldOperand(right, HeapObject::kMapOffset));
+ __ movzxbq(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
+ __ movzxbq(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
+
+ Label succeed1;
+ __ testb(tmp1, Immediate(kIsInternalizedMask));
+ __ j(not_zero, &succeed1, Label::kNear);
+ __ cmpb(tmp1, Immediate(static_cast<uint8_t>(SYMBOL_TYPE)));
+ __ j(not_equal, &miss, Label::kNear);
+ __ bind(&succeed1);
+
+ Label succeed2;
+ __ testb(tmp2, Immediate(kIsInternalizedMask));
+ __ j(not_zero, &succeed2, Label::kNear);
+ __ cmpb(tmp2, Immediate(static_cast<uint8_t>(SYMBOL_TYPE)));
+ __ j(not_equal, &miss, Label::kNear);
+ __ bind(&succeed2);
+
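+ // A "unique name" widens the internalized-string fast path: it is either
+ // an internalized string or a Symbol, and both compare by identity. A
+ // hedged C++ sketch of the predicate the two succeed labels implement,
+ // with placeholder constants (the real masks live in src/objects.h):
+ //
+ // static const uint8_t kIsInternalizedMask = 0x10; // assumption
+ // static const uint8_t kSymbolType = 0x80; // stands in for SYMBOL_TYPE
+ //
+ // // testb(type, kIsInternalizedMask); j(not_zero, &succeed);
+ // // cmpb(type, SYMBOL_TYPE); j(not_equal, &miss).
+ // bool IsUniqueName(uint8_t instance_type) {
+ // if (instance_type & kIsInternalizedMask) return true;
+ // return instance_type == kSymbolType;
+ // }
+ //
+ // // Once both operands pass IsUniqueName, pointer equality is the
+ // // whole comparison.
+ // bool UniqueNamesEqual(const void* l, const void* r) { return l == r; }
+ //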
+ // Unique names are compared by identity.
Label done;
__ cmpq(left, right);
// Make sure rax is non-zero. At this point input operands are
@@ -5731,7 +6101,7 @@ void ICCompareStub::GenerateSymbols(MacroAssembler* masm) {
void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::STRINGS);
+ ASSERT(state_ == CompareIC::STRING);
Label miss;
bool equality = Token::IsEqualityOp(op_);
@@ -5771,13 +6141,13 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
// Handle not identical strings.
__ bind(&not_same);
- // Check that both strings are symbols. If they are, we're done
+ // Check that both strings are internalized strings. If they are, we're done
// because we already know they are not identical.
if (equality) {
Label do_compare;
- STATIC_ASSERT(kSymbolTag != 0);
+ STATIC_ASSERT(kInternalizedTag != 0);
__ and_(tmp1, tmp2);
- __ testb(tmp1, Immediate(kIsSymbolMask));
+ __ testb(tmp1, Immediate(kIsInternalizedMask));
__ j(zero, &do_compare, Label::kNear);
// Make sure rax is non-zero. At this point input operands are
// guaranteed to be non-zero.
@@ -5817,7 +6187,7 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::OBJECTS);
+ ASSERT(state_ == CompareIC::OBJECT);
Label miss;
Condition either_smi = masm->CheckEitherSmi(rdx, rax);
__ j(either_smi, &miss, Label::kNear);
@@ -5937,10 +6307,10 @@ void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
__ CompareRoot(entity_name, Heap::kTheHoleValueRootIndex);
__ j(equal, &the_hole, Label::kNear);
- // Check if the entry name is not a symbol.
+ // Check if the entry name is not an internalized string.
__ movq(entity_name, FieldOperand(entity_name, HeapObject::kMapOffset));
__ testb(FieldOperand(entity_name, Map::kInstanceTypeOffset),
- Immediate(kIsSymbolMask));
+ Immediate(kIsInternalizedMask));
__ j(zero, miss);
__ bind(&the_hole);
@@ -6069,14 +6439,14 @@ void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
__ j(equal, &in_dictionary);
if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
- // If we hit a non symbol key during negative lookup
+ // If we hit a non-internalized string key during negative lookup
// we have to bail out as this key might be equal to the
// key we are looking for.
- // Check if the entry name is not a symbol.
+ // Check if the entry name is not an internalized string.
__ movq(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
__ testb(FieldOperand(scratch, Map::kInstanceTypeOffset),
- Immediate(kIsSymbolMask));
+ Immediate(kIsInternalizedMask));
__ j(zero, &maybe_in_dictionary);
}
}
@@ -6167,15 +6537,16 @@ bool RecordWriteStub::IsPregenerated() {
}
-void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime() {
+void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
+ Isolate* isolate) {
StoreBufferOverflowStub stub1(kDontSaveFPRegs);
- stub1.GetCode()->set_is_pregenerated(true);
+ stub1.GetCode(isolate)->set_is_pregenerated(true);
StoreBufferOverflowStub stub2(kSaveFPRegs);
- stub2.GetCode()->set_is_pregenerated(true);
+ stub2.GetCode(isolate)->set_is_pregenerated(true);
}
-void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() {
+void RecordWriteStub::GenerateFixedRegStubsAheadOfTime(Isolate* isolate) {
for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
!entry->object.is(no_reg);
entry++) {
@@ -6184,7 +6555,7 @@ void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() {
entry->address,
entry->action,
kDontSaveFPRegs);
- stub.GetCode()->set_is_pregenerated(true);
+ stub.GetCode(isolate)->set_is_pregenerated(true);
}
}
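
Threading `Isolate*` through `GetCode` is a recurring change in this patch, presumably so pregeneration avoids an implicit `Isolate::Current()` thread-local lookup. A hedged sketch of the before/after calling convention; `Code`, `Isolate`, and the stub type are skeletal stand-ins:

// Skeletal stand-ins; the real types carry heaps, caches, and flags.
struct Code {
  bool is_pregenerated = false;
  void set_is_pregenerated(bool value) { is_pregenerated = value; }
};
struct Isolate {};  // a per-isolate compiled-stub cache would live here

struct StoreBufferOverflowStubSketch {
  // Old shape was GetCode() with an implicit Isolate::Current() lookup;
  // the new shape makes the dependency explicit.
  Code* GetCode(Isolate* isolate) {
    static Code code;  // sketch only: real stubs cache per isolate
    (void)isolate;
    return &code;
  }
};

// Mirrors GenerateFixedRegStubsAheadOfTime(Isolate*): pregenerate both
// variants with an explicit isolate, no thread-local lookup at startup.
void GenerateFixedRegStubsAheadOfTimeSketch(Isolate* isolate) {
  StoreBufferOverflowStubSketch stub1, stub2;
  stub1.GetCode(isolate)->set_is_pregenerated(true);
  stub2.GetCode(isolate)->set_is_pregenerated(true);
}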
@@ -6290,13 +6661,8 @@ void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
ASSERT(!address.is(arg1));
__ Move(address, regs_.address());
__ Move(arg1, regs_.object());
- if (mode == INCREMENTAL_COMPACTION) {
- // TODO(gc) Can we just set address arg2 in the beginning?
- __ Move(arg2, address);
- } else {
- ASSERT(mode == INCREMENTAL);
- __ movq(arg2, Operand(address, 0));
- }
+ // TODO(gc) Can we just set address arg2 in the beginning?
+ __ Move(arg2, address);
__ LoadAddress(arg3, ExternalReference::isolate_address());
int argument_count = 3;
@@ -6485,6 +6851,21 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
}
+void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
+ ASSERT(!Serializer::enabled());
+ CEntryStub ces(1, kSaveFPRegs);
+ __ Call(ces.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
+ int parameter_count_offset =
+ StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
+ __ movq(rbx, MemOperand(rbp, parameter_count_offset));
+ masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
+ __ pop(rcx);
+ __ lea(rsp, MemOperand(rsp, rbx, times_pointer_size,
+ extra_expression_stack_count_ * kPointerSize));
+ __ jmp(rcx); // Return to IC Miss stub, continuation still on stack.
+}
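+
The new trampoline's tail is mostly pointer arithmetic: rbx holds the caller's stack parameter count, the return address is popped into rcx, and one `lea` drops both the parameters and any extra expression-stack slots before jumping back. A worked sketch of that arithmetic, assuming the x64 pointer size:

#include <cstdint>

// After `pop rcx` removes the return address, the lea computes:
// rsp' = rsp + rbx * kPointerSize + extra * kPointerSize,
// dropping the caller's stack parameters plus any extra expression
// stack slots before `jmp rcx`.
intptr_t UnwoundStackPointer(intptr_t rsp_after_pop,
                             intptr_t param_count,  // rbx
                             intptr_t extra) {  // extra_expression_stack_count_
  const intptr_t kPointerSize = 8;
  return rsp_after_pop + (param_count + extra) * kPointerSize;
}

// Example: 3 stack parameters, no extra slots, rsp = 0x7fff0010 after
// the pop: 0x7fff0010 + 3 * 8 = 0x7fff0028 before the jump.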
+
+
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
if (entry_hook_ != NULL) {
ProfileEntryHookStub stub;
@@ -6531,7 +6912,7 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
#endif
// Call the entry hook function.
- __ movq(rax, &entry_hook_, RelocInfo::NONE);
+ __ movq(rax, &entry_hook_, RelocInfo::NONE64);
__ movq(rax, Operand(rax, 0));
AllowExternalCallThatCantCauseGC scope(masm);
diff --git a/src/3rdparty/v8/src/x64/code-stubs-x64.h b/src/3rdparty/v8/src/x64/code-stubs-x64.h
index 6a1a18f..675e95b 100644
--- a/src/3rdparty/v8/src/x64/code-stubs-x64.h
+++ b/src/3rdparty/v8/src/x64/code-stubs-x64.h
@@ -37,7 +37,7 @@ namespace internal {
// Compute a transcendental math function natively, or call the
// TranscendentalCache runtime function.
-class TranscendentalCacheStub: public CodeStub {
+class TranscendentalCacheStub: public PlatformCodeStub {
public:
enum ArgumentType {
TAGGED = 0,
@@ -60,7 +60,7 @@ class TranscendentalCacheStub: public CodeStub {
};
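
Every stub in this header is rebased from `CodeStub` onto `PlatformCodeStub`. Judging from this patch alone (hedged, since the base classes are defined elsewhere), the split separates stubs emitted by hand through a platform `MacroAssembler` from stubs compiled via Hydrogen. A minimal sketch of such a hierarchy:

class MacroAssembler;
struct Code;

// Common stub interface: keying, caching, pregeneration flags.
class CodeStub {
 public:
  virtual ~CodeStub() {}
  virtual Code* GenerateCode() = 0;
};

// Hand-written assembly stubs: emit through a MacroAssembler.
class PlatformCodeStub : public CodeStub {
 public:
  Code* GenerateCode() override;            // drives Generate(masm)
  virtual void Generate(MacroAssembler* masm) = 0;
};

// Stubs whose body is built as a graph instead (assumed counterpart,
// not shown in this header).
class HydrogenCodeStub : public CodeStub { /* ... */ };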
-class StoreBufferOverflowStub: public CodeStub {
+class StoreBufferOverflowStub: public PlatformCodeStub {
public:
explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
: save_doubles_(save_fp) { }
@@ -68,7 +68,7 @@ class StoreBufferOverflowStub: public CodeStub {
void Generate(MacroAssembler* masm);
virtual bool IsPregenerated() { return true; }
- static void GenerateFixedRegStubsAheadOfTime();
+ static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
virtual bool SometimesSetsUpAFrame() { return false; }
private:
@@ -79,14 +79,7 @@ class StoreBufferOverflowStub: public CodeStub {
};
-// Flag that indicates how to generate code for the stub GenericBinaryOpStub.
-enum GenericBinaryFlags {
- NO_GENERIC_BINARY_FLAGS = 0,
- NO_SMI_CODE_IN_STUB = 1 << 0 // Omit smi code in stub.
-};
-
-
-class UnaryOpStub: public CodeStub {
+class UnaryOpStub: public PlatformCodeStub {
public:
UnaryOpStub(Token::Value op,
UnaryOverwriteMode mode,
@@ -134,9 +127,9 @@ class UnaryOpStub: public CodeStub {
Label* non_smi,
Label::Distance non_smi_near);
- void GenerateHeapNumberStub(MacroAssembler* masm);
- void GenerateHeapNumberStubSub(MacroAssembler* masm);
- void GenerateHeapNumberStubBitNot(MacroAssembler* masm);
+ void GenerateNumberStub(MacroAssembler* masm);
+ void GenerateNumberStubSub(MacroAssembler* masm);
+ void GenerateNumberStubBitNot(MacroAssembler* masm);
void GenerateHeapNumberCodeSub(MacroAssembler* masm, Label* slow);
void GenerateHeapNumberCodeBitNot(MacroAssembler* masm, Label* slow);
@@ -157,95 +150,6 @@ class UnaryOpStub: public CodeStub {
};
-class BinaryOpStub: public CodeStub {
- public:
- BinaryOpStub(Token::Value op, OverwriteMode mode)
- : op_(op),
- mode_(mode),
- operands_type_(BinaryOpIC::UNINITIALIZED),
- result_type_(BinaryOpIC::UNINITIALIZED) {
- ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
- }
-
- BinaryOpStub(
- int key,
- BinaryOpIC::TypeInfo operands_type,
- BinaryOpIC::TypeInfo result_type = BinaryOpIC::UNINITIALIZED)
- : op_(OpBits::decode(key)),
- mode_(ModeBits::decode(key)),
- operands_type_(operands_type),
- result_type_(result_type) { }
-
- private:
- enum SmiCodeGenerateHeapNumberResults {
- ALLOW_HEAPNUMBER_RESULTS,
- NO_HEAPNUMBER_RESULTS
- };
-
- Token::Value op_;
- OverwriteMode mode_;
-
- // Operand type information determined at runtime.
- BinaryOpIC::TypeInfo operands_type_;
- BinaryOpIC::TypeInfo result_type_;
-
- virtual void PrintName(StringStream* stream);
-
- // Minor key encoding in 15 bits RRRTTTOOOOOOOMM.
- class ModeBits: public BitField<OverwriteMode, 0, 2> {};
- class OpBits: public BitField<Token::Value, 2, 7> {};
- class OperandTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 9, 3> {};
- class ResultTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 12, 3> {};
-
- Major MajorKey() { return BinaryOp; }
- int MinorKey() {
- return OpBits::encode(op_)
- | ModeBits::encode(mode_)
- | OperandTypeInfoBits::encode(operands_type_)
- | ResultTypeInfoBits::encode(result_type_);
- }
-
- void Generate(MacroAssembler* masm);
- void GenerateGeneric(MacroAssembler* masm);
- void GenerateSmiCode(MacroAssembler* masm,
- Label* slow,
- SmiCodeGenerateHeapNumberResults heapnumber_results);
- void GenerateFloatingPointCode(MacroAssembler* masm,
- Label* allocation_failure,
- Label* non_numeric_failure);
- void GenerateStringAddCode(MacroAssembler* masm);
- void GenerateCallRuntimeCode(MacroAssembler* masm);
- void GenerateLoadArguments(MacroAssembler* masm);
- void GenerateReturn(MacroAssembler* masm);
- void GenerateUninitializedStub(MacroAssembler* masm);
- void GenerateSmiStub(MacroAssembler* masm);
- void GenerateInt32Stub(MacroAssembler* masm);
- void GenerateHeapNumberStub(MacroAssembler* masm);
- void GenerateOddballStub(MacroAssembler* masm);
- void GenerateStringStub(MacroAssembler* masm);
- void GenerateBothStringStub(MacroAssembler* masm);
- void GenerateGenericStub(MacroAssembler* masm);
-
- void GenerateHeapResultAllocation(MacroAssembler* masm, Label* alloc_failure);
- void GenerateRegisterArgsPush(MacroAssembler* masm);
- void GenerateTypeTransition(MacroAssembler* masm);
- void GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm);
-
- virtual int GetCodeKind() { return Code::BINARY_OP_IC; }
-
- virtual InlineCacheState GetICState() {
- return BinaryOpIC::ToState(operands_type_);
- }
-
- virtual void FinishCode(Handle<Code> code) {
- code->set_binary_op_type(operands_type_);
- code->set_binary_op_result_type(result_type_);
- }
-
- friend class CodeGenerator;
-};
-
-
class StringHelper : public AllStatic {
public:
// Generate code for copying characters using a simple loop. This should only
@@ -268,11 +172,11 @@ class StringHelper : public AllStatic {
bool ascii);
- // Probe the symbol table for a two character string. If the string is
+ // Probe the string table for a two-character string. If the string is
// not found by probing, a jump to the label not_found is performed. This jump
- // does not guarantee that the string is not in the symbol table. If the
+ // does not guarantee that the string is not in the string table. If the
// string is found the code falls through with the string in register rax.
- static void GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
+ static void GenerateTwoCharacterStringTableProbe(MacroAssembler* masm,
Register c1,
Register c2,
Register scratch1,
@@ -312,7 +216,7 @@ enum StringAddFlags {
};
-class StringAddStub: public CodeStub {
+class StringAddStub: public PlatformCodeStub {
public:
explicit StringAddStub(StringAddFlags flags) : flags_(flags) {}
@@ -334,7 +238,7 @@ class StringAddStub: public CodeStub {
};
-class SubStringStub: public CodeStub {
+class SubStringStub: public PlatformCodeStub {
public:
SubStringStub() {}
@@ -346,7 +250,7 @@ class SubStringStub: public CodeStub {
};
-class StringCompareStub: public CodeStub {
+class StringCompareStub: public PlatformCodeStub {
public:
StringCompareStub() {}
@@ -383,7 +287,7 @@ class StringCompareStub: public CodeStub {
};
-class NumberToStringStub: public CodeStub {
+class NumberToStringStub: public PlatformCodeStub {
public:
NumberToStringStub() { }
@@ -412,7 +316,7 @@ class NumberToStringStub: public CodeStub {
};
-class StringDictionaryLookupStub: public CodeStub {
+class StringDictionaryLookupStub: public PlatformCodeStub {
public:
enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
@@ -474,7 +378,7 @@ class StringDictionaryLookupStub: public CodeStub {
};
-class RecordWriteStub: public CodeStub {
+class RecordWriteStub: public PlatformCodeStub {
public:
RecordWriteStub(Register object,
Register value,
@@ -498,7 +402,7 @@ class RecordWriteStub: public CodeStub {
};
virtual bool IsPregenerated();
- static void GenerateFixedRegStubsAheadOfTime();
+ static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
virtual bool SometimesSetsUpAFrame() { return false; }
static const byte kTwoByteNopInstruction = 0x3c; // Cmpb al, #imm8.
@@ -657,7 +561,7 @@ class RecordWriteStub: public CodeStub {
Register GetRegThatIsNotRcxOr(Register r1,
Register r2,
Register r3) {
- for (int i = 0; i < Register::kNumAllocatableRegisters; i++) {
+ for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
Register candidate = Register::FromAllocationIndex(i);
if (candidate.is(rcx)) continue;
if (candidate.is(r1)) continue;
diff --git a/src/3rdparty/v8/src/x64/codegen-x64.cc b/src/3rdparty/v8/src/x64/codegen-x64.cc
index ffccf47..fa8b44a 100644
--- a/src/3rdparty/v8/src/x64/codegen-x64.cc
+++ b/src/3rdparty/v8/src/x64/codegen-x64.cc
@@ -91,7 +91,38 @@ UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) {
CodeDesc desc;
masm.GetCode(&desc);
- ASSERT(desc.reloc_size == 0);
+ ASSERT(!RelocInfo::RequiresRelocation(desc));
+
+ CPU::FlushICache(buffer, actual_size);
+ OS::ProtectCode(buffer, actual_size);
+ return FUNCTION_CAST<UnaryMathFunction>(buffer);
+}
+
+
+UnaryMathFunction CreateExpFunction() {
+ if (!FLAG_fast_math) return &exp;
+ size_t actual_size;
+ byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
+ if (buffer == NULL) return &exp;
+ ExternalReference::InitializeMathExpData();
+
+ MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
+ // xmm0: raw double input.
+ XMMRegister input = xmm0;
+ XMMRegister result = xmm1;
+ __ push(rax);
+ __ push(rbx);
+
+ MathExpGenerator::EmitMathExp(&masm, input, result, xmm2, rax, rbx);
+
+ __ pop(rbx);
+ __ pop(rax);
+ __ movsd(xmm0, result);
+ __ Ret();
+
+ CodeDesc desc;
+ masm.GetCode(&desc);
+ ASSERT(!RelocInfo::RequiresRelocation(desc));
CPU::FlushICache(buffer, actual_size);
OS::ProtectCode(buffer, actual_size);
@@ -115,7 +146,7 @@ UnaryMathFunction CreateSqrtFunction() {
CodeDesc desc;
masm.GetCode(&desc);
- ASSERT(desc.reloc_size == 0);
+ ASSERT(!RelocInfo::RequiresRelocation(desc));
CPU::FlushICache(buffer, actual_size);
OS::ProtectCode(buffer, actual_size);
@@ -182,7 +213,7 @@ ModuloFunction CreateModuloFunction() {
__ j(zero, &valid_result);
__ fstp(0); // Drop result in st(0).
int64_t kNaNValue = V8_INT64_C(0x7ff8000000000000);
- __ movq(rcx, kNaNValue, RelocInfo::NONE);
+ __ movq(rcx, kNaNValue, RelocInfo::NONE64);
__ movq(Operand(rsp, kPointerSize), rcx);
__ movsd(xmm0, Operand(rsp, kPointerSize));
__ jmp(&return_result);
@@ -221,7 +252,8 @@ ModuloFunction CreateModuloFunction() {
#define __ ACCESS_MASM(masm)
void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
- MacroAssembler* masm) {
+ MacroAssembler* masm, AllocationSiteMode mode,
+ Label* allocation_site_info_found) {
// ----------- S t a t e -------------
// -- rax : value
// -- rbx : target map
@@ -229,6 +261,12 @@ void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
// -- rdx : receiver
// -- rsp[0] : return address
// -----------------------------------
+ if (mode == TRACK_ALLOCATION_SITE) {
+ ASSERT(allocation_site_info_found != NULL);
+ __ TestJSArrayForAllocationSiteInfo(rdx, rdi);
+ __ j(equal, allocation_site_info_found);
+ }
+
// Set transitioned map.
__ movq(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
__ RecordWriteField(rdx,
@@ -242,7 +280,7 @@ void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
void ElementsTransitionGenerator::GenerateSmiToDouble(
- MacroAssembler* masm, Label* fail) {
+ MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
// ----------- S t a t e -------------
// -- rax : value
// -- rbx : target map
@@ -253,6 +291,11 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
// The fail label is not actually used since we do not allocate.
Label allocated, new_backing_store, only_change_map, done;
+ if (mode == TRACK_ALLOCATION_SITE) {
+ __ TestJSArrayForAllocationSiteInfo(rdx, rdi);
+ __ j(equal, fail);
+ }
+
// Check for empty arrays, which only require a map transition and no changes
// to the backing store.
__ movq(r8, FieldOperand(rdx, JSObject::kElementsOffset));
@@ -297,7 +340,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
STATIC_ASSERT(FixedDoubleArray::kHeaderSize == FixedArray::kHeaderSize);
Label loop, entry, convert_hole;
- __ movq(r15, BitCast<int64_t, uint64_t>(kHoleNanInt64), RelocInfo::NONE);
+ __ movq(r15, BitCast<int64_t, uint64_t>(kHoleNanInt64), RelocInfo::NONE64);
// r15: the-hole NaN
__ jmp(&entry);
@@ -364,7 +407,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
void ElementsTransitionGenerator::GenerateDoubleToObject(
- MacroAssembler* masm, Label* fail) {
+ MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
// ----------- S t a t e -------------
// -- rax : value
// -- rbx : target map
@@ -374,6 +417,11 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
// -----------------------------------
Label loop, entry, convert_hole, gc_required, only_change_map;
+ if (mode == TRACK_ALLOCATION_SITE) {
+ __ TestJSArrayForAllocationSiteInfo(rdx, rdi);
+ __ j(equal, fail);
+ }
+
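+ // All three transition generators gain the same guard: when allocation
+ // site tracking is on, a JSArray that still has an AllocationSiteInfo
+ // behind it must take the slow path so the site can record the
+ // ElementsKind transition. A hedged pseudo-C++ of the guard, where
+ // HasAllocationSiteInfoBehind stands in for the memory probe that
+ // TestJSArrayForAllocationSiteInfo performs:
+ //
+ // enum AllocationSiteMode {
+ // DONT_TRACK_ALLOCATION_SITE,
+ // TRACK_ALLOCATION_SITE
+ // };
+ //
+ // struct JSArray;
+ // // Assumption: the real check inspects the word directly after the
+ // // array for an AllocationSiteInfo map.
+ // bool HasAllocationSiteInfoBehind(JSArray* array);
+ //
+ // // Mirrors the emitted guard: bail to `fail` so the runtime can
+ // // update the allocation site before the transition happens.
+ // bool ShouldBailToRuntime(AllocationSiteMode mode, JSArray* receiver) {
+ // return mode == TRACK_ALLOCATION_SITE &&
+ // HasAllocationSiteInfoBehind(receiver);
+ // }
+ //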
// Check for empty arrays, which only require a map transition and no changes
// to the backing store.
__ movq(r8, FieldOperand(rdx, JSObject::kElementsOffset));
@@ -395,7 +443,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
__ movq(FieldOperand(r11, FixedArray::kLengthOffset), r14);
// Prepare for conversion loop.
- __ movq(rsi, BitCast<int64_t, uint64_t>(kHoleNanInt64), RelocInfo::NONE);
+ __ movq(rsi, BitCast<int64_t, uint64_t>(kHoleNanInt64), RelocInfo::NONE64);
__ LoadRoot(rdi, Heap::kTheHoleValueRootIndex);
// rsi: the-hole NaN
// rdi: pointer to the-hole
@@ -506,7 +554,7 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
// the string.
__ bind(&cons_string);
__ CompareRoot(FieldOperand(string, ConsString::kSecondOffset),
- Heap::kEmptyStringRootIndex);
+ Heap::kempty_stringRootIndex);
__ j(not_equal, call_runtime);
__ movq(string, FieldOperand(string, ConsString::kFirstOffset));
@@ -571,7 +619,99 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
__ movzxbl(result, FieldOperand(string,
index,
times_1,
- SeqAsciiString::kHeaderSize));
+ SeqOneByteString::kHeaderSize));
+ __ bind(&done);
+}
+
+
+void SeqStringSetCharGenerator::Generate(MacroAssembler* masm,
+ String::Encoding encoding,
+ Register string,
+ Register index,
+ Register value) {
+ if (FLAG_debug_code) {
+ __ Check(masm->CheckSmi(index), "Non-smi index");
+ __ Check(masm->CheckSmi(value), "Non-smi value");
+
+ __ SmiCompare(index, FieldOperand(string, String::kLengthOffset));
+ __ Check(less, "Index is too large");
+
+ __ SmiCompare(index, Smi::FromInt(0));
+ __ Check(greater_equal, "Index is negative");
+
+ __ push(value);
+ __ movq(value, FieldOperand(string, HeapObject::kMapOffset));
+ __ movzxbq(value, FieldOperand(value, Map::kInstanceTypeOffset));
+
+ __ andb(value, Immediate(kStringRepresentationMask | kStringEncodingMask));
+ static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
+ static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
+ __ cmpq(value, Immediate(encoding == String::ONE_BYTE_ENCODING
+ ? one_byte_seq_type : two_byte_seq_type));
+ __ Check(equal, "Unexpected string type");
+ __ pop(value);
+ }
+
+ __ SmiToInteger32(value, value);
+ __ SmiToInteger32(index, index);
+ if (encoding == String::ONE_BYTE_ENCODING) {
+ __ movb(FieldOperand(string, index, times_1, SeqString::kHeaderSize),
+ value);
+ } else {
+ __ movw(FieldOperand(string, index, times_2, SeqString::kHeaderSize),
+ value);
+ }
+}
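+
`SeqStringSetCharGenerator` writes an already-smi-checked character into a sequential string, picking `movb` or `movw` by encoding. The equivalent logic in plain C++, with a byte buffer standing in for the string payload past `SeqString::kHeaderSize`:

#include <cstdint>
#include <cstring>

enum Encoding { ONE_BYTE_ENCODING, TWO_BYTE_ENCODING };

// payload points at the first character, i.e. past SeqString::kHeaderSize.
void SeqStringSetChar(Encoding encoding, uint8_t* payload,
                      int32_t index, uint16_t value) {
  if (encoding == ONE_BYTE_ENCODING) {
    payload[index] = static_cast<uint8_t>(value);  // movb, times_1 scale
  } else {
    uint16_t v = value;                            // movw, times_2 scale
    std::memcpy(payload + 2 * index, &v, sizeof(v));
  }
}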
+
+
+void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
+ XMMRegister input,
+ XMMRegister result,
+ XMMRegister double_scratch,
+ Register temp1,
+ Register temp2) {
+ ASSERT(!input.is(result));
+ ASSERT(!input.is(double_scratch));
+ ASSERT(!result.is(double_scratch));
+ ASSERT(!temp1.is(temp2));
+ ASSERT(ExternalReference::math_exp_constants(0).address() != NULL);
+
+ Label done;
+
+ __ movq(kScratchRegister, ExternalReference::math_exp_constants(0));
+ __ movsd(double_scratch, Operand(kScratchRegister, 0 * kDoubleSize));
+ __ xorpd(result, result);
+ __ ucomisd(double_scratch, input);
+ __ j(above_equal, &done);
+ __ ucomisd(input, Operand(kScratchRegister, 1 * kDoubleSize));
+ __ movsd(result, Operand(kScratchRegister, 2 * kDoubleSize));
+ __ j(above_equal, &done);
+ __ movsd(double_scratch, Operand(kScratchRegister, 3 * kDoubleSize));
+ __ movsd(result, Operand(kScratchRegister, 4 * kDoubleSize));
+ __ mulsd(double_scratch, input);
+ __ addsd(double_scratch, result);
+ __ movq(temp2, double_scratch);
+ __ subsd(double_scratch, result);
+ __ movsd(result, Operand(kScratchRegister, 6 * kDoubleSize));
+ __ lea(temp1, Operand(temp2, 0x1ff800));
+ __ and_(temp2, Immediate(0x7ff));
+ __ shr(temp1, Immediate(11));
+ __ mulsd(double_scratch, Operand(kScratchRegister, 5 * kDoubleSize));
+ __ movq(kScratchRegister, ExternalReference::math_exp_log_table());
+ __ shl(temp1, Immediate(52));
+ __ or_(temp1, Operand(kScratchRegister, temp2, times_8, 0));
+ __ movq(kScratchRegister, ExternalReference::math_exp_constants(0));
+ __ subsd(double_scratch, input);
+ __ movsd(input, double_scratch);
+ __ subsd(result, double_scratch);
+ __ mulsd(input, double_scratch);
+ __ mulsd(result, input);
+ __ movq(input, temp1);
+ __ mulsd(result, Operand(kScratchRegister, 7 * kDoubleSize));
+ __ subsd(result, double_scratch);
+ __ addsd(result, Operand(kScratchRegister, 8 * kDoubleSize));
+ __ mulsd(result, input);
+
__ bind(&done);
}
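
`EmitMathExp` is a table-driven exponential: it reduces `x` through `2^y` with `y = x * log2(e)`, pulls an 11-bit slice of the scaled exponent as an index into `math_exp_log_table()` (the `and_(temp2, 0x7ff)` / `times_8` lookup), splices the remaining integer part into the IEEE exponent field (`shl(temp1, 52)`), and multiplies by a short polynomial in the residual. The actual constants live behind `ExternalReference::math_exp_constants`, so the sketch below is only structurally faithful: a self-contained C++ version of the same decomposition with its own table and a truncated series for the residual.

#include <cmath>
#include <cstdint>

// exp(x) = 2^(x*log2(e)) = 2^k * table[j] * e^r, where the scaled
// exponent n = round(y * 2048) splits into k = n >> 11 (IEEE exponent
// part) and j = n & 0x7ff (table index), and r is the small remainder.
double TableDrivenExp(double x) {
  static const int kBits = 11, kSize = 1 << kBits;  // 0x7ff mask, 2048 entries
  static double table[kSize];
  static bool initialized = false;
  if (!initialized) {
    for (int j = 0; j < kSize; ++j) {
      table[j] = std::pow(2.0, j / static_cast<double>(kSize));
    }
    initialized = true;
  }
  const double kLog2e = 1.4426950408889634;
  const double kLn2 = 0.6931471805599453;
  double y = x * kLog2e;
  int64_t n = static_cast<int64_t>(std::floor(y * kSize + 0.5));
  int j = static_cast<int>(n & (kSize - 1));
  int k = static_cast<int>(n >> kBits);
  double r = (y - static_cast<double>(n) / kSize) * kLn2;  // |r| <= ln2/4096
  double er = 1.0 + r * (1.0 + r * (0.5 + r / 6.0));       // e^r, short series
  return std::ldexp(table[j] * er, k);  // 2^k * 2^(j/2048) * e^r
}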
@@ -599,28 +739,6 @@ static byte* GetNoCodeAgeSequence(uint32_t* length) {
}
-byte* Code::FindPlatformCodeAgeSequence() {
- byte* start = instruction_start();
- uint32_t young_length;
- byte* young_sequence = GetNoCodeAgeSequence(&young_length);
- if (!memcmp(start, young_sequence, young_length) ||
- *start == kCallOpcode) {
- return start;
- } else {
- byte* start_after_strict = NULL;
- if (kind() == FUNCTION) {
- start_after_strict = start + kSizeOfFullCodegenStrictModePrologue;
- } else {
- ASSERT(kind() == OPTIMIZED_FUNCTION);
- start_after_strict = start + kSizeOfOptimizedStrictModePrologue;
- }
- ASSERT(!memcmp(start_after_strict, young_sequence, young_length) ||
- *start_after_strict == kCallOpcode);
- return start_after_strict;
- }
-}
-
-
bool Code::IsYoungSequence(byte* sequence) {
uint32_t young_length;
byte* young_sequence = GetNoCodeAgeSequence(&young_length);
diff --git a/src/3rdparty/v8/src/x64/codegen-x64.h b/src/3rdparty/v8/src/x64/codegen-x64.h
index 5d8bbff..3a7646b 100644
--- a/src/3rdparty/v8/src/x64/codegen-x64.h
+++ b/src/3rdparty/v8/src/x64/codegen-x64.h
@@ -39,14 +39,15 @@ class CompilationInfo;
enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
-static const int kSizeOfFullCodegenStrictModePrologue = 14;
-static const int kSizeOfOptimizedStrictModePrologue = 14;
-
// -------------------------------------------------------------------------
// CodeGenerator
class CodeGenerator: public AstVisitor {
public:
+ CodeGenerator() {
+ InitializeAstVisitor();
+ }
+
static bool MakeCode(CompilationInfo* info);
// Printing of AST, etc. as requested by flags.
@@ -66,6 +67,8 @@ class CodeGenerator: public AstVisitor {
int pos,
bool right_here = false);
+ DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
+
private:
DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
};
@@ -86,6 +89,20 @@ class StringCharLoadGenerator : public AllStatic {
DISALLOW_COPY_AND_ASSIGN(StringCharLoadGenerator);
};
+
+class MathExpGenerator : public AllStatic {
+ public:
+ static void EmitMathExp(MacroAssembler* masm,
+ XMMRegister input,
+ XMMRegister result,
+ XMMRegister double_scratch,
+ Register temp1,
+ Register temp2);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(MathExpGenerator);
+};
+
} } // namespace v8::internal
#endif // V8_X64_CODEGEN_X64_H_
diff --git a/src/3rdparty/v8/src/x64/deoptimizer-x64.cc b/src/3rdparty/v8/src/x64/deoptimizer-x64.cc
index a3fe8f9..8e776f9 100644
--- a/src/3rdparty/v8/src/x64/deoptimizer-x64.cc
+++ b/src/3rdparty/v8/src/x64/deoptimizer-x64.cc
@@ -46,11 +46,14 @@ int Deoptimizer::patch_size() {
}
-void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
- HandleScope scope;
+void Deoptimizer::DeoptimizeFunctionWithPreparedFunctionList(
+ JSFunction* function) {
+ Isolate* isolate = function->GetIsolate();
+ HandleScope scope(isolate);
AssertNoAllocation no_allocation;
- if (!function->IsOptimized()) return;
+ ASSERT(function->IsOptimized());
+ ASSERT(function->FunctionsInFunctionListShareSameCode());
// The optimized code is going to be patched, so we cannot use it
// any more. Play safe and reset the whole cache.
@@ -82,7 +85,8 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
// There is room enough to write a long call instruction because we pad
// LLazyBailout instructions with nops if necessary.
CodePatcher patcher(call_address, Assembler::kCallInstructionLength);
- patcher.masm()->Call(GetDeoptimizationEntry(i, LAZY), RelocInfo::NONE);
+ patcher.masm()->Call(GetDeoptimizationEntry(isolate, i, LAZY),
+ RelocInfo::NONE64);
ASSERT(prev_call_address == NULL ||
call_address >= prev_call_address + patch_size());
ASSERT(call_address + patch_size() <= code->instruction_end());
@@ -91,8 +95,6 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
#endif
}
- Isolate* isolate = code->GetIsolate();
-
// Add the deoptimizing code to the list.
DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code);
DeoptimizerData* data = isolate->deoptimizer_data();
@@ -116,8 +118,6 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
static const byte kJnsInstruction = 0x79;
static const byte kJnsOffset = 0x1f;
-static const byte kJaeInstruction = 0x73;
-static const byte kJaeOffset = 0x07;
static const byte kCallInstruction = 0xe8;
static const byte kNopByteOne = 0x66;
static const byte kNopByteTwo = 0x90;
@@ -129,31 +129,26 @@ void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
Address call_target_address = pc_after - kIntSize;
ASSERT_EQ(check_code->entry(),
Assembler::target_address_at(call_target_address));
- // The stack check code matches the pattern:
+ // The back edge bookkeeping code matches the pattern:
//
- // cmp rsp, <limit>
- // jae ok
+ // add <profiling_counter>, <-delta>
+ // jns ok
// call <stack guard>
// test rax, <loop nesting depth>
// ok: ...
//
// We will patch away the branch so the code is:
//
- // cmp rsp, <limit> ;; Not changed
+ // add <profiling_counter>, <-delta> ;; Not changed
// nop
// nop
// call <on-stack replacement>
// test rax, <loop nesting depth>
// ok:
//
- if (FLAG_count_based_interrupts) {
- ASSERT_EQ(kJnsInstruction, *(call_target_address - 3));
- ASSERT_EQ(kJnsOffset, *(call_target_address - 2));
- } else {
- ASSERT_EQ(kJaeInstruction, *(call_target_address - 3));
- ASSERT_EQ(kJaeOffset, *(call_target_address - 2));
- }
- ASSERT_EQ(kCallInstruction, *(call_target_address - 1));
+ ASSERT_EQ(kJnsInstruction, *(call_target_address - 3));
+ ASSERT_EQ(kJnsOffset, *(call_target_address - 2));
+ ASSERT_EQ(kCallInstruction, *(call_target_address - 1));
*(call_target_address - 3) = kNopByteOne;
*(call_target_address - 2) = kNopByteTwo;
Assembler::set_target_address_at(call_target_address,
@@ -176,13 +171,8 @@ void Deoptimizer::RevertStackCheckCodeAt(Code* unoptimized_code,
ASSERT_EQ(kNopByteOne, *(call_target_address - 3));
ASSERT_EQ(kNopByteTwo, *(call_target_address - 2));
ASSERT_EQ(kCallInstruction, *(call_target_address - 1));
- if (FLAG_count_based_interrupts) {
- *(call_target_address - 3) = kJnsInstruction;
- *(call_target_address - 2) = kJnsOffset;
- } else {
- *(call_target_address - 3) = kJaeInstruction;
- *(call_target_address - 2) = kJaeOffset;
- }
+ *(call_target_address - 3) = kJnsInstruction;
+ *(call_target_address - 2) = kJnsOffset;
Assembler::set_target_address_at(call_target_address,
check_code->entry());
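
With the `FLAG_count_based_interrupts` split gone, the patch/revert pair assumes a single code shape: the count-based `jns`. Patching is two bytes of branch plus a retargeted call; a hedged sketch over a raw byte buffer (the real code additionally rewrites the call's displacement via `Assembler::set_target_address_at` and flushes the instruction cache):

#include <cstdint>
#include <cassert>

static const uint8_t kJnsInstruction = 0x79;
static const uint8_t kJnsOffset = 0x1f;
static const uint8_t kNopByteOne = 0x66;   // 66 90 is a two-byte nop
static const uint8_t kNopByteTwo = 0x90;
static const uint8_t kCallInstruction = 0xe8;

// call_target_address points at the call's 4-byte displacement; the
// bytes just before it are: jns <offset>, then the call opcode.
void PatchBackEdge(uint8_t* call_target_address) {
  assert(call_target_address[-3] == kJnsInstruction);
  assert(call_target_address[-2] == kJnsOffset);
  assert(call_target_address[-1] == kCallInstruction);
  call_target_address[-3] = kNopByteOne;  // branch no longer skips the call
  call_target_address[-2] = kNopByteTwo;
  // ...then retarget the call to the on-stack-replacement entry.
}

void RevertBackEdge(uint8_t* call_target_address) {
  assert(call_target_address[-3] == kNopByteOne);
  assert(call_target_address[-2] == kNopByteTwo);
  call_target_address[-3] = kJnsInstruction;
  call_target_address[-2] = kJnsOffset;
  // ...then retarget the call back to the stack-guard entry.
}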
@@ -211,7 +201,7 @@ static int LookupBailoutId(DeoptimizationInputData* data, BailoutId ast_id) {
void Deoptimizer::DoComputeOsrOutputFrame() {
DeoptimizationInputData* data = DeoptimizationInputData::cast(
- optimized_code_->deoptimization_data());
+ compiled_code_->deoptimization_data());
unsigned ast_id = data->OsrAstId()->value();
// TODO(kasperl): This should not be the bailout_id_. It should be
// the ast id. Confusing.
@@ -248,7 +238,7 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
unsigned input_frame_size = input_->GetFrameSize();
ASSERT(fixed_size + height_in_bytes == input_frame_size);
- unsigned stack_slot_size = optimized_code_->stack_slots() * kPointerSize;
+ unsigned stack_slot_size = compiled_code_->stack_slots() * kPointerSize;
unsigned outgoing_height = data->ArgumentsStackHeight(bailout_id)->value();
unsigned outgoing_size = outgoing_height * kPointerSize;
unsigned output_frame_size = fixed_size + stack_slot_size + outgoing_size;
@@ -340,7 +330,7 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
unsigned pc_offset = data->OsrPcOffset()->value();
intptr_t pc = reinterpret_cast<intptr_t>(
- optimized_code_->entry() + pc_offset);
+ compiled_code_->entry() + pc_offset);
output_[0]->SetPc(pc);
}
Code* continuation =
@@ -358,104 +348,190 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
}
-void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
- int frame_index) {
- JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
- unsigned height = iterator->Next();
- unsigned height_in_bytes = height * kPointerSize;
- if (FLAG_trace_deopt) {
- PrintF(" translating arguments adaptor => height=%d\n", height_in_bytes);
- }
-
- unsigned fixed_frame_size = ArgumentsAdaptorFrameConstants::kFrameSize;
- unsigned output_frame_size = height_in_bytes + fixed_frame_size;
+void Deoptimizer::DoComputeCompiledStubFrame(TranslationIterator* iterator,
+ int frame_index) {
+ //
+ // FROM TO
+ // | .... | | .... |
+ // +-------------------------+ +-------------------------+
+ // | JSFunction continuation | | JSFunction continuation |
+ // +-------------------------+ +-------------------------+
+ // | | saved frame (rbp) | | saved frame (rbp) |
+ // | +=========================+<-rbp +=========================+<-rbp
+ // | | JSFunction context | | JSFunction context |
+ // v +-------------------------+ +-------------------------+
+ // | COMPILED_STUB marker | | STUB_FAILURE marker |
+ // +-------------------------+ +-------------------------+
+ // | | | caller args.arguments_ |
+ // | ... | +-------------------------+
+ // | | | caller args.length_ |
+ // +-------------------------+<-rsp +-------------------------+
+ // | caller args pointer |
+ // +-------------------------+
+ // | caller stack param 1 |
+ // parameters in registers +-------------------------+
+ // and spilled to stack | .... |
+ // +-------------------------+
+ // | caller stack param n |
+ // +-------------------------+<-rsp
+ // rax = number of parameters
+ // rbx = failure handler address
+ // rbp = saved frame
+ // rsi = JSFunction context
+ //
- // Allocate and store the output frame description.
+ ASSERT(compiled_code_->kind() == Code::COMPILED_STUB);
+ int major_key = compiled_code_->major_key();
+ CodeStubInterfaceDescriptor* descriptor =
+ isolate_->code_stub_interface_descriptor(major_key);
+
+ // The output frame must have room for all pushed register parameters
+ // and the standard stack frame slots. Include space for an argument
+ // object to the callee and optionally the space to pass the argument
+ // object to the stub failure handler.
+ int height_in_bytes = kPointerSize * descriptor->register_param_count_ +
+ sizeof(Arguments) + kPointerSize;
+ int fixed_frame_size = StandardFrameConstants::kFixedFrameSize;
+ int input_frame_size = input_->GetFrameSize();
+ int output_frame_size = height_in_bytes + fixed_frame_size;
+ if (trace_) {
+ PrintF(" translating %s => StubFailureTrampolineStub, height=%d\n",
+ CodeStub::MajorName(static_cast<CodeStub::Major>(major_key), false),
+ height_in_bytes);
+ }
+
+ // The stub failure trampoline is a single frame.
FrameDescription* output_frame =
- new(output_frame_size) FrameDescription(output_frame_size, function);
- output_frame->SetFrameType(StackFrame::ARGUMENTS_ADAPTOR);
-
- // Arguments adaptor can not be topmost or bottommost.
- ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
- ASSERT(output_[frame_index] == NULL);
+ new(output_frame_size) FrameDescription(output_frame_size, NULL);
+ output_frame->SetFrameType(StackFrame::STUB_FAILURE_TRAMPOLINE);
+ ASSERT(frame_index == 0);
output_[frame_index] = output_frame;
- // The top address of the frame is computed from the previous
- // frame's top and this frame's size.
- intptr_t top_address;
- top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
+ // The top address for the output frame can be computed from the input
+ // frame pointer and the output frame's height. Subtract space for the
+ // context and function slots.
+ intptr_t top_address = input_->GetRegister(rbp.code()) - (2 * kPointerSize) -
+ height_in_bytes;
output_frame->SetTop(top_address);
- // Compute the incoming parameter translation.
- int parameter_count = height;
- unsigned output_offset = output_frame_size;
- for (int i = 0; i < parameter_count; ++i) {
- output_offset -= kPointerSize;
- DoTranslateCommand(iterator, frame_index, output_offset);
+ // Read caller's PC (JSFunction continuation) from the input frame.
+ unsigned input_frame_offset = input_frame_size - kPointerSize;
+ unsigned output_frame_offset = output_frame_size - kPointerSize;
+ intptr_t value = input_->GetFrameSlot(input_frame_offset);
+ output_frame->SetFrameSlot(output_frame_offset, value);
+ if (trace_) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ V8PRIxPTR " ; caller's pc\n",
+ top_address + output_frame_offset, output_frame_offset, value);
+ }
+
+ // Read caller's FP from the input frame, and set this frame's FP.
+ input_frame_offset -= kPointerSize;
+ value = input_->GetFrameSlot(input_frame_offset);
+ output_frame_offset -= kPointerSize;
+ output_frame->SetFrameSlot(output_frame_offset, value);
+ intptr_t frame_ptr = input_->GetRegister(rbp.code());
+ output_frame->SetRegister(rbp.code(), frame_ptr);
+ output_frame->SetFp(frame_ptr);
+ if (trace_) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ V8PRIxPTR " ; caller's fp\n",
+ top_address + output_frame_offset, output_frame_offset, value);
}
- // Read caller's PC from the previous frame.
- output_offset -= kPointerSize;
- intptr_t callers_pc = output_[frame_index - 1]->GetPc();
- output_frame->SetFrameSlot(output_offset, callers_pc);
- if (FLAG_trace_deopt) {
+ // The context can be gotten from the input frame.
+ input_frame_offset -= kPointerSize;
+ value = input_->GetFrameSlot(input_frame_offset);
+ output_frame->SetRegister(rsi.code(), value);
+ output_frame_offset -= kPointerSize;
+ output_frame->SetFrameSlot(output_frame_offset, value);
+ if (trace_) {
PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR " ; caller's pc\n",
- top_address + output_offset, output_offset, callers_pc);
+ V8PRIxPTR " ; context\n",
+ top_address + output_frame_offset, output_frame_offset, value);
}
- // Read caller's FP from the previous frame, and set this frame's FP.
- output_offset -= kPointerSize;
- intptr_t value = output_[frame_index - 1]->GetFp();
- output_frame->SetFrameSlot(output_offset, value);
- intptr_t fp_value = top_address + output_offset;
- output_frame->SetFp(fp_value);
- if (FLAG_trace_deopt) {
+ // A marker value is used in place of the function.
+ output_frame_offset -= kPointerSize;
+ value = reinterpret_cast<intptr_t>(
+ Smi::FromInt(StackFrame::STUB_FAILURE_TRAMPOLINE));
+ output_frame->SetFrameSlot(output_frame_offset, value);
+ if (trace_) {
PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR " ; caller's fp\n",
- fp_value, output_offset, value);
+ V8PRIxPTR " ; function (stub failure sentinel)\n",
+ top_address + output_frame_offset, output_frame_offset, value);
}
- // A marker value is used in place of the context.
- output_offset -= kPointerSize;
- intptr_t context = reinterpret_cast<intptr_t>(
- Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- output_frame->SetFrameSlot(output_offset, context);
- if (FLAG_trace_deopt) {
+ intptr_t caller_arg_count = 0;
+ if (descriptor->stack_parameter_count_ != NULL) {
+ caller_arg_count =
+ input_->GetRegister(descriptor->stack_parameter_count_->code());
+ }
+
+ // Build the Arguments object for the caller's parameters and a pointer to it.
+ output_frame_offset -= kPointerSize;
+ value = frame_ptr + StandardFrameConstants::kCallerSPOffset +
+ (caller_arg_count - 1) * kPointerSize;
+ output_frame->SetFrameSlot(output_frame_offset, value);
+ if (trace_) {
PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR " ; context (adaptor sentinel)\n",
- top_address + output_offset, output_offset, context);
+ V8PRIxPTR " ; args.arguments\n",
+ top_address + output_frame_offset, output_frame_offset, value);
}
- // The function was mentioned explicitly in the ARGUMENTS_ADAPTOR_FRAME.
- output_offset -= kPointerSize;
- value = reinterpret_cast<intptr_t>(function);
- output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
+ output_frame_offset -= kPointerSize;
+ value = caller_arg_count;
+ output_frame->SetFrameSlot(output_frame_offset, value);
+ if (trace_) {
PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR " ; function\n",
- top_address + output_offset, output_offset, value);
+ V8PRIxPTR " ; args.length\n",
+ top_address + output_frame_offset, output_frame_offset, value);
}
- // Number of incoming arguments.
- output_offset -= kPointerSize;
- value = reinterpret_cast<intptr_t>(Smi::FromInt(height - 1));
- output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
+ output_frame_offset -= kPointerSize;
+ value = frame_ptr - (output_frame_size - output_frame_offset) -
+ StandardFrameConstants::kMarkerOffset + kPointerSize;
+ output_frame->SetFrameSlot(output_frame_offset, value);
+ if (trace_) {
PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
- V8PRIxPTR " ; argc (%d)\n",
- top_address + output_offset, output_offset, value, height - 1);
+ V8PRIxPTR " ; args*\n",
+ top_address + output_frame_offset, output_frame_offset, value);
}
- ASSERT(0 == output_offset);
+ // Copy the register parameters to the failure frame.
+ for (int i = 0; i < descriptor->register_param_count_; ++i) {
+ output_frame_offset -= kPointerSize;
+ DoTranslateCommand(iterator, 0, output_frame_offset);
+ }
- Builtins* builtins = isolate_->builtins();
- Code* adaptor_trampoline =
- builtins->builtin(Builtins::kArgumentsAdaptorTrampoline);
- intptr_t pc_value = reinterpret_cast<intptr_t>(
- adaptor_trampoline->instruction_start() +
- isolate_->heap()->arguments_adaptor_deopt_pc_offset()->value());
- output_frame->SetPc(pc_value);
+ ASSERT(0 == output_frame_offset);
+
+ for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); ++i) {
+ double double_value = input_->GetDoubleRegister(i);
+ output_frame->SetDoubleRegister(i, double_value);
+ }
+
+ intptr_t handler =
+ reinterpret_cast<intptr_t>(descriptor->deoptimization_handler_);
+ int params = descriptor->register_param_count_;
+ if (descriptor->stack_parameter_count_ != NULL) {
+ params++;
+ }
+ output_frame->SetRegister(rax.code(), params);
+ output_frame->SetRegister(rbx.code(), handler);
+
+ // Compute this frame's PC, state, and continuation.
+ Code* trampoline = NULL;
+ int extra = descriptor->extra_expression_stack_count_;
+ StubFailureTrampolineStub(extra).FindCodeInCache(&trampoline, isolate_);
+ ASSERT(trampoline != NULL);
+ output_frame->SetPc(reinterpret_cast<intptr_t>(
+ trampoline->instruction_start()));
+ output_frame->SetState(Smi::FromInt(FullCodeGenerator::NO_REGISTERS));
+ Code* notify_failure =
+ isolate_->builtins()->builtin(Builtins::kNotifyStubFailure);
+ output_frame->SetContinuation(
+ reinterpret_cast<intptr_t>(notify_failure->entry()));
}
@@ -466,7 +542,7 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
unsigned height = iterator->Next();
unsigned height_in_bytes = height * kPointerSize;
- if (FLAG_trace_deopt) {
+ if (trace_) {
PrintF(" translating construct stub => height=%d\n", height_in_bytes);
}
@@ -501,7 +577,7 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
output_offset -= kPointerSize;
intptr_t callers_pc = output_[frame_index - 1]->GetPc();
output_frame->SetFrameSlot(output_offset, callers_pc);
- if (FLAG_trace_deopt) {
+ if (trace_) {
PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
V8PRIxPTR " ; caller's pc\n",
top_address + output_offset, output_offset, callers_pc);
@@ -513,7 +589,7 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
output_frame->SetFrameSlot(output_offset, value);
intptr_t fp_value = top_address + output_offset;
output_frame->SetFp(fp_value);
- if (FLAG_trace_deopt) {
+ if (trace_) {
PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
V8PRIxPTR " ; caller's fp\n",
fp_value, output_offset, value);
@@ -523,7 +599,7 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
output_offset -= kPointerSize;
value = output_[frame_index - 1]->GetContext();
output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
+ if (trace_) {
PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
V8PRIxPTR " ; context\n",
top_address + output_offset, output_offset, value);
@@ -533,7 +609,7 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
output_offset -= kPointerSize;
value = reinterpret_cast<intptr_t>(Smi::FromInt(StackFrame::CONSTRUCT));
output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
+ if (trace_) {
PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
V8PRIxPTR " ; function (construct sentinel)\n",
top_address + output_offset, output_offset, value);
@@ -543,7 +619,7 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
output_offset -= kPointerSize;
value = reinterpret_cast<intptr_t>(construct_stub);
output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
+ if (trace_) {
PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
V8PRIxPTR " ; code object\n",
top_address + output_offset, output_offset, value);
@@ -553,7 +629,7 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
output_offset -= kPointerSize;
value = reinterpret_cast<intptr_t>(Smi::FromInt(height - 1));
output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
+ if (trace_) {
PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
V8PRIxPTR " ; argc (%d)\n",
top_address + output_offset, output_offset, value, height - 1);
@@ -564,7 +640,7 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
output_offset -= kPointerSize;
value = output_frame->GetFrameSlot(output_frame_size - kPointerSize);
output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
+ if (trace_) {
PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
V8PRIxPTR " ; allocated receiver\n",
top_address + output_offset, output_offset, value);
@@ -579,125 +655,6 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
}
-void Deoptimizer::DoComputeAccessorStubFrame(TranslationIterator* iterator,
- int frame_index,
- bool is_setter_stub_frame) {
- JSFunction* accessor = JSFunction::cast(ComputeLiteral(iterator->Next()));
- // The receiver (and the implicit return value, if any) are expected in
- // registers by the LoadIC/StoreIC, so they don't belong to the output stack
- // frame. This means that we have to use a height of 0.
- unsigned height = 0;
- unsigned height_in_bytes = height * kPointerSize;
- const char* kind = is_setter_stub_frame ? "setter" : "getter";
- if (FLAG_trace_deopt) {
- PrintF(" translating %s stub => height=%u\n", kind, height_in_bytes);
- }
-
- // We need 1 stack entry for the return address + 4 stack entries from
- // StackFrame::INTERNAL (FP, context, frame type, code object, see
- // MacroAssembler::EnterFrame). For a setter stub frame we need one additional
- // entry for the implicit return value, see
- // StoreStubCompiler::CompileStoreViaSetter.
- unsigned fixed_frame_entries = 1 + 4 + (is_setter_stub_frame ? 1 : 0);
- unsigned fixed_frame_size = fixed_frame_entries * kPointerSize;
- unsigned output_frame_size = height_in_bytes + fixed_frame_size;
-
- // Allocate and store the output frame description.
- FrameDescription* output_frame =
- new(output_frame_size) FrameDescription(output_frame_size, accessor);
- output_frame->SetFrameType(StackFrame::INTERNAL);
-
- // A frame for an accessor stub can not be the topmost or bottommost one.
- ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
- ASSERT(output_[frame_index] == NULL);
- output_[frame_index] = output_frame;
-
- // The top address of the frame is computed from the previous frame's top and
- // this frame's size.
- intptr_t top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
- output_frame->SetTop(top_address);
-
- unsigned output_offset = output_frame_size;
-
- // Read caller's PC from the previous frame.
- output_offset -= kPointerSize;
- intptr_t callers_pc = output_[frame_index - 1]->GetPc();
- output_frame->SetFrameSlot(output_offset, callers_pc);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
- " ; caller's pc\n",
- top_address + output_offset, output_offset, callers_pc);
- }
-
- // Read caller's FP from the previous frame, and set this frame's FP.
- output_offset -= kPointerSize;
- intptr_t value = output_[frame_index - 1]->GetFp();
- output_frame->SetFrameSlot(output_offset, value);
- intptr_t fp_value = top_address + output_offset;
- output_frame->SetFp(fp_value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
- " ; caller's fp\n",
- fp_value, output_offset, value);
- }
-
- // The context can be gotten from the previous frame.
- output_offset -= kPointerSize;
- value = output_[frame_index - 1]->GetContext();
- output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
- " ; context\n",
- top_address + output_offset, output_offset, value);
- }
-
- // A marker value is used in place of the function.
- output_offset -= kPointerSize;
- value = reinterpret_cast<intptr_t>(Smi::FromInt(StackFrame::INTERNAL));
- output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
- " ; function (%s sentinel)\n",
- top_address + output_offset, output_offset, value, kind);
- }
-
- // Get Code object from accessor stub.
- output_offset -= kPointerSize;
- Builtins::Name name = is_setter_stub_frame ?
- Builtins::kStoreIC_Setter_ForDeopt :
- Builtins::kLoadIC_Getter_ForDeopt;
- Code* accessor_stub = isolate_->builtins()->builtin(name);
- value = reinterpret_cast<intptr_t>(accessor_stub);
- output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
- PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR
- " ; code object\n",
- top_address + output_offset, output_offset, value);
- }
-
- // Skip receiver.
- Translation::Opcode opcode =
- static_cast<Translation::Opcode>(iterator->Next());
- iterator->Skip(Translation::NumberOfOperandsFor(opcode));
-
- if (is_setter_stub_frame) {
- // The implicit return value was part of the artificial setter stub
- // environment.
- output_offset -= kPointerSize;
- DoTranslateCommand(iterator, frame_index, output_offset);
- }
-
- ASSERT(0 == output_offset);
-
- Smi* offset = is_setter_stub_frame ?
- isolate_->heap()->setter_stub_deopt_pc_offset() :
- isolate_->heap()->getter_stub_deopt_pc_offset();
- intptr_t pc = reinterpret_cast<intptr_t>(
- accessor_stub->instruction_start() + offset->value());
- output_frame->SetPc(pc);
-}
-
-
void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
int frame_index) {
BailoutId node_id = BailoutId(iterator->Next());
@@ -712,7 +669,7 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
}
unsigned height = iterator->Next();
unsigned height_in_bytes = height * kPointerSize;
- if (FLAG_trace_deopt) {
+ if (trace_) {
PrintF(" translating ");
function->PrintName();
PrintF(" => node=%d, height=%d\n", node_id.ToInt(), height_in_bytes);
@@ -776,7 +733,7 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
value = output_[frame_index - 1]->GetPc();
}
output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
+ if (trace_) {
PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
V8PRIxPTR " ; caller's pc\n",
top_address + output_offset, output_offset, value);
@@ -798,7 +755,7 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
ASSERT(!is_bottommost || input_->GetRegister(rbp.code()) == fp_value);
output_frame->SetFp(fp_value);
if (is_topmost) output_frame->SetRegister(rbp.code(), fp_value);
- if (FLAG_trace_deopt) {
+ if (trace_) {
PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
V8PRIxPTR " ; caller's fp\n",
fp_value, output_offset, value);
@@ -817,7 +774,7 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
output_frame->SetFrameSlot(output_offset, value);
output_frame->SetContext(value);
if (is_topmost) output_frame->SetRegister(rsi.code(), value);
- if (FLAG_trace_deopt) {
+ if (trace_) {
PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
V8PRIxPTR "; context\n",
top_address + output_offset, output_offset, value);
@@ -831,7 +788,7 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
// input frame.
ASSERT(!is_bottommost || input_->GetFrameSlot(input_offset) == value);
output_frame->SetFrameSlot(output_offset, value);
- if (FLAG_trace_deopt) {
+ if (trace_) {
PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
V8PRIxPTR "; function\n",
top_address + output_offset, output_offset, value);
@@ -878,7 +835,7 @@ void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
}
input_->SetRegister(rsp.code(), reinterpret_cast<intptr_t>(frame->sp()));
input_->SetRegister(rbp.code(), reinterpret_cast<intptr_t>(frame->fp()));
- for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; i++) {
+ for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); i++) {
input_->SetDoubleRegister(i, 0.0);
}
@@ -898,10 +855,10 @@ void Deoptimizer::EntryGenerator::Generate() {
const int kNumberOfRegisters = Register::kNumRegisters;
const int kDoubleRegsSize = kDoubleSize *
- XMMRegister::kNumAllocatableRegisters;
+ XMMRegister::NumAllocatableRegisters();
__ subq(rsp, Immediate(kDoubleRegsSize));
- for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
+ for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); ++i) {
XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
int offset = i * kDoubleSize;
__ movsd(Operand(rsp, offset), xmm_reg);
@@ -990,7 +947,7 @@ void Deoptimizer::EntryGenerator::Generate() {
// Fill in the double input registers.
int double_regs_offset = FrameDescription::double_registers_offset();
- for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; i++) {
+ for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); i++) {
int dst_offset = i * kDoubleSize + double_regs_offset;
__ pop(Operand(rbx, dst_offset));
}
@@ -1011,10 +968,13 @@ void Deoptimizer::EntryGenerator::Generate() {
// limit and copy the contents of the activation frame to the input
// frame description.
__ lea(rdx, Operand(rbx, FrameDescription::frame_content_offset()));
+ Label pop_loop_header;
+ __ jmp(&pop_loop_header);
Label pop_loop;
__ bind(&pop_loop);
__ pop(Operand(rdx, 0));
__ addq(rdx, Immediate(sizeof(intptr_t)));
+ __ bind(&pop_loop_header);
__ cmpq(rcx, rsp);
__ j(not_equal, &pop_loop);
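
The `pop_loop_header` edit converts an emitted do-while into a while loop: jump straight to the condition, keep the body first, and bind the condition label after it, so an empty activation frame copies nothing. Every loop in this entry sequence now follows that shape; a direct C++ rendering of the emitted control flow:

#include <cstdint>

// Mirrors the entry generator's pop loop after the fix: the test sits
// at the top, reached by an initial jump past the body.
void CopyStack(intptr_t*& dst /* rdx */, intptr_t*& sp /* rsp */,
               intptr_t* limit /* rcx */) {
  goto pop_loop_header;            // __ jmp(&pop_loop_header);
pop_loop:                          // __ bind(&pop_loop);
  *dst++ = *sp++;                  // __ pop(Operand(rdx, 0)); addq rdx, 8
pop_loop_header:                   // __ bind(&pop_loop_header);
  if (limit != sp) goto pop_loop;  // __ cmpq(rcx, rsp); j(not_equal, ...)
}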
@@ -1031,32 +991,34 @@ void Deoptimizer::EntryGenerator::Generate() {
__ pop(rax);
// Replace the current frame with the output frames.
- Label outer_push_loop, inner_push_loop;
+ Label outer_push_loop, inner_push_loop,
+ outer_loop_header, inner_loop_header;
// Outer loop state: rax = current FrameDescription**, rdx = one past the
// last FrameDescription**.
__ movl(rdx, Operand(rax, Deoptimizer::output_count_offset()));
__ movq(rax, Operand(rax, Deoptimizer::output_offset()));
__ lea(rdx, Operand(rax, rdx, times_8, 0));
+ __ jmp(&outer_loop_header);
__ bind(&outer_push_loop);
// Inner loop state: rbx = current FrameDescription*, rcx = loop index.
__ movq(rbx, Operand(rax, 0));
__ movq(rcx, Operand(rbx, FrameDescription::frame_size_offset()));
+ __ jmp(&inner_loop_header);
__ bind(&inner_push_loop);
__ subq(rcx, Immediate(sizeof(intptr_t)));
__ push(Operand(rbx, rcx, times_1, FrameDescription::frame_content_offset()));
+ __ bind(&inner_loop_header);
__ testq(rcx, rcx);
__ j(not_zero, &inner_push_loop);
__ addq(rax, Immediate(kPointerSize));
+ __ bind(&outer_loop_header);
__ cmpq(rax, rdx);
__ j(below, &outer_push_loop);
- // In case of OSR, we have to restore the XMM registers.
- if (type() == OSR) {
- for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
- XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
- int src_offset = i * kDoubleSize + double_regs_offset;
- __ movsd(xmm_reg, Operand(rbx, src_offset));
- }
+ for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); ++i) {
+ XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
+ int src_offset = i * kDoubleSize + double_regs_offset;
+ __ movsd(xmm_reg, Operand(rbx, src_offset));
}
// Push state, pc, and continuation from the last output frame.
diff --git a/src/3rdparty/v8/src/x64/disasm-x64.cc b/src/3rdparty/v8/src/x64/disasm-x64.cc
index c8606c4..fb0914d 100644
--- a/src/3rdparty/v8/src/x64/disasm-x64.cc
+++ b/src/3rdparty/v8/src/x64/disasm-x64.cc
@@ -1244,6 +1244,13 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
AppendToBuffer("xorps %s, ", NameOfXMMRegister(regop));
current += PrintRightXMMOperand(current);
+ } else if (opcode == 0x50) {
+ // movmskps reg, xmm
+ int mod, regop, rm;
+ get_modrm(*current, &mod, &regop, &rm);
+ AppendToBuffer("movmskps %s, ", NameOfCPURegister(regop));
+ current += PrintRightXMMOperand(current);
+
} else if ((opcode & 0xF0) == 0x80) {
// Jcc: Conditional jump (branch).
current = data + JumpConditional(data);
@@ -1724,6 +1731,11 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
data += F6F7Instruction(data);
break;
+ case 0x3C:
+ AppendToBuffer("cmp al, 0x%x", *reinterpret_cast<int8_t*>(data + 1));
+ data += 2;
+ break;
+
default:
UnimplementedInstruction();
data += 1;
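Both new cases follow the decoder's usual pattern: match the opcode byte, split the ModR/M byte into its three fields, and append the mnemonic and operands to the output buffer. The cmp al, imm8 form (opcode 0x3C) needs no ModR/M at all and consumes exactly two bytes, the opcode plus the 8-bit immediate, hence data += 2. A self-contained sketch of the ModR/M split (the field layout is fixed by the x86 encoding; the helper name is illustrative):

    #include <cstdint>

    // An x86 ModR/M byte packs three fields:
    //   bits 7..6 = mod (addressing mode),
    //   bits 5..3 = reg (register, or opcode extension),
    //   bits 2..0 = r/m (register or memory operand).
    static void GetModRM(uint8_t data, int* mod, int* regop, int* rm) {
      *mod = (data >> 6) & 0x3;
      *regop = (data >> 3) & 0x7;
      *rm = data & 0x7;
    }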
diff --git a/src/3rdparty/v8/src/x64/frames-x64.h b/src/3rdparty/v8/src/x64/frames-x64.h
index 3e3d63d..c9092af 100644
--- a/src/3rdparty/v8/src/x64/frames-x64.h
+++ b/src/3rdparty/v8/src/x64/frames-x64.h
@@ -85,20 +85,6 @@ class ExitFrameConstants : public AllStatic {
};
-class StandardFrameConstants : public AllStatic {
- public:
- // Fixed part of the frame consists of return address, caller fp,
- // context and function.
- static const int kFixedFrameSize = 4 * kPointerSize;
- static const int kExpressionsOffset = -3 * kPointerSize;
- static const int kMarkerOffset = -2 * kPointerSize;
- static const int kContextOffset = -1 * kPointerSize;
- static const int kCallerFPOffset = 0 * kPointerSize;
- static const int kCallerPCOffset = +1 * kPointerSize;
- static const int kCallerSPOffset = +2 * kPointerSize;
-};
-
-
class JavaScriptFrameConstants : public AllStatic {
public:
// FP-relative.
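Dropping the x64-local copy of StandardFrameConstants is safe because the offsets are expressed in kPointerSize units and are identical across ports: relative to the frame pointer, the caller's pc and sp live above it and the context, marker, and expression slots below. A sketch of the shared definition these constants migrate to, assuming the common header keeps the same values:

    // Fixed part of a standard frame: return address, caller fp,
    // context and function -- four pointers in total.
    class StandardFrameConstants {
     public:
      static const int kPointerSize = 8;  // x64
      static const int kFixedFrameSize    =  4 * kPointerSize;
      static const int kExpressionsOffset = -3 * kPointerSize;
      static const int kMarkerOffset      = -2 * kPointerSize;
      static const int kContextOffset     = -1 * kPointerSize;
      static const int kCallerFPOffset    =  0 * kPointerSize;
      static const int kCallerPCOffset    = +1 * kPointerSize;
      static const int kCallerSPOffset    = +2 * kPointerSize;
    };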
diff --git a/src/3rdparty/v8/src/x64/full-codegen-x64.cc b/src/3rdparty/v8/src/x64/full-codegen-x64.cc
index a71c9b1..e236ce1 100644
--- a/src/3rdparty/v8/src/x64/full-codegen-x64.cc
+++ b/src/3rdparty/v8/src/x64/full-codegen-x64.cc
@@ -119,7 +119,7 @@ void FullCodeGenerator::Generate() {
handler_table_ =
isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
profiling_counter_ = isolate()->factory()->NewJSGlobalPropertyCell(
- Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget)));
+ Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
SetFunctionPosition(function());
Comment cmnt(masm_, "[ function compiled by full code generator");
@@ -127,7 +127,7 @@ void FullCodeGenerator::Generate() {
#ifdef DEBUG
if (strlen(FLAG_stop_at) > 0 &&
- info->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
+ info->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
__ int3();
}
#endif
@@ -138,8 +138,6 @@ void FullCodeGenerator::Generate() {
// function calls.
if (!info->is_classic_mode() || info->is_native()) {
Label ok;
- Label begin;
- __ bind(&begin);
__ testq(rcx, rcx);
__ j(zero, &ok, Label::kNear);
// +1 for return address.
@@ -147,8 +145,6 @@ void FullCodeGenerator::Generate() {
__ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
__ movq(Operand(rsp, receiver_offset), kScratchRegister);
__ bind(&ok);
- ASSERT(!FLAG_age_code ||
- (kSizeOfFullCodegenStrictModePrologue == ok.pos() - begin.pos()));
}
// Open a frame scope to indicate that there is a frame on the stack. The
@@ -156,6 +152,7 @@ void FullCodeGenerator::Generate() {
// the frame (that is done below).
FrameScope frame_scope(masm_, StackFrame::MANUAL);
+ info->set_prologue_offset(masm_->pc_offset());
__ push(rbp); // Caller's frame pointer.
__ movq(rbp, rsp);
__ push(rsi); // Callee's context.
@@ -323,40 +320,33 @@ void FullCodeGenerator::EmitProfilingCounterReset() {
__ movq(rbx, profiling_counter_, RelocInfo::EMBEDDED_OBJECT);
__ movq(kScratchRegister,
reinterpret_cast<uint64_t>(Smi::FromInt(reset_value)),
- RelocInfo::NONE);
+ RelocInfo::NONE64);
__ movq(FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset),
kScratchRegister);
}
-void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt,
- Label* back_edge_target) {
- Comment cmnt(masm_, "[ Stack check");
+void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
+ Label* back_edge_target) {
+ Comment cmnt(masm_, "[ Back edge bookkeeping");
Label ok;
- if (FLAG_count_based_interrupts) {
- int weight = 1;
- if (FLAG_weighted_back_edges) {
- ASSERT(back_edge_target->is_bound());
- int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
- weight = Min(kMaxBackEdgeWeight,
- Max(1, distance / kBackEdgeDistanceUnit));
- }
- EmitProfilingCounterDecrement(weight);
- __ j(positive, &ok, Label::kNear);
- InterruptStub stub;
- __ CallStub(&stub);
- } else {
- __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
- __ j(above_equal, &ok, Label::kNear);
- StackCheckStub stub;
- __ CallStub(&stub);
+ int weight = 1;
+ if (FLAG_weighted_back_edges) {
+ ASSERT(back_edge_target->is_bound());
+ int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
+ weight = Min(kMaxBackEdgeWeight,
+ Max(1, distance / kBackEdgeDistanceUnit));
}
+ EmitProfilingCounterDecrement(weight);
+ __ j(positive, &ok, Label::kNear);
+ InterruptStub stub;
+ __ CallStub(&stub);
// Record a mapping of this PC offset to the OSR id. This is used to find
// the AST id from the unoptimized code in order to use it as a key into
// the deoptimization input data found in the optimized code.
- RecordStackCheck(stmt->OsrEntryId());
+ RecordBackEdge(stmt->OsrEntryId());
// Loop stack checks can be patched to perform on-stack replacement. In
// order to decide whether or not to perform OSR we embed the loop depth
@@ -365,9 +355,7 @@ void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt,
ASSERT(loop_depth() > 0);
__ testl(rax, Immediate(Min(loop_depth(), Code::kMaxLoopNestingMarker)));
- if (FLAG_count_based_interrupts) {
- EmitProfilingCounterReset();
- }
+ EmitProfilingCounterReset();
__ bind(&ok);
PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
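With the FLAG_count_based_interrupts branch gone, every back edge decrements the profiling counter by a weight proportional to the code size of the loop body, clamped to [1, kMaxBackEdgeWeight], so a large loop body exhausts the interrupt budget in fewer iterations than a tight one. A standalone sketch of the weight computation (the constant values here are placeholders; the real ones live in full-codegen.h):

    #include <algorithm>

    const int kMaxBackEdgeWeight = 127;      // placeholder value
    const int kBackEdgeDistanceUnit = 162;   // placeholder: bytes per weight unit

    // Weight grows with the loop body's code size, so hot loops of any
    // size cross the interrupt threshold after a similar amount of
    // executed code rather than a similar iteration count.
    int BackEdgeWeight(int distance_in_bytes) {
      return std::min(kMaxBackEdgeWeight,
                      std::max(1, distance_in_bytes / kBackEdgeDistanceUnit));
    }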
@@ -764,8 +752,7 @@ void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr,
void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) {
- // The variable in the declaration always resides in the current function
- // context.
+ // The variable in the declaration always resides in the current context.
ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
if (generate_debug_code_) {
// Check that we're not inside a with or catch context.
@@ -794,7 +781,8 @@ void FullCodeGenerator::VisitVariableDeclaration(
? isolate()->factory()->the_hole_value()
: isolate()->factory()->undefined_value(),
zone());
- globals_->Add(isolate()->factory()->ToBoolean(variable->is_qml_global()), zone());
+ globals_->Add(isolate()->factory()->ToBoolean(variable->is_qml_global()),
+ zone());
break;
case Variable::PARAMETER:
@@ -854,7 +842,8 @@ void FullCodeGenerator::VisitFunctionDeclaration(
// Check for stack-overflow exception.
if (function.is_null()) return SetStackOverflow();
globals_->Add(function, zone());
- globals_->Add(isolate()->factory()->ToBoolean(variable->is_qml_global()), zone());
+ globals_->Add(isolate()->factory()->ToBoolean(variable->is_qml_global()),
+ zone());
break;
}
@@ -898,34 +887,32 @@ void FullCodeGenerator::VisitFunctionDeclaration(
void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* declaration) {
- VariableProxy* proxy = declaration->proxy();
- Variable* variable = proxy->var();
- Handle<JSModule> instance = declaration->module()->interface()->Instance();
- ASSERT(!instance.is_null());
+ Variable* variable = declaration->proxy()->var();
+ ASSERT(variable->location() == Variable::CONTEXT);
+ ASSERT(variable->interface()->IsFrozen());
- switch (variable->location()) {
- case Variable::UNALLOCATED: {
- Comment cmnt(masm_, "[ ModuleDeclaration");
- globals_->Add(variable->name(), zone());
- globals_->Add(instance, zone());
- globals_->Add(isolate()->factory()->ToBoolean(variable->is_qml_global()), zone());
- Visit(declaration->module());
- break;
- }
+ Comment cmnt(masm_, "[ ModuleDeclaration");
+ EmitDebugCheckDeclarationContext(variable);
- case Variable::CONTEXT: {
- Comment cmnt(masm_, "[ ModuleDeclaration");
- EmitDebugCheckDeclarationContext(variable);
- __ Move(ContextOperand(rsi, variable->index()), instance);
- Visit(declaration->module());
- break;
- }
+ // Load instance object.
+ __ LoadContext(rax, scope_->ContextChainLength(scope_->GlobalScope()));
+ __ movq(rax, ContextOperand(rax, variable->interface()->Index()));
+ __ movq(rax, ContextOperand(rax, Context::EXTENSION_INDEX));
- case Variable::PARAMETER:
- case Variable::LOCAL:
- case Variable::LOOKUP:
- UNREACHABLE();
- }
+ // Assign it.
+ __ movq(ContextOperand(rsi, variable->index()), rax);
+ // We know that we have written a module, which is not a smi.
+ __ RecordWriteContextSlot(rsi,
+ Context::SlotOffset(variable->index()),
+ rax,
+ rcx,
+ kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ PrepareForBailoutForId(declaration->proxy()->id(), NO_REGISTERS);
+
+ // Traverse into body.
+ Visit(declaration->module());
}
@@ -967,6 +954,14 @@ void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
}
+void FullCodeGenerator::DeclareModules(Handle<FixedArray> descriptions) {
+ // Call the runtime to declare the modules.
+ __ Push(descriptions);
+ __ CallRuntime(Runtime::kDeclareModules, 1);
+ // Return value is ignored.
+}
+
+
void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
Comment cmnt(masm_, "[ SwitchStatement");
Breakable nested_statement(this, stmt);
@@ -1017,7 +1012,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Record position before stub call for type feedback.
SetSourcePosition(clause->position());
- Handle<Code> ic = CompareIC::GetUninitialized(Token::EQ_STRICT);
+ Handle<Code> ic = CompareIC::GetUninitialized(isolate(), Token::EQ_STRICT);
CallIC(ic, RelocInfo::CODE_TARGET, clause->CompareId());
patch_site.EmitPatchInfo();
@@ -1145,7 +1140,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
Handle<JSGlobalPropertyCell> cell =
isolate()->factory()->NewJSGlobalPropertyCell(
Handle<Object>(
- Smi::FromInt(TypeFeedbackCells::kForInFastCaseMarker)));
+ Smi::FromInt(TypeFeedbackCells::kForInFastCaseMarker),
+ isolate()));
RecordTypeFeedbackCell(stmt->ForInFeedbackId(), cell);
__ LoadHeapObject(rbx, cell);
__ Move(FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset),
@@ -1222,7 +1218,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ bind(loop_statement.continue_label());
__ SmiAddConstant(Operand(rsp, 0 * kPointerSize), Smi::FromInt(1));
- EmitStackCheck(stmt, &loop);
+ EmitBackEdgeBookkeeping(stmt, &loop);
__ jmp(&loop);
// Remove the pointers stored on the stack.
@@ -1321,7 +1317,9 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
// All extension objects were empty and it is safe to use a global
// load IC call.
- __ movq(rax, var->is_qml_global() ? QmlGlobalObjectOperand() : GlobalObjectOperand());
+ __ movq(rax, var->is_qml_global()
+ ? QmlGlobalObjectOperand()
+ : GlobalObjectOperand());
__ Move(rcx, var->name());
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
@@ -1376,9 +1374,9 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(Variable* var,
} else if (var->mode() == DYNAMIC_LOCAL) {
Variable* local = var->local_if_not_shadowed();
__ movq(rax, ContextSlotOperandCheckExtensions(local, slow));
- if (local->mode() == CONST ||
- local->mode() == CONST_HARMONY ||
- local->mode() == LET) {
+ if (local->mode() == LET ||
+ local->mode() == CONST ||
+ local->mode() == CONST_HARMONY) {
__ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
__ j(not_equal, done);
if (local->mode() == CONST) {
@@ -1406,7 +1404,9 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
// Use inline caching. Variable name is passed in rcx and the global
// object on the stack.
__ Move(rcx, var->name());
- __ movq(rax, var->is_qml_global() ? QmlGlobalObjectOperand() : GlobalObjectOperand());
+ __ movq(rax, var->is_qml_global()
+ ? QmlGlobalObjectOperand()
+ : GlobalObjectOperand());
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
CallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
context()->Plug(rax);
@@ -1561,24 +1561,34 @@ void FullCodeGenerator::EmitAccessor(Expression* expression) {
void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Comment cmnt(masm_, "[ ObjectLiteral");
Handle<FixedArray> constant_properties = expr->constant_properties();
- __ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ push(FieldOperand(rdi, JSFunction::kLiteralsOffset));
- __ Push(Smi::FromInt(expr->literal_index()));
- __ Push(constant_properties);
int flags = expr->fast_elements()
? ObjectLiteral::kFastElements
: ObjectLiteral::kNoFlags;
flags |= expr->has_function()
? ObjectLiteral::kHasFunction
: ObjectLiteral::kNoFlags;
- __ Push(Smi::FromInt(flags));
int properties_count = constant_properties->length() / 2;
if (expr->depth() > 1) {
+ __ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ __ push(FieldOperand(rdi, JSFunction::kLiteralsOffset));
+ __ Push(Smi::FromInt(expr->literal_index()));
+ __ Push(constant_properties);
+ __ Push(Smi::FromInt(flags));
__ CallRuntime(Runtime::kCreateObjectLiteral, 4);
- } else if (flags != ObjectLiteral::kFastElements ||
+ } else if (Serializer::enabled() || flags != ObjectLiteral::kFastElements ||
properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
+ __ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ __ push(FieldOperand(rdi, JSFunction::kLiteralsOffset));
+ __ Push(Smi::FromInt(expr->literal_index()));
+ __ Push(constant_properties);
+ __ Push(Smi::FromInt(flags));
__ CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
} else {
+ __ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ __ movq(rax, FieldOperand(rdi, JSFunction::kLiteralsOffset));
+ __ Move(rbx, Smi::FromInt(expr->literal_index()));
+ __ Move(rcx, constant_properties);
+ __ Move(rdx, Smi::FromInt(flags));
FastCloneShallowObjectStub stub(properties_count);
__ CallStub(&stub);
}
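The restructured object-literal code now pushes its arguments only on the two runtime paths and passes them in registers (rax, rbx, rcx, rdx) on the stub path; the strategy choice itself is unchanged except that an enabled serializer also forces the shallow runtime call. A condensed sketch of the three-way decision, where fast_elements abbreviates flags == ObjectLiteral::kFastElements and kMaxProps stands in for FastCloneShallowObjectStub::kMaximumClonedProperties:

    enum class CloneStrategy { kRuntimeDeep, kRuntimeShallow, kStub };

    // Mirrors the branch structure above.
    CloneStrategy ChooseObjectLiteralStrategy(int depth, bool serializer_enabled,
                                              bool fast_elements,
                                              int properties_count, int kMaxProps) {
      if (depth > 1) return CloneStrategy::kRuntimeDeep;   // nested literals
      if (serializer_enabled || !fast_elements ||
          properties_count > kMaxProps) {
        return CloneStrategy::kRuntimeShallow;             // slow shallow clone
      }
      return CloneStrategy::kStub;  // FastCloneShallowObjectStub, args in registers
    }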
@@ -1610,7 +1620,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
ASSERT(!CompileTimeValue::IsCompileTimeValue(value));
// Fall through.
case ObjectLiteral::Property::COMPUTED:
- if (key->handle()->IsSymbol()) {
+ if (key->handle()->IsInternalizedString()) {
if (property->emit_store()) {
VisitForAccumulatorValue(value);
__ Move(rcx, key->handle());
@@ -1699,6 +1709,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
__ IncrementCounter(isolate()->counters()->cow_arrays_created_stub(), 1);
FastCloneShallowArrayStub stub(
FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS,
+ DONT_TRACK_ALLOCATION_SITE,
length);
__ CallStub(&stub);
} else if (expr->depth() > 1) {
@@ -1708,12 +1719,19 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
} else {
ASSERT(IsFastSmiOrObjectElementsKind(constant_elements_kind) ||
FLAG_smi_only_arrays);
+ FastCloneShallowArrayStub::Mode mode =
+ FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS;
+ AllocationSiteMode allocation_site_mode = FLAG_track_allocation_sites
+ ? TRACK_ALLOCATION_SITE : DONT_TRACK_ALLOCATION_SITE;
+
// If the elements are already FAST_*_ELEMENTS, the boilerplate cannot
// change, so it's possible to specialize the stub in advance.
- FastCloneShallowArrayStub::Mode mode = has_constant_fast_elements
- ? FastCloneShallowArrayStub::CLONE_ELEMENTS
- : FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS;
- FastCloneShallowArrayStub stub(mode, length);
+ if (has_constant_fast_elements) {
+ mode = FastCloneShallowArrayStub::CLONE_ELEMENTS;
+ allocation_site_mode = DONT_TRACK_ALLOCATION_SITE;
+ }
+
+ FastCloneShallowArrayStub stub(mode, allocation_site_mode, length);
__ CallStub(&stub);
}
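Allocation-site tracking costs an extra word in the clone, so the stub only tracks sites when the elements kind can still transition: copy-on-write arrays and literals with known constant fast elements opt out, and the general CLONE_ANY_ELEMENTS path opts in only under FLAG_track_allocation_sites. A sketch of the selection logic as this hunk implements it:

    enum AllocationSiteMode { DONT_TRACK_ALLOCATION_SITE, TRACK_ALLOCATION_SITE };

    // Constant fast elements cannot change kind, so there is nothing
    // for an allocation site to record.
    AllocationSiteMode ChooseSiteMode(bool track_allocation_sites,
                                      bool has_constant_fast_elements) {
      if (has_constant_fast_elements) return DONT_TRACK_ALLOCATION_SITE;
      return track_allocation_sites ? TRACK_ALLOCATION_SITE
                                    : DONT_TRACK_ALLOCATION_SITE;
    }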
@@ -1917,7 +1935,7 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
__ bind(&stub_call);
__ movq(rax, rcx);
BinaryOpStub stub(op, mode);
- CallIC(stub.GetCode(), RelocInfo::CODE_TARGET,
+ CallIC(stub.GetCode(isolate()), RelocInfo::CODE_TARGET,
expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
__ jmp(&done, Label::kNear);
@@ -1967,7 +1985,7 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
__ pop(rdx);
BinaryOpStub stub(op, mode);
JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
- CallIC(stub.GetCode(), RelocInfo::CODE_TARGET,
+ CallIC(stub.GetCode(isolate()), RelocInfo::CODE_TARGET,
expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
context()->Plug(rax);
@@ -1975,7 +1993,7 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
void FullCodeGenerator::EmitAssignment(Expression* expr) {
- // Invalid left-hand sides are rewritten to have a 'throw
+ // Invalid left-hand sides are rewritten by the parser to have a 'throw
// ReferenceError' on the left-hand side.
if (!expr->IsValidLeftHandSide()) {
VisitForEffect(expr);
@@ -2035,7 +2053,9 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
if (var->IsUnallocated()) {
// Global var, const, or let.
__ Move(rcx, var->name());
- __ movq(rdx, var->is_qml_global() ? QmlGlobalObjectOperand() : GlobalObjectOperand());
+ __ movq(rdx, var->is_qml_global()
+ ? QmlGlobalObjectOperand()
+ : GlobalObjectOperand());
Handle<Code> ic = is_classic_mode()
? isolate()->builtins()->StoreIC_Initialize()
: isolate()->builtins()->StoreIC_Initialize_Strict();
@@ -2311,7 +2331,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
VariableProxy* proxy = callee->AsVariableProxy();
Property* property = callee->AsProperty();
- if (proxy != NULL && proxy->var()->is_possibly_eval()) {
+ if (proxy != NULL && proxy->var()->is_possibly_eval(isolate())) {
// In a call to eval, we first call %ResolvePossiblyDirectEval to
// resolve the function we need to call and the receiver of the call.
// Then we call the resolved function using the given arguments.
@@ -2348,7 +2368,9 @@ void FullCodeGenerator::VisitCall(Call* expr) {
} else if (proxy != NULL && proxy->var()->IsUnallocated()) {
// Call to a global variable. Push global object as receiver for the
// call IC lookup.
- __ push(proxy->var()->is_qml_global() ? QmlGlobalObjectOperand() : GlobalObjectOperand());
+ __ push(proxy->var()->is_qml_global()
+ ? QmlGlobalObjectOperand()
+ : GlobalObjectOperand());
EmitCallWithIC(expr, proxy->name(), RelocInfo::CODE_TARGET_CONTEXT);
} else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
// Call to a lookup slot (dynamically introduced variable).
@@ -2451,7 +2473,7 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
__ Move(rbx, cell);
CallConstructStub stub(RECORD_CALL_TARGET);
- __ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
+ __ Call(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL);
PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
context()->Plug(rax);
}
@@ -2606,7 +2628,7 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
__ CompareRoot(rcx, Heap::kHashTableMapRootIndex);
__ j(equal, if_false);
- // Look for valueOf symbol in the descriptor array, and indicate false if
+ // Look for valueOf string in the descriptor array, and indicate false if
// found. Since we omit an enumeration index check, if it is added via a
// transition that shares its descriptor array, this is a false positive.
Label entry, loop, done;
@@ -2628,11 +2650,11 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
// Calculate location of the first key name.
__ addq(rbx, Immediate(DescriptorArray::kFirstOffset));
// Loop through all the keys in the descriptor array. If one of these is the
- // symbol valueOf the result is false.
+ // internalized string "valueOf" the result is false.
__ jmp(&entry);
__ bind(&loop);
__ movq(rdx, FieldOperand(rbx, 0));
- __ Cmp(rdx, FACTORY->value_of_symbol());
+ __ Cmp(rdx, FACTORY->value_of_string());
__ j(equal, if_false);
__ addq(rbx, Immediate(DescriptorArray::kDescriptorSize * kPointerSize));
__ bind(&entry);
@@ -2665,6 +2687,28 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
}
+void FullCodeGenerator::EmitIsSymbol(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT(args->length() == 1);
+
+ VisitForAccumulatorValue(args->at(0));
+
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ __ JumpIfSmi(rax, if_false);
+ __ CmpObjectType(rax, SYMBOL_TYPE, rbx);
+ PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+ Split(equal, if_true, if_false, fall_through);
+
+ context()->Plug(if_true, if_false);
+}
+
+
void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
ASSERT(args->length() == 1);
@@ -2867,12 +2911,12 @@ void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
// Functions have class 'Function'.
__ bind(&function);
- __ Move(rax, isolate()->factory()->function_class_symbol());
+ __ Move(rax, isolate()->factory()->function_class_string());
__ jmp(&done);
// Objects with a non-function constructor have class 'Object'.
__ bind(&non_function_constructor);
- __ Move(rax, isolate()->factory()->Object_symbol());
+ __ Move(rax, isolate()->factory()->Object_string());
__ jmp(&done);
// Non-JS objects have class null.
@@ -3034,10 +3078,10 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
__ PrepareCallCFunction(2);
#ifdef _WIN64
__ movq(rcx, object);
- __ movq(rdx, index, RelocInfo::NONE);
+ __ movq(rdx, index, RelocInfo::NONE64);
#else
__ movq(rdi, object);
- __ movq(rsi, index, RelocInfo::NONE);
+ __ movq(rsi, index, RelocInfo::NONE64);
#endif
__ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
@@ -3051,6 +3095,38 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT_EQ(3, args->length());
+
+ VisitForStackValue(args->at(1)); // index
+ VisitForStackValue(args->at(2)); // value
+ __ pop(rcx);
+ __ pop(rbx);
+ VisitForAccumulatorValue(args->at(0)); // string
+
+ static const String::Encoding encoding = String::ONE_BYTE_ENCODING;
+ SeqStringSetCharGenerator::Generate(masm_, encoding, rax, rbx, rcx);
+ context()->Plug(rax);
+}
+
+
+void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ ASSERT_EQ(3, args->length());
+
+ VisitForStackValue(args->at(1)); // index
+ VisitForStackValue(args->at(2)); // value
+ __ pop(rcx);
+ __ pop(rbx);
+ VisitForAccumulatorValue(args->at(0)); // string
+
+ static const String::Encoding encoding = String::TWO_BYTE_ENCODING;
+ SeqStringSetCharGenerator::Generate(masm_, encoding, rax, rbx, rcx);
+ context()->Plug(rax);
+}
+
+
void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
// Load the arguments on the stack and call the runtime function.
ZoneList<Expression*>* args = expr->arguments();
@@ -3200,7 +3276,7 @@ void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
__ bind(&index_out_of_range);
// When the index is out of range, the spec requires us to return
// the empty string.
- __ LoadRoot(result, Heap::kEmptyStringRootIndex);
+ __ LoadRoot(result, Heap::kempty_stringRootIndex);
__ jmp(&done);
__ bind(&need_conversion);
@@ -3526,7 +3602,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ movq(array_length, FieldOperand(array, JSArray::kLengthOffset));
__ SmiCompare(array_length, Smi::FromInt(0));
__ j(not_zero, &non_trivial_array);
- __ LoadRoot(rax, Heap::kEmptyStringRootIndex);
+ __ LoadRoot(rax, Heap::kempty_stringRootIndex);
__ jmp(&return_result);
// Save the array length on the stack.
@@ -3565,7 +3641,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ cmpb(scratch, Immediate(kStringTag | kOneByteStringTag | kSeqStringTag));
__ j(not_equal, &bailout);
__ AddSmiField(string_length,
- FieldOperand(string, SeqAsciiString::kLengthOffset));
+ FieldOperand(string, SeqOneByteString::kLengthOffset));
__ j(overflow, &bailout);
__ incl(index);
__ cmpl(index, array_length);
@@ -3612,7 +3688,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// Add (separator length times (array_length - 1)) to string_length.
__ SmiToInteger32(scratch,
- FieldOperand(string, SeqAsciiString::kLengthOffset));
+ FieldOperand(string, SeqOneByteString::kLengthOffset));
__ decl(index);
__ imull(scratch, index);
__ j(overflow, &bailout);
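The fast join path computes the result length as the sum of the element lengths plus separator_length * (array_length - 1), bailing out to the runtime on overflow; the assembly checks the overflow flag after each addition and after the multiply. The same computation in checked C++ (a sketch; the real code works on smis, not raw integers, and checks per step rather than once at the end):

    #include <cstdint>
    #include <optional>

    // Total length of joining `count` strings (count >= 1; empty arrays
    // are handled earlier) with a separator of `sep_length` characters.
    // Returns nullopt where the assembly would jump to &bailout.
    std::optional<int32_t> JoinedLength(const int32_t* lengths, int count,
                                        int32_t sep_length) {
      int64_t total = 0;
      for (int i = 0; i < count; i++) total += lengths[i];
      total += static_cast<int64_t>(sep_length) * (count - 1);
      if (total > INT32_MAX) return std::nullopt;
      return static_cast<int32_t>(total);
    }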
@@ -3625,10 +3701,10 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ AllocateAsciiString(result_pos, string_length, scratch,
index, string, &bailout);
__ movq(result_operand, result_pos);
- __ lea(result_pos, FieldOperand(result_pos, SeqAsciiString::kHeaderSize));
+ __ lea(result_pos, FieldOperand(result_pos, SeqOneByteString::kHeaderSize));
__ movq(string, separator_operand);
- __ SmiCompare(FieldOperand(string, SeqAsciiString::kLengthOffset),
+ __ SmiCompare(FieldOperand(string, SeqOneByteString::kLengthOffset),
Smi::FromInt(1));
__ j(equal, &one_char_separator);
__ j(greater, &long_separator);
@@ -3654,7 +3730,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ SmiToInteger32(string_length,
FieldOperand(string, String::kLengthOffset));
__ lea(string,
- FieldOperand(string, SeqAsciiString::kHeaderSize));
+ FieldOperand(string, SeqOneByteString::kHeaderSize));
__ CopyBytes(result_pos, string, string_length);
__ incl(index);
__ bind(&loop_1_condition);
@@ -3672,7 +3748,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ bind(&one_char_separator);
// Get the separator ASCII character value.
// Register "string" holds the separator.
- __ movzxbl(scratch, FieldOperand(string, SeqAsciiString::kHeaderSize));
+ __ movzxbl(scratch, FieldOperand(string, SeqOneByteString::kHeaderSize));
__ Set(index, 0);
// Jump into the loop after the code that copies the separator, so the first
// element is not preceded by a separator
@@ -3698,7 +3774,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ SmiToInteger32(string_length,
FieldOperand(string, String::kLengthOffset));
__ lea(string,
- FieldOperand(string, SeqAsciiString::kHeaderSize));
+ FieldOperand(string, SeqOneByteString::kHeaderSize));
__ CopyBytes(result_pos, string, string_length);
__ incl(index);
__ cmpl(index, array_length_operand);
@@ -3723,7 +3799,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ SmiToInteger32(scratch,
FieldOperand(string, String::kLengthOffset));
__ lea(string,
- FieldOperand(string, SeqAsciiString::kHeaderSize));
+ FieldOperand(string, SeqOneByteString::kHeaderSize));
__ movq(separator_operand, string);
// Jump into the loop after the code that copies the separator, so the first
@@ -3749,7 +3825,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ SmiToInteger32(string_length,
FieldOperand(string, String::kLengthOffset));
__ lea(string,
- FieldOperand(string, SeqAsciiString::kHeaderSize));
+ FieldOperand(string, SeqOneByteString::kHeaderSize));
__ CopyBytes(result_pos, string, string_length);
__ incq(index);
__ j(not_equal, &loop_3); // Loop while (index < 0).
@@ -3825,7 +3901,9 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
// but "delete this" is allowed.
ASSERT(language_mode() == CLASSIC_MODE || var->is_this());
if (var->IsUnallocated()) {
- __ push(var->is_qml_global() ? QmlGlobalObjectOperand() : GlobalObjectOperand());
+ __ push(var->is_qml_global()
+ ? QmlGlobalObjectOperand()
+ : GlobalObjectOperand());
__ Push(var->name());
__ Push(Smi::FromInt(kNonStrictMode));
__ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
@@ -3952,7 +4030,7 @@ void FullCodeGenerator::EmitUnaryOperation(UnaryOperation* expr,
// accumulator register rax.
VisitForAccumulatorValue(expr->expression());
SetSourcePosition(expr->position());
- CallIC(stub.GetCode(), RelocInfo::CODE_TARGET,
+ CallIC(stub.GetCode(isolate()), RelocInfo::CODE_TARGET,
expr->UnaryOperationFeedbackId());
context()->Plug(rax);
}
@@ -4067,14 +4145,12 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
SetSourcePosition(expr->position());
// Call stub for +1/-1.
+ __ movq(rdx, rax);
+ __ Move(rax, Smi::FromInt(1));
BinaryOpStub stub(expr->binary_op(), NO_OVERWRITE);
- if (expr->op() == Token::INC) {
- __ Move(rdx, Smi::FromInt(1));
- } else {
- __ movq(rdx, rax);
- __ Move(rax, Smi::FromInt(1));
- }
- CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->CountBinOpFeedbackId());
+ CallIC(stub.GetCode(isolate()),
+ RelocInfo::CODE_TARGET,
+ expr->CountBinOpFeedbackId());
patch_site.EmitPatchInfo();
__ bind(&done);
@@ -4148,7 +4224,9 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
if (proxy != NULL && proxy->var()->IsUnallocated()) {
Comment cmnt(masm_, "Global variable");
__ Move(rcx, proxy->name());
- __ movq(rax, proxy->var()->is_qml_global() ? QmlGlobalObjectOperand() : GlobalObjectOperand());
+ __ movq(rax, proxy->var()->is_qml_global()
+ ? QmlGlobalObjectOperand()
+ : GlobalObjectOperand());
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
// Use a regular load, not a contextual load, to avoid a reference
// error.
@@ -4192,12 +4270,12 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
}
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- if (check->Equals(isolate()->heap()->number_symbol())) {
+ if (check->Equals(isolate()->heap()->number_string())) {
__ JumpIfSmi(rax, if_true);
__ movq(rax, FieldOperand(rax, HeapObject::kMapOffset));
__ CompareRoot(rax, Heap::kHeapNumberMapRootIndex);
Split(equal, if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->string_symbol())) {
+ } else if (check->Equals(isolate()->heap()->string_string())) {
__ JumpIfSmi(rax, if_false);
// Check for undetectable objects => false.
__ CmpObjectType(rax, FIRST_NONSTRING_TYPE, rdx);
@@ -4205,16 +4283,16 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
__ testb(FieldOperand(rdx, Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable));
Split(zero, if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->boolean_symbol())) {
+ } else if (check->Equals(isolate()->heap()->boolean_string())) {
__ CompareRoot(rax, Heap::kTrueValueRootIndex);
__ j(equal, if_true);
__ CompareRoot(rax, Heap::kFalseValueRootIndex);
Split(equal, if_true, if_false, fall_through);
} else if (FLAG_harmony_typeof &&
- check->Equals(isolate()->heap()->null_symbol())) {
+ check->Equals(isolate()->heap()->null_string())) {
__ CompareRoot(rax, Heap::kNullValueRootIndex);
Split(equal, if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->undefined_symbol())) {
+ } else if (check->Equals(isolate()->heap()->undefined_string())) {
__ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
__ j(equal, if_true);
__ JumpIfSmi(rax, if_false);
@@ -4223,19 +4301,23 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
__ testb(FieldOperand(rdx, Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable));
Split(not_zero, if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->function_symbol())) {
+ } else if (check->Equals(isolate()->heap()->function_string())) {
__ JumpIfSmi(rax, if_false);
STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
__ CmpObjectType(rax, JS_FUNCTION_TYPE, rdx);
__ j(equal, if_true);
__ CmpInstanceType(rdx, JS_FUNCTION_PROXY_TYPE);
Split(equal, if_true, if_false, fall_through);
- } else if (check->Equals(isolate()->heap()->object_symbol())) {
+ } else if (check->Equals(isolate()->heap()->object_string())) {
__ JumpIfSmi(rax, if_false);
if (!FLAG_harmony_typeof) {
__ CompareRoot(rax, Heap::kNullValueRootIndex);
__ j(equal, if_true);
}
+ if (FLAG_harmony_symbols) {
+ __ CmpObjectType(rax, SYMBOL_TYPE, rdx);
+ __ j(equal, if_true);
+ }
__ CmpObjectType(rax, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, rdx);
__ j(below, if_false);
__ CmpInstanceType(rdx, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
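Each typeof literal comparison compiles to direct map and instance-type checks rather than materializing the typeof string; the new FLAG_harmony_symbols block extends the "object" answer to Symbol instances. A rough model of the "object" branch over a hypothetical type tag (not a V8 API; only to make the control flow explicit):

    enum TypeTag { kSmi, kHeapNumber, kString, kOddball, kSymbol,
                   kFunction, kFunctionProxy, kOtherSpecObject };

    // typeof x == "object" as emitted above: null counts only without
    // harmony typeof, symbols count only with harmony symbols, and
    // undetectable objects never count.
    bool TypeofIsObject(TypeTag tag, bool is_null, bool undetectable,
                        bool harmony_typeof, bool harmony_symbols) {
      if (tag == kSmi) return false;
      if (is_null) return !harmony_typeof;
      if (tag == kSymbol) return harmony_symbols;
      if (tag == kFunction || tag == kFunctionProxy) return false;
      return tag == kOtherSpecObject && !undetectable;
    }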
@@ -4292,29 +4374,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
default: {
VisitForAccumulatorValue(expr->right());
- Condition cc = no_condition;
- switch (op) {
- case Token::EQ_STRICT:
- case Token::EQ:
- cc = equal;
- break;
- case Token::LT:
- cc = less;
- break;
- case Token::GT:
- cc = greater;
- break;
- case Token::LTE:
- cc = less_equal;
- break;
- case Token::GTE:
- cc = greater_equal;
- break;
- case Token::IN:
- case Token::INSTANCEOF:
- default:
- UNREACHABLE();
- }
+ Condition cc = CompareIC::ComputeCondition(op);
__ pop(rdx);
bool inline_smi_code = ShouldInlineSmiCase(op);
@@ -4331,7 +4391,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
// Record position and call the compare IC.
SetSourcePosition(expr->position());
- Handle<Code> ic = CompareIC::GetUninitialized(op);
+ Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
CallIC(ic, RelocInfo::CODE_TARGET, expr->CompareOperationFeedbackId());
patch_site.EmitPatchInfo();
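The deleted switch is folded into CompareIC::ComputeCondition, which all ports now share; its mapping is exactly what the removed code spelled out. Restated as a self-contained sketch with local stand-in enums:

    enum Condition { no_condition, equal, less, greater, less_equal, greater_equal };
    namespace Token {
      enum Value { EQ, EQ_STRICT, LT, GT, LTE, GTE, IN, INSTANCEOF };
    }

    // Token-to-condition mapping formerly open-coded in
    // VisitCompareOperation; IN and INSTANCEOF never reach here.
    Condition ComputeCondition(Token::Value op) {
      switch (op) {
        case Token::EQ_STRICT:
        case Token::EQ:  return equal;
        case Token::LT:  return less;
        case Token::GT:  return greater;
        case Token::LTE: return less_equal;
        case Token::GTE: return greater_equal;
        default:         return no_condition;  // unreachable
      }
    }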
diff --git a/src/3rdparty/v8/src/x64/ic-x64.cc b/src/3rdparty/v8/src/x64/ic-x64.cc
index efa07a8..15423e4 100644
--- a/src/3rdparty/v8/src/x64/ic-x64.cc
+++ b/src/3rdparty/v8/src/x64/ic-x64.cc
@@ -101,8 +101,8 @@ static void GenerateStringDictionaryReceiverCheck(MacroAssembler* masm,
// Helper function used to load a property from a dictionary backing storage.
// This function may return false negatives, so miss_label
// must always call a backup property load that is complete.
-// This function is safe to call if name is not a symbol, and will jump to
-// the miss_label in that case.
+// This function is safe to call if name is not an internalized string,
+// and will jump to the miss_label in that case.
// The generated code assumes that the receiver has slow properties,
// is not a global object and does not have interceptors.
static void GenerateDictionaryLoad(MacroAssembler* masm,
@@ -160,8 +160,8 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
// storage. This function may fail to store a property even though it
// is in the dictionary, so code at miss_label must always call a
// backup property store that is complete. This function is safe to
-// call if name is not a symbol, and will jump to the miss_label in
-// that case. The generated code assumes that the receiver has slow
+// call if name is not an internalized string, and will jump to the miss_label
+// in that case. The generated code assumes that the receiver has slow
// properties, is not a global object and does not have interceptors.
static void GenerateDictionaryStore(MacroAssembler* masm,
Label* miss_label,
@@ -224,49 +224,6 @@ static void GenerateDictionaryStore(MacroAssembler* masm,
}
-void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rax : receiver
- // -- rcx : name
- // -- rsp[0] : return address
- // -----------------------------------
- Label miss;
-
- StubCompiler::GenerateLoadArrayLength(masm, rax, rdx, &miss);
- __ bind(&miss);
- StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
-}
-
-
-void LoadIC::GenerateStringLength(MacroAssembler* masm, bool support_wrappers) {
- // ----------- S t a t e -------------
- // -- rax : receiver
- // -- rcx : name
- // -- rsp[0] : return address
- // -----------------------------------
- Label miss;
-
- StubCompiler::GenerateLoadStringLength(masm, rax, rdx, rbx, &miss,
- support_wrappers);
- __ bind(&miss);
- StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
-}
-
-
-void LoadIC::GenerateFunctionPrototype(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rax : receiver
- // -- rcx : name
- // -- rsp[0] : return address
- // -----------------------------------
- Label miss;
-
- StubCompiler::GenerateLoadFunctionPrototype(masm, rax, rdx, rbx, &miss);
- __ bind(&miss);
- StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
-}
-
-
// Checks the receiver for special cases (value type, slow case bits).
// Falls through for regular JS object.
static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
@@ -356,31 +313,31 @@ static void GenerateFastArrayLoad(MacroAssembler* masm,
}
-// Checks whether a key is an array index string or a symbol string.
-// Falls through if the key is a symbol.
+// Checks whether a key is an array index string or an internalized string.
+// Falls through if the key is an internalized string.
static void GenerateKeyStringCheck(MacroAssembler* masm,
Register key,
Register map,
Register hash,
Label* index_string,
- Label* not_symbol) {
+ Label* not_internalized) {
// Register use:
// key - holds the key and is unchanged. Assumed to be non-smi.
// Scratch registers:
// map - used to hold the map of the key.
// hash - used to hold the hash of the key.
__ CmpObjectType(key, FIRST_NONSTRING_TYPE, map);
- __ j(above_equal, not_symbol);
+ __ j(above_equal, not_internalized);
// Is the string an array index, with cached numeric value?
__ movl(hash, FieldOperand(key, String::kHashFieldOffset));
__ testl(hash, Immediate(String::kContainsCachedArrayIndexMask));
__ j(zero, index_string); // The value in hash is used at jump target.
- // Is the string a symbol?
- STATIC_ASSERT(kSymbolTag != 0);
+ // Is the string internalized?
+ STATIC_ASSERT(kInternalizedTag != 0);
__ testb(FieldOperand(map, Map::kInstanceTypeOffset),
- Immediate(kIsSymbolMask));
- __ j(zero, not_symbol);
+ Immediate(kIsInternalizedMask));
+ __ j(zero, not_internalized);
}
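The fast keyed paths accept only internalized-string keys because internalization guarantees one object per distinct string content, so the lookup cache can compare keys by pointer. The check itself is a single bit test on the instance-type byte of the key's map; in C++ terms (the mask value is a placeholder, the real one lives in objects.h):

    #include <cstdint>

    const uint8_t kIsInternalizedMask = 0x40;  // placeholder bit position

    // Mirrors the emitted testb: one AND against the map's instance
    // type decides whether the key may use the fast path.
    bool IsInternalized(uint8_t instance_type) {
      return (instance_type & kIsInternalizedMask) != 0;
    }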
@@ -464,7 +421,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
int mask = (KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask);
__ and_(rcx, Immediate(mask));
- // Load the key (consisting of map and symbol) from the cache and
+ // Load the key (consisting of map and internalized string) from the cache and
// check for match.
Label load_in_object_property;
static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
@@ -576,7 +533,7 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
char_at_generator.GenerateSlow(masm, call_helper);
__ bind(&miss);
- GenerateMiss(masm, false);
+ GenerateMiss(masm, MISS);
}
@@ -619,7 +576,7 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
1);
__ bind(&slow);
- GenerateMiss(masm, false);
+ GenerateMiss(masm, MISS);
}
@@ -709,7 +666,9 @@ static void KeyedStoreGenerateGenericHelper(
rbx,
rdi,
slow);
- ElementsTransitionGenerator::GenerateSmiToDouble(masm, slow);
+ AllocationSiteMode mode = AllocationSiteInfo::GetMode(FAST_SMI_ELEMENTS,
+ FAST_DOUBLE_ELEMENTS);
+ ElementsTransitionGenerator::GenerateSmiToDouble(masm, mode, slow);
__ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
__ jmp(&fast_double_without_map_check);
@@ -720,7 +679,9 @@ static void KeyedStoreGenerateGenericHelper(
rbx,
rdi,
slow);
- ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm);
+ mode = AllocationSiteInfo::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
+ ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm, mode,
+ slow);
__ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
__ jmp(&finish_object_store);
@@ -734,7 +695,8 @@ static void KeyedStoreGenerateGenericHelper(
rbx,
rdi,
slow);
- ElementsTransitionGenerator::GenerateDoubleToObject(masm, slow);
+ mode = AllocationSiteInfo::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
+ ElementsTransitionGenerator::GenerateDoubleToObject(masm, mode, slow);
__ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
__ jmp(&finish_object_store);
}
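Every elements-kind transition now threads an AllocationSiteMode through, so arrays created from a tracked allocation site can record, for example, a FAST_SMI_ELEMENTS to FAST_DOUBLE_ELEMENTS transition back at the site. A simplified model of what AllocationSiteInfo::GetMode decides (an assumption for illustration; the real predicate also consults FLAG_track_allocation_sites):

    enum ElementsKind { FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS };
    enum AllocationSiteMode { DONT_TRACK_ALLOCATION_SITE, TRACK_ALLOCATION_SITE };

    // Only transitions out of the most specific kind (smi-only) are
    // interesting to record at an allocation site.
    AllocationSiteMode GetMode(ElementsKind from, ElementsKind to) {
      if (from == FAST_SMI_ELEMENTS &&
          (to == FAST_DOUBLE_ELEMENTS || to == FAST_ELEMENTS)) {
        return TRACK_ALLOCATION_SITE;
      }
      return DONT_TRACK_ALLOCATION_SITE;
    }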
@@ -1113,7 +1075,7 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
__ bind(&check_string);
GenerateKeyStringCheck(masm, rcx, rax, rbx, &index_string, &slow_call);
- // The key is known to be a symbol.
+ // The key is known to be an internalized string.
// If the receiver is a regular JS object with slow properties then do
// a quick inline probe of the receiver's dictionary.
// Otherwise do the monomorphic cache probe.
@@ -1140,7 +1102,7 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
__ bind(&slow_call);
// This branch is taken if:
// - the receiver requires boxing or access check,
- // - the key is neither smi nor symbol,
+ // - the key is neither smi nor internalized string,
// - the value loaded is not a function,
// - there is hope that the runtime will create a monomorphic call stub
// that will get fetched next time.
@@ -1278,7 +1240,7 @@ void KeyedLoadIC::GenerateNonStrictArguments(MacroAssembler* masm) {
__ movq(rax, unmapped_location);
__ Ret();
__ bind(&slow);
- GenerateMiss(masm, false);
+ GenerateMiss(masm, MISS);
}
@@ -1317,7 +1279,7 @@ void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
INLINE_SMI_CHECK);
__ Ret();
__ bind(&slow);
- GenerateMiss(masm, false);
+ GenerateMiss(masm, MISS);
}
@@ -1359,9 +1321,10 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// -----------------------------------
// Probe the stub cache.
- Code::Flags flags = Code::ComputeFlags(Code::LOAD_IC, MONOMORPHIC);
- Isolate::Current()->stub_cache()->GenerateProbe(masm, flags, rax, rcx, rbx,
- rdx);
+ Code::Flags flags = Code::ComputeFlags(
+ Code::LOAD_IC, MONOMORPHIC, Code::HANDLER_FRAGMENT);
+ Isolate::Current()->stub_cache()->GenerateProbe(
+ masm, flags, rax, rcx, rbx, rdx);
// Cache miss: Jump to runtime.
StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
@@ -1411,7 +1374,7 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
}
-void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, bool force_generic) {
+void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, ICMissMode miss_mode) {
// ----------- S t a t e -------------
// -- rax : key
// -- rdx : receiver
@@ -1427,7 +1390,7 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, bool force_generic) {
__ push(rbx); // return address
// Perform tail call to the entry.
- ExternalReference ref = force_generic
+ ExternalReference ref = miss_mode == MISS_FORCE_GENERIC
? ExternalReference(IC_Utility(kKeyedLoadIC_MissForceGeneric),
masm->isolate())
: ExternalReference(IC_Utility(kKeyedLoadIC_Miss), masm->isolate());
@@ -1493,65 +1456,6 @@ void StoreIC::GenerateMiss(MacroAssembler* masm) {
}
-void StoreIC::GenerateArrayLength(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rax : value
- // -- rcx : name
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- //
- // This accepts as a receiver anything JSArray::SetElementsLength accepts
- // (currently anything except for external arrays which means anything with
- // elements of FixedArray type). Value must be a number, but only smis are
- // accepted as the most common case.
-
- Label miss;
-
- Register receiver = rdx;
- Register value = rax;
- Register scratch = rbx;
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, &miss);
-
- // Check that the object is a JS array.
- __ CmpObjectType(receiver, JS_ARRAY_TYPE, scratch);
- __ j(not_equal, &miss);
-
- // Check that elements are FixedArray.
- // We rely on StoreIC_ArrayLength below to deal with all types of
- // fast elements (including COW).
- __ movq(scratch, FieldOperand(receiver, JSArray::kElementsOffset));
- __ CmpObjectType(scratch, FIXED_ARRAY_TYPE, scratch);
- __ j(not_equal, &miss);
-
- // Check that the array has fast properties, otherwise the length
- // property might have been redefined.
- __ movq(scratch, FieldOperand(receiver, JSArray::kPropertiesOffset));
- __ CompareRoot(FieldOperand(scratch, FixedArray::kMapOffset),
- Heap::kHashTableMapRootIndex);
- __ j(equal, &miss);
-
- // Check that value is a smi.
- __ JumpIfNotSmi(value, &miss);
-
- // Prepare tail call to StoreIC_ArrayLength.
- __ pop(scratch);
- __ push(receiver);
- __ push(value);
- __ push(scratch); // return address
-
- ExternalReference ref =
- ExternalReference(IC_Utility(kStoreIC_ArrayLength), masm->isolate());
- __ TailCallExternalReference(ref, 2, 1);
-
- __ bind(&miss);
-
- GenerateMiss(masm);
-}
-
-
void StoreIC::GenerateNormal(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : value
@@ -1638,7 +1542,7 @@ void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
}
-void KeyedStoreIC::GenerateMiss(MacroAssembler* masm, bool force_generic) {
+void KeyedStoreIC::GenerateMiss(MacroAssembler* masm, ICMissMode miss_mode) {
// ----------- S t a t e -------------
// -- rax : value
// -- rcx : key
@@ -1653,7 +1557,7 @@ void KeyedStoreIC::GenerateMiss(MacroAssembler* masm, bool force_generic) {
__ push(rbx); // return address
// Do tail-call to runtime routine.
- ExternalReference ref = force_generic
+ ExternalReference ref = miss_mode == MISS_FORCE_GENERIC
? ExternalReference(IC_Utility(kKeyedStoreIC_MissForceGeneric),
masm->isolate())
: ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
@@ -1670,7 +1574,9 @@ void KeyedStoreIC::GenerateTransitionElementsSmiToDouble(MacroAssembler* masm) {
// Must return the modified receiver in rax.
if (!FLAG_trace_elements_transitions) {
Label fail;
- ElementsTransitionGenerator::GenerateSmiToDouble(masm, &fail);
+ AllocationSiteMode mode = AllocationSiteInfo::GetMode(FAST_SMI_ELEMENTS,
+ FAST_DOUBLE_ELEMENTS);
+ ElementsTransitionGenerator::GenerateSmiToDouble(masm, mode, &fail);
__ movq(rax, rdx);
__ Ret();
__ bind(&fail);
@@ -1693,7 +1599,9 @@ void KeyedStoreIC::GenerateTransitionElementsDoubleToObject(
// Must return the modified receiver in rax.
if (!FLAG_trace_elements_transitions) {
Label fail;
- ElementsTransitionGenerator::GenerateDoubleToObject(masm, &fail);
+ AllocationSiteMode mode = AllocationSiteInfo::GetMode(FAST_DOUBLE_ELEMENTS,
+ FAST_ELEMENTS);
+ ElementsTransitionGenerator::GenerateDoubleToObject(masm, mode, &fail);
__ movq(rax, rdx);
__ Ret();
__ bind(&fail);
@@ -1729,7 +1637,7 @@ Condition CompareIC::ComputeCondition(Token::Value op) {
}
-static bool HasInlinedSmiCode(Address address) {
+bool CompareIC::HasInlinedSmiCode(Address address) {
// The address of the instruction following the call.
Address test_instruction_address =
address + Assembler::kCallTargetAddressOffset;
@@ -1740,39 +1648,6 @@ static bool HasInlinedSmiCode(Address address) {
}
-void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
- HandleScope scope;
- Handle<Code> rewritten;
- State previous_state = GetState();
-
- State state = TargetState(previous_state, HasInlinedSmiCode(address()), x, y);
- if (state == GENERIC) {
- CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS);
- rewritten = stub.GetCode();
- } else {
- ICCompareStub stub(op_, state);
- if (state == KNOWN_OBJECTS) {
- stub.set_known_map(Handle<Map>(Handle<JSObject>::cast(x)->map()));
- }
- rewritten = stub.GetCode();
- }
- set_target(*rewritten);
-
-#ifdef DEBUG
- if (FLAG_trace_ic) {
- PrintF("[CompareIC (%s->%s)#%s]\n",
- GetStateName(previous_state),
- GetStateName(state),
- Token::Name(op_));
- }
-#endif
-
- // Activate inlined smi code.
- if (previous_state == UNINITIALIZED) {
- PatchInlinedSmiCode(address(), ENABLE_INLINED_SMI_CHECK);
- }
-}
-
void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
// The address of the instruction following the call.
Address test_instruction_address =
diff --git a/src/3rdparty/v8/src/x64/lithium-codegen-x64.cc b/src/3rdparty/v8/src/x64/lithium-codegen-x64.cc
index a948ccc..f1fe452 100644
--- a/src/3rdparty/v8/src/x64/lithium-codegen-x64.cc
+++ b/src/3rdparty/v8/src/x64/lithium-codegen-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -88,7 +88,14 @@ void LCodeGen::FinishCode(Handle<Code> code) {
ASSERT(is_done());
code->set_stack_slots(GetStackSlotCount());
code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
+ if (FLAG_weak_embedded_maps_in_optimized_code) {
+ RegisterDependentCodeForEmbeddedMaps(code);
+ }
PopulateDeoptimizationData(code);
+ for (int i = 0 ; i < prototype_maps_.length(); i++) {
+ prototype_maps_.at(i)->AddDependentCode(
+ DependentCode::kPrototypeCheckGroup, code);
+ }
}
@@ -119,50 +126,61 @@ void LCodeGen::Comment(const char* format, ...) {
bool LCodeGen::GeneratePrologue() {
ASSERT(is_generating());
- ProfileEntryHookStub::MaybeCallEntryHook(masm_);
+ if (info()->IsOptimizing()) {
+ ProfileEntryHookStub::MaybeCallEntryHook(masm_);
#ifdef DEBUG
- if (strlen(FLAG_stop_at) > 0 &&
- info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
- __ int3();
- }
+ if (strlen(FLAG_stop_at) > 0 &&
+ info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
+ __ int3();
+ }
#endif
- // Strict mode functions need to replace the receiver with undefined
- // when called as functions (without an explicit receiver
- // object). rcx is zero for method calls and non-zero for function
- // calls.
- if (!info_->is_classic_mode() || info_->is_native()) {
- Label begin;
- __ bind(&begin);
- Label ok;
- __ testq(rcx, rcx);
- __ j(zero, &ok, Label::kNear);
- // +1 for return address.
- int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize;
- __ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
- __ movq(Operand(rsp, receiver_offset), kScratchRegister);
- __ bind(&ok);
- ASSERT(!FLAG_age_code ||
- (kSizeOfOptimizedStrictModePrologue == ok.pos() - begin.pos()));
+ // Strict mode functions need to replace the receiver with undefined
+ // when called as functions (without an explicit receiver
+ // object). rcx is zero for method calls and non-zero for function
+ // calls.
+ if (!info_->is_classic_mode() || info_->is_native()) {
+ Label ok;
+ __ testq(rcx, rcx);
+ __ j(zero, &ok, Label::kNear);
+ // +1 for return address.
+ int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize;
+ __ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
+ __ movq(Operand(rsp, receiver_offset), kScratchRegister);
+ __ bind(&ok);
+ }
}
- __ push(rbp); // Caller's frame pointer.
- __ movq(rbp, rsp);
- __ push(rsi); // Callee's context.
- __ push(rdi); // Callee's JS function.
+ info()->set_prologue_offset(masm_->pc_offset());
+ if (NeedsEagerFrame()) {
+ ASSERT(!frame_is_built_);
+ frame_is_built_ = true;
+ __ push(rbp); // Caller's frame pointer.
+ __ movq(rbp, rsp);
+ __ push(rsi); // Callee's context.
+ if (info()->IsStub()) {
+ __ Push(Smi::FromInt(StackFrame::STUB));
+ } else {
+ __ push(rdi); // Callee's JS function.
+ }
+ }
// Reserve space for the stack slots needed by the code.
int slots = GetStackSlotCount();
if (slots > 0) {
if (FLAG_debug_code) {
+ __ subq(rsp, Immediate(slots * kPointerSize));
+ __ push(rax);
__ Set(rax, slots);
- __ movq(kScratchRegister, kSlotsZapValue, RelocInfo::NONE);
+ __ movq(kScratchRegister, kSlotsZapValue, RelocInfo::NONE64);
Label loop;
__ bind(&loop);
- __ push(kScratchRegister);
+ __ movq(MemOperand(rsp, rax, times_pointer_size, 0),
+ kScratchRegister);
__ decl(rax);
__ j(not_zero, &loop);
+ __ pop(rax);
} else {
__ subq(rsp, Immediate(slots * kPointerSize));
#ifdef _MSC_VER
@@ -177,12 +195,25 @@ bool LCodeGen::GeneratePrologue() {
}
#endif
}
+
+ if (info()->saves_caller_doubles()) {
+ Comment(";;; Save clobbered callee double registers");
+ int count = 0;
+ BitVector* doubles = chunk()->allocated_double_registers();
+ BitVector::Iterator save_iterator(doubles);
+ while (!save_iterator.Done()) {
+ __ movsd(MemOperand(rsp, count * kDoubleSize),
+ XMMRegister::FromAllocationIndex(save_iterator.Current()));
+ save_iterator.Advance();
+ count++;
+ }
+ }
}
// Possibly allocate a local context.
- int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+ int heap_slots = info_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
if (heap_slots > 0 ||
- (scope()->is_qml_mode() && scope()->is_global_scope())) {
+ (scope() != NULL && scope()->is_qml_mode() && scope()->is_global_scope())) {
Comment(";;; Allocate local context");
// Argument to NewContext is the function, which is still in rdi.
__ push(rdi);
@@ -217,7 +248,7 @@ bool LCodeGen::GeneratePrologue() {
}
// Trace the call.
- if (FLAG_trace) {
+ if (FLAG_trace && info()->IsOptimizing()) {
__ CallRuntime(Runtime::kTraceEnter, 0);
}
return !is_aborted();
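The reworked prologue only builds a frame when NeedsEagerFrame() says so, marks stub frames with a STUB smi instead of a function pointer, zaps stack slots in debug builds by storing into the already-reserved area rather than pushing them one by one, and spills any clobbered callee double registers for stubs. The spill loop corresponds to the following model (BitVector iteration replaced by a plain array walk for the sketch):

    // Walk the set of allocated double registers and store each one to
    // its own stack slot, as the movsd loop above does.
    void SaveCallerDoubles(double* stack_area, const bool* allocated,
                           int num_double_regs, const double* xmm_regs) {
      int count = 0;
      for (int i = 0; i < num_double_regs; i++) {
        if (!allocated[i]) continue;
        stack_area[count++] = xmm_regs[i];  // __ movsd(MemOperand(rsp, ...), xmm)
      }
    }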
@@ -237,7 +268,30 @@ bool LCodeGen::GenerateBody() {
}
if (emit_instructions) {
- Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic());
+ if (FLAG_code_comments) {
+ HValue* hydrogen = instr->hydrogen_value();
+ if (hydrogen != NULL) {
+ if (hydrogen->IsChange()) {
+ HValue* changed_value = HChange::cast(hydrogen)->value();
+ int use_id = 0;
+ const char* use_mnemo = "dead";
+ if (hydrogen->UseCount() >= 1) {
+ HValue* use_value = hydrogen->uses().value();
+ use_id = use_value->id();
+ use_mnemo = use_value->Mnemonic();
+ }
+ Comment(";;; @%d: %s. <of #%d %s for #%d %s>",
+ current_instruction_, instr->Mnemonic(),
+ changed_value->id(), changed_value->Mnemonic(),
+ use_id, use_mnemo);
+ } else {
+ Comment(";;; @%d: %s. <#%d>", current_instruction_,
+ instr->Mnemonic(), hydrogen->id());
+ }
+ } else {
+ Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic());
+ }
+ }
instr->CompileToNative(this);
}
}
@@ -247,9 +301,64 @@ bool LCodeGen::GenerateBody() {
bool LCodeGen::GenerateJumpTable() {
+ Label needs_frame_not_call;
+ Label needs_frame_is_call;
for (int i = 0; i < jump_table_.length(); i++) {
__ bind(&jump_table_[i].label);
- __ Jump(jump_table_[i].address, RelocInfo::RUNTIME_ENTRY);
+ Address entry = jump_table_[i].address;
+ bool is_lazy_deopt = jump_table_[i].is_lazy_deopt;
+ Deoptimizer::BailoutType type =
+ is_lazy_deopt ? Deoptimizer::LAZY : Deoptimizer::EAGER;
+ int id = Deoptimizer::GetDeoptimizationId(entry, type);
+ if (id == Deoptimizer::kNotDeoptimizationEntry) {
+ Comment(";;; jump table entry %d.", i);
+ } else {
+ Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
+ }
+ if (jump_table_[i].needs_frame) {
+ __ movq(kScratchRegister, ExternalReference::ForDeoptEntry(entry));
+ if (is_lazy_deopt) {
+ if (needs_frame_is_call.is_bound()) {
+ __ jmp(&needs_frame_is_call);
+ } else {
+ __ bind(&needs_frame_is_call);
+ __ push(rbp);
+ __ movq(rbp, rsp);
+ __ push(rsi);
+ // This variant of deopt can only be used with stubs. Since we don't
+ // have a function pointer to install in the stack frame that we're
+ // building, install a special marker there instead.
+ ASSERT(info()->IsStub());
+ __ Move(rsi, Smi::FromInt(StackFrame::STUB));
+ __ push(rsi);
+ __ movq(rsi, MemOperand(rsp, kPointerSize));
+ __ call(kScratchRegister);
+ }
+ } else {
+ if (needs_frame_not_call.is_bound()) {
+ __ jmp(&needs_frame_not_call);
+ } else {
+ __ bind(&needs_frame_not_call);
+ __ push(rbp);
+ __ movq(rbp, rsp);
+ __ push(rsi);
+ // This variant of deopt can only be used with stubs. Since we don't
+ // have a function pointer to install in the stack frame that we're
+ // building, install a special marker there instead.
+ ASSERT(info()->IsStub());
+ __ Move(rsi, Smi::FromInt(StackFrame::STUB));
+ __ push(rsi);
+ __ movq(rsi, MemOperand(rsp, kPointerSize));
+ __ jmp(kScratchRegister);
+ }
+ }
+ } else {
+ if (is_lazy_deopt) {
+ __ Call(entry, RelocInfo::RUNTIME_ENTRY);
+ } else {
+ __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
+ }
+ }
}
return !is_aborted();
}
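Jump-table entries now come in four flavors: eager or lazy deopt, each with or without a frame to build first. Lazy entries are reached with __ Call so a return address is recorded; entries that need a frame share one out-of-line prologue per flavor that pushes rbp, the context, and a STUB marker before transferring to the deopt entry. A condensed classification of the four cases (names are illustrative):

    enum class DeoptEntryKind {
      kEagerNoFrame,     // __ Jump(entry, RelocInfo::RUNTIME_ENTRY)
      kLazyNoFrame,      // __ Call(entry, RelocInfo::RUNTIME_ENTRY)
      kEagerBuildFrame,  // build stub frame, then jmp kScratchRegister
      kLazyBuildFrame    // build stub frame, then call kScratchRegister
    };

    DeoptEntryKind Classify(bool needs_frame, bool is_lazy_deopt) {
      if (needs_frame) {
        return is_lazy_deopt ? DeoptEntryKind::kLazyBuildFrame
                             : DeoptEntryKind::kEagerBuildFrame;
      }
      return is_lazy_deopt ? DeoptEntryKind::kLazyNoFrame
                           : DeoptEntryKind::kEagerNoFrame;
    }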
@@ -261,10 +370,32 @@ bool LCodeGen::GenerateDeferredCode() {
for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
LDeferredCode* code = deferred_[i];
__ bind(code->entry());
+ if (NeedsDeferredFrame()) {
+ Comment(";;; Deferred build frame @%d: %s.",
+ code->instruction_index(),
+ code->instr()->Mnemonic());
+ ASSERT(!frame_is_built_);
+ ASSERT(info()->IsStub());
+ frame_is_built_ = true;
+ // Build the frame in such a way that rsi isn't trashed.
+ __ push(rbp); // Caller's frame pointer.
+ __ push(Operand(rbp, StandardFrameConstants::kContextOffset));
+ __ Push(Smi::FromInt(StackFrame::STUB));
+ __ lea(rbp, Operand(rsp, 2 * kPointerSize));
+ }
Comment(";;; Deferred code @%d: %s.",
code->instruction_index(),
code->instr()->Mnemonic());
code->Generate();
+ if (NeedsDeferredFrame()) {
+ Comment(";;; Deferred destroy frame @%d: %s.",
+ code->instruction_index(),
+ code->instr()->Mnemonic());
+ ASSERT(frame_is_built_);
+ frame_is_built_ = false;
+ __ movq(rsp, rbp);
+ __ pop(rbp);
+ }
__ jmp(code->exit());
}
}
@@ -319,8 +450,6 @@ bool LCodeGen::IsTaggedConstant(LConstantOperand* op) const {
int LCodeGen::ToInteger32(LConstantOperand* op) const {
HConstant* constant = chunk_->LookupConstant(op);
- ASSERT(chunk_->LookupLiteralRepresentation(op).IsInteger32());
- ASSERT(constant->HasInteger32Value());
return constant->Integer32Value();
}
@@ -343,22 +472,14 @@ Operand LCodeGen::ToOperand(LOperand* op) const {
// Does not handle registers. In X64 assembler, plain registers are not
// representable as an Operand.
ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
- int index = op->index();
- if (index >= 0) {
- // Local or spill slot. Skip the frame pointer, function, and
- // context in the fixed part of the frame.
- return Operand(rbp, -(index + 3) * kPointerSize);
- } else {
- // Incoming parameter. Skip the return address.
- return Operand(rbp, -(index - 1) * kPointerSize);
- }
+ return Operand(rbp, StackSlotOffset(op->index()));
}
void LCodeGen::WriteTranslation(LEnvironment* environment,
Translation* translation,
- int* arguments_index,
- int* arguments_count) {
+ int* pushed_arguments_index,
+ int* pushed_arguments_count) {
if (environment == NULL) return;
// The translation includes one command per value in the environment.
@@ -370,14 +491,16 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
// arguments index points to the first element of a sequence of tagged
// values on the stack that represent the arguments. This needs to be
// kept in sync with the LArgumentsElements implementation.
- *arguments_index = -environment->parameter_count();
- *arguments_count = environment->parameter_count();
+ *pushed_arguments_index = -environment->parameter_count();
+ *pushed_arguments_count = environment->parameter_count();
WriteTranslation(environment->outer(),
translation,
- arguments_index,
- arguments_count);
- int closure_id = *info()->closure() != *environment->closure()
+ pushed_arguments_index,
+ pushed_arguments_count);
+ bool has_closure_id = !info()->closure().is_null() &&
+ *info()->closure() != *environment->closure();
+ int closure_id = has_closure_id
? DefineDeoptimizationLiteral(environment->closure())
: Translation::kSelfLiteralId;
@@ -401,16 +524,26 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
case ARGUMENTS_ADAPTOR:
translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
break;
+ case STUB:
+ translation->BeginCompiledStubFrame();
+ break;
}
// Inlined frames which push their arguments cause the index to be
- // bumped and a new stack area to be used for materialization.
- if (environment->entry() != NULL &&
- environment->entry()->arguments_pushed()) {
- *arguments_index = *arguments_index < 0
- ? GetStackSlotCount()
- : *arguments_index + *arguments_count;
- *arguments_count = environment->entry()->arguments_count() + 1;
+ // bumped and another stack area to be used for materialization; otherwise
+ // the actual argument values for inlined frames are unknown.
+ bool arguments_known = true;
+ int arguments_index = *pushed_arguments_index;
+ int arguments_count = *pushed_arguments_count;
+ if (environment->entry() != NULL) {
+ arguments_known = environment->entry()->arguments_pushed();
+ arguments_index = arguments_index < 0
+ ? GetStackSlotCount() : arguments_index + arguments_count;
+ arguments_count = environment->entry()->arguments_count() + 1;
+ if (environment->entry()->arguments_pushed()) {
+ *pushed_arguments_index = arguments_index;
+ *pushed_arguments_count = arguments_count;
+ }
}
for (int i = 0; i < translation_size; ++i) {
@@ -425,8 +558,9 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
environment->spilled_registers()[value->index()],
environment->HasTaggedValueAt(i),
environment->HasUint32ValueAt(i),
- *arguments_index,
- *arguments_count);
+ arguments_known,
+ arguments_index,
+ arguments_count);
} else if (
value->IsDoubleRegister() &&
environment->spilled_double_registers()[value->index()] != NULL) {
@@ -436,8 +570,9 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
environment->spilled_double_registers()[value->index()],
false,
false,
- *arguments_index,
- *arguments_count);
+ arguments_known,
+ arguments_index,
+ arguments_count);
}
}
@@ -445,8 +580,9 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
value,
environment->HasTaggedValueAt(i),
environment->HasUint32ValueAt(i),
- *arguments_index,
- *arguments_count);
+ arguments_known,
+ arguments_index,
+ arguments_count);
}
}
@@ -455,13 +591,15 @@ void LCodeGen::AddToTranslation(Translation* translation,
LOperand* op,
bool is_tagged,
bool is_uint32,
+ bool arguments_known,
int arguments_index,
int arguments_count) {
if (op == NULL) {
// TODO(twuerthinger): Introduce marker operands to indicate that this value
// is not present and must be reconstructed from the deoptimizer. Currently
// this is only used for the arguments object.
- translation->StoreArgumentsObject(arguments_index, arguments_count);
+ translation->StoreArgumentsObject(
+ arguments_known, arguments_index, arguments_count);
} else if (op->IsStackSlot()) {
if (is_tagged) {
translation->StoreStackSlot(op->index());
@@ -591,22 +729,76 @@ void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
ASSERT(environment->HasBeenRegistered());
int id = environment->deoptimization_index();
- Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER);
+ ASSERT(info()->IsOptimizing() || info()->IsStub());
+ Deoptimizer::BailoutType bailout_type = info()->IsStub()
+ ? Deoptimizer::LAZY
+ : Deoptimizer::EAGER;
+ Address entry =
+ Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
if (entry == NULL) {
Abort("bailout was not prepared");
return;
}
- if (cc == no_condition) {
- __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
+ ASSERT(FLAG_deopt_every_n_times == 0); // Not yet implemented on x64.
+
+ if (FLAG_trap_on_deopt) {
+ Label done;
+ if (cc != no_condition) {
+ __ j(NegateCondition(cc), &done, Label::kNear);
+ }
+ __ int3();
+ __ bind(&done);
+ }
+
+ ASSERT(info()->IsStub() || frame_is_built_);
+ bool needs_lazy_deopt = info()->IsStub();
+ if (cc == no_condition && frame_is_built_) {
+ if (needs_lazy_deopt) {
+ __ Call(entry, RelocInfo::RUNTIME_ENTRY);
+ } else {
+ __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
+ }
} else {
// We often have several deopts to the same entry, reuse the last
// jump entry if this is the case.
if (jump_table_.is_empty() ||
- jump_table_.last().address != entry) {
- jump_table_.Add(JumpTableEntry(entry), zone());
+ jump_table_.last().address != entry ||
+ jump_table_.last().needs_frame != !frame_is_built_ ||
+ jump_table_.last().is_lazy_deopt != needs_lazy_deopt) {
+ JumpTableEntry table_entry(entry, !frame_is_built_, needs_lazy_deopt);
+ jump_table_.Add(table_entry, zone());
+ }
+ if (cc == no_condition) {
+ __ jmp(&jump_table_.last().label);
+ } else {
+ __ j(cc, &jump_table_.last().label);
}
- __ j(cc, &jump_table_.last().label);
+ }
+}
+
+
+void LCodeGen::RegisterDependentCodeForEmbeddedMaps(Handle<Code> code) {
+ ZoneList<Handle<Map> > maps(1, zone());
+ int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
+ for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
+ RelocInfo::Mode mode = it.rinfo()->rmode();
+ if (mode == RelocInfo::EMBEDDED_OBJECT &&
+ it.rinfo()->target_object()->IsMap()) {
+ Handle<Map> map(Map::cast(it.rinfo()->target_object()));
+ if (map->CanTransition()) {
+ maps.Add(map, zone());
+ }
+ }
+ }
+#ifdef VERIFY_HEAP
+ // This disables verification of weak embedded maps after full GC.
+ // AddDependentCode can cause a GC, which would observe the state where
+ // this code is not yet in the depended code lists of the embedded maps.
+ NoWeakEmbeddedMapsVerificationScope disable_verification_of_embedded_maps;
+#endif
+ for (int i = 0; i < maps.length(); i++) {
+ maps.at(i)->AddDependentCode(DependentCode::kWeaklyEmbeddedGroup, code);
}
}
@@ -776,38 +968,38 @@ void LCodeGen::DoCallStub(LCallStub* instr) {
switch (instr->hydrogen()->major_key()) {
case CodeStub::RegExpConstructResult: {
RegExpConstructResultStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
break;
}
case CodeStub::RegExpExec: {
RegExpExecStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
break;
}
case CodeStub::SubString: {
SubStringStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
break;
}
case CodeStub::NumberToString: {
NumberToStringStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
break;
}
case CodeStub::StringAdd: {
StringAddStub stub(NO_STRING_ADD_FLAGS);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
break;
}
case CodeStub::StringCompare: {
StringCompareStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
break;
}
case CodeStub::TranscendentalCache: {
TranscendentalCacheStub stub(instr->transcendental_type(),
TranscendentalCacheStub::TAGGED);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
break;
}
default:
@@ -901,6 +1093,17 @@ void LCodeGen::DoModI(LModI* instr) {
// Slow case, using idiv instruction.
__ bind(&slow);
+
+ // Check for (kMinInt % -1).
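+ // idivl raises #DE on kMinInt / -1, so that combination has to deopt
+ // before reaching the division below.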
+ if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+ Label left_not_min_int;
+ __ cmpl(left_reg, Immediate(kMinInt));
+ __ j(not_zero, &left_not_min_int, Label::kNear);
+ __ cmpl(right_reg, Immediate(-1));
+ DeoptimizeIf(zero, instr->environment());
+ __ bind(&left_not_min_int);
+ }
+
// Sign extend eax to edx.
// (We are using only the low 32 bits of the values.)
__ cdq();
@@ -1007,7 +1210,7 @@ void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) {
__ neg(reg1);
DeoptimizeIf(zero, instr->environment());
}
- __ movq(reg2, multiplier, RelocInfo::NONE);
+ __ movq(reg2, multiplier, RelocInfo::NONE64);
// Result just fits in r64, because it's int32 * uint32.
__ imul(reg2, reg1);
@@ -1018,6 +1221,43 @@ void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) {
void LCodeGen::DoDivI(LDivI* instr) {
+ if (!instr->is_flooring() && instr->hydrogen()->HasPowerOf2Divisor()) {
+ Register dividend = ToRegister(instr->left());
+ int32_t divisor =
+ HConstant::cast(instr->hydrogen()->right())->Integer32Value();
+ int32_t test_value = 0;
+ int32_t power = 0;
+
+ if (divisor > 0) {
+ test_value = divisor - 1;
+ power = WhichPowerOf2(divisor);
+ } else {
+ // Check for (0 / -x) that will produce negative zero.
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ __ testl(dividend, dividend);
+ DeoptimizeIf(zero, instr->environment());
+ }
+ // Check for (kMinInt / -1).
+ if (divisor == -1 && instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+ __ cmpl(dividend, Immediate(kMinInt));
+ DeoptimizeIf(zero, instr->environment());
+ }
+ test_value = -divisor - 1;
+ power = WhichPowerOf2(-divisor);
+ }
+
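+ // For |divisor| == 2^power the remainder is zero iff the low 'power' bits
+ // of the dividend are clear, and the quotient is an arithmetic shift;
+ // e.g. 24 / 8: 24 & 7 == 0 and 24 >> 3 == 3.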
+ if (test_value != 0) {
+ // Deoptimize if remainder is not 0.
+ __ testl(dividend, Immediate(test_value));
+ DeoptimizeIf(not_zero, instr->environment());
+ __ sarl(dividend, Immediate(power));
+ }
+
+ if (divisor < 0) __ negl(dividend);
+
+ return;
+ }
+
LOperand* right = instr->right();
ASSERT(ToRegister(instr->result()).is(rax));
ASSERT(ToRegister(instr->left()).is(rax));
@@ -1028,13 +1268,13 @@ void LCodeGen::DoDivI(LDivI* instr) {
// Check for x / 0.
Register right_reg = ToRegister(right);
- if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
+ if (instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) {
__ testl(right_reg, right_reg);
DeoptimizeIf(zero, instr->environment());
}
// Check for (0 / -x) that will produce negative zero.
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ if (instr->hydrogen_value()->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label left_not_zero;
__ testl(left_reg, left_reg);
__ j(not_zero, &left_not_zero, Label::kNear);
@@ -1043,8 +1283,8 @@ void LCodeGen::DoDivI(LDivI* instr) {
__ bind(&left_not_zero);
}
- // Check for (-kMinInt / -1).
- if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+ // Check for (kMinInt / -1).
+ if (instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow)) {
Label left_not_min_int;
__ cmpl(left_reg, Immediate(kMinInt));
__ j(not_zero, &left_not_min_int, Label::kNear);
@@ -1057,9 +1297,19 @@ void LCodeGen::DoDivI(LDivI* instr) {
__ cdq();
__ idivl(right_reg);
- // Deoptimize if remainder is not 0.
- __ testl(rdx, rdx);
- DeoptimizeIf(not_zero, instr->environment());
+ if (!instr->is_flooring()) {
+ // Deoptimize if remainder is not 0.
+ __ testl(rdx, rdx);
+ DeoptimizeIf(not_zero, instr->environment());
+ } else {
+ Label done;
+ __ testl(rdx, rdx);
+ __ j(zero, &done, Label::kNear);
+ __ xorl(rdx, right_reg);
+ __ sarl(rdx, Immediate(31));
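+ // rdx is now -1 if the remainder and divisor have opposite signs and 0
+ // otherwise, so the add rounds the truncated quotient towards -infinity.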
+ __ addl(rax, rdx);
+ __ bind(&done);
+ }
}
@@ -1406,10 +1656,10 @@ void LCodeGen::DoDateField(LDateField* instr) {
__ PrepareCallCFunction(2);
#ifdef _WIN64
__ movq(rcx, object);
- __ movq(rdx, index, RelocInfo::NONE);
+ __ movq(rdx, index, RelocInfo::NONE64);
#else
__ movq(rdi, object);
- __ movq(rsi, index, RelocInfo::NONE);
+ __ movq(rsi, index, RelocInfo::NONE64);
#endif
__ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
@@ -1418,6 +1668,15 @@ void LCodeGen::DoDateField(LDateField* instr) {
}
+void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
+ SeqStringSetCharGenerator::Generate(masm(),
+ instr->encoding(),
+ ToRegister(instr->string()),
+ ToRegister(instr->index()),
+ ToRegister(instr->value()));
+}
+
+
void LCodeGen::DoBitNotI(LBitNotI* instr) {
LOperand* input = instr->value();
ASSERT(input->Equals(instr->result()));
@@ -1470,17 +1729,17 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
if (right->IsConstantOperand()) {
Immediate right_imm =
Immediate(ToInteger32(LConstantOperand::cast(right)));
- __ cmpq(left_reg, right_imm);
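+ // The inputs are untagged int32s; compare only the low 32 bits, since
+ // the upper halves of the registers may hold stale bits.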
+ __ cmpl(left_reg, right_imm);
__ j(condition, &return_left, Label::kNear);
__ movq(left_reg, right_imm);
} else if (right->IsRegister()) {
Register right_reg = ToRegister(right);
- __ cmpq(left_reg, right_reg);
+ __ cmpl(left_reg, right_reg);
__ j(condition, &return_left, Label::kNear);
__ movq(left_reg, right_reg);
} else {
Operand right_op = ToOperand(right);
- __ cmpq(left_reg, right_op);
+ __ cmpl(left_reg, right_op);
__ j(condition, &return_left, Label::kNear);
__ movq(left_reg, right_op);
}
@@ -1540,6 +1799,7 @@ void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
break;
case Token::DIV:
__ divsd(left, right);
+ __ movaps(left, left);
break;
case Token::MOD:
__ PrepareCallCFunction(2);
@@ -1563,7 +1823,7 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
ASSERT(ToRegister(instr->result()).is(rax));
BinaryOpStub stub(instr->op(), NO_OVERWRITE);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
__ nop(); // Signals no inlined code.
}
@@ -1957,7 +2217,7 @@ void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
- Handle<Code> ic = CompareIC::GetUninitialized(op);
+ Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
CallCode(ic, RelocInfo::CODE_TARGET, instr);
Condition condition = TokenToCondition(op, false);
@@ -2041,7 +2301,7 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
__ JumpIfSmi(input, is_false);
- if (class_name->IsEqualTo(CStrVector("Function"))) {
+ if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Function"))) {
// Assuming the following assertions, we can use the same compares to test
// for both being a function type and being in the object type range.
STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
@@ -2072,7 +2332,7 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
// Objects with a non-function constructor have class 'Object'.
__ CmpObjectType(temp, JS_FUNCTION_TYPE, kScratchRegister);
- if (class_name->IsEqualTo(CStrVector("Object"))) {
+ if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Object"))) {
__ j(not_equal, is_true);
} else {
__ j(not_equal, is_false);
@@ -2083,13 +2343,13 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
__ movq(temp, FieldOperand(temp, JSFunction::kSharedFunctionInfoOffset));
__ movq(temp, FieldOperand(temp,
SharedFunctionInfo::kInstanceClassNameOffset));
- // The class name we are testing against is a symbol because it's a literal.
- // The name in the constructor is a symbol because of the way the context is
- // booted. This routine isn't expected to work for random API-created
+ // The class name we are testing against is internalized since it's a literal.
+ // The name in the constructor is internalized because of the way the context
+ // is booted. This routine isn't expected to work for random API-created
// classes and it doesn't have to because you can't access it with natives
- // syntax. Since both sides are symbols it is sufficient to use an identity
- // comparison.
- ASSERT(class_name->IsSymbol());
+ // syntax. Since both sides are internalized it is sufficient to use an
+ // identity comparison.
+ ASSERT(class_name->IsInternalizedString());
__ Cmp(temp, class_name);
// End with the answer in the z flag.
}
@@ -2127,7 +2387,7 @@ void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
InstanceofStub stub(InstanceofStub::kNoFlags);
__ push(ToRegister(instr->left()));
__ push(ToRegister(instr->right()));
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
Label true_value, done;
__ testq(rax, rax);
__ j(zero, &true_value, Label::kNear);
@@ -2226,7 +2486,7 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
// safepoint with two arguments because stub is going to
// remove the third argument from the stack before jumping
// to instanceof builtin on the slow path.
- CallCodeGeneric(stub.GetCode(),
+ CallCodeGeneric(stub.GetCode(isolate()),
RelocInfo::CODE_TARGET,
instr,
RECORD_SAFEPOINT_WITH_REGISTERS,
@@ -2250,10 +2510,18 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
}
+void LCodeGen::DoInstanceSize(LInstanceSize* instr) {
+ Register object = ToRegister(instr->object());
+ Register result = ToRegister(instr->result());
+ __ movq(result, FieldOperand(object, HeapObject::kMapOffset));
+ __ movzxbq(result, FieldOperand(result, Map::kInstanceSizeOffset));
+}
+
+
void LCodeGen::DoCmpT(LCmpT* instr) {
Token::Value op = instr->op();
- Handle<Code> ic = CompareIC::GetUninitialized(op);
+ Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
CallCode(ic, RelocInfo::CODE_TARGET, instr);
Condition condition = TokenToCondition(op, false);
@@ -2269,15 +2537,33 @@ void LCodeGen::DoCmpT(LCmpT* instr) {
void LCodeGen::DoReturn(LReturn* instr) {
- if (FLAG_trace) {
+ if (FLAG_trace && info()->IsOptimizing()) {
// Preserve the return value on the stack and rely on the runtime
// call to return the value in the same register.
__ push(rax);
__ CallRuntime(Runtime::kTraceExit, 1);
}
- __ movq(rsp, rbp);
- __ pop(rbp);
- __ Ret((GetParameterCount() + 1) * kPointerSize, rcx);
+ if (info()->saves_caller_doubles()) {
+ ASSERT(NeedsEagerFrame());
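+ // Restore the caller-saved double registers that were spilled on frame
+ // entry, one kDoubleSize stack slot per allocated register.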
+ BitVector* doubles = chunk()->allocated_double_registers();
+ BitVector::Iterator save_iterator(doubles);
+ int count = 0;
+ while (!save_iterator.Done()) {
+ __ movsd(XMMRegister::FromAllocationIndex(save_iterator.Current()),
+ MemOperand(rsp, count * kDoubleSize));
+ save_iterator.Advance();
+ count++;
+ }
+ }
+ if (NeedsEagerFrame()) {
+ __ movq(rsp, rbp);
+ __ pop(rbp);
+ }
+ if (info()->IsStub()) {
+ __ Ret(0, r10);
+ } else {
+ __ Ret((GetParameterCount() + 1) * kPointerSize, rcx);
+ }
}
@@ -2625,30 +2911,24 @@ void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
}
-template <class T>
-inline void LCodeGen::PrepareKeyForKeyedOp(T* hydrogen_instr, LOperand* key) {
- if (ArrayOpClobbersKey<T>(hydrogen_instr)) {
+void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
+ ElementsKind elements_kind = instr->elements_kind();
+ LOperand* key = instr->key();
+ if (!key->IsConstantOperand()) {
+ Register key_reg = ToRegister(key);
// Even though the HLoad/StoreKeyed (in this case) instructions force
// the input representation for the key to be an integer, the input
// gets replaced during bound check elimination with the index argument
// to the bounds check, which can be tagged, so that case must be
// handled here, too.
- Register key_reg = ToRegister(key);
- if (hydrogen_instr->key()->representation().IsTagged()) {
+ if (instr->hydrogen()->key()->representation().IsTagged()) {
__ SmiToInteger64(key_reg, key_reg);
- } else if (hydrogen_instr->IsDehoisted()) {
+ } else if (instr->hydrogen()->IsDehoisted()) {
// Sign extend key because it could be a 32 bit negative value
// and the dehoisted address computation happens in 64 bits
__ movsxlq(key_reg, key_reg);
}
}
-}
-
-
-void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
- ElementsKind elements_kind = instr->elements_kind();
- LOperand* key = instr->key();
- PrepareKeyForKeyedOp(instr->hydrogen(), key);
Operand operand(BuildFastArrayOperand(
instr->elements(),
key,
@@ -2708,7 +2988,21 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
XMMRegister result(ToDoubleRegister(instr->result()));
LOperand* key = instr->key();
- PrepareKeyForKeyedOp<HLoadKeyed>(instr->hydrogen(), key);
+ if (!key->IsConstantOperand()) {
+ Register key_reg = ToRegister(key);
+ // Even though the HLoad/StoreKeyed instructions force the input
+ // representation for the key to be an integer, the input gets replaced
+ // during bound check elimination with the index argument to the bounds
+ // check, which can be tagged, so that case must be handled here, too.
+ if (instr->hydrogen()->key()->representation().IsTagged()) {
+ __ SmiToInteger64(key_reg, key_reg);
+ } else if (instr->hydrogen()->IsDehoisted()) {
+ // Sign extend key because it could be a 32 bit negative value
+ // and the dehoisted address computation happens in 64 bits
+ __ movsxlq(key_reg, key_reg);
+ }
+ }
+
if (instr->hydrogen()->RequiresHoleCheck()) {
int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag +
sizeof(kHoleNanLower32);
@@ -2735,7 +3029,21 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
Register result = ToRegister(instr->result());
LOperand* key = instr->key();
- PrepareKeyForKeyedOp<HLoadKeyed>(instr->hydrogen(), key);
+ if (!key->IsConstantOperand()) {
+ Register key_reg = ToRegister(key);
+ // Even though the HLoad/StoreKeyedFastElement instructions force
+ // the input representation for the key to be an integer, the input
+ // gets replaced during bound check elimination with the index
+ // argument to the bounds check, which can be tagged, so that
+ // case must be handled here, too.
+ if (instr->hydrogen()->key()->representation().IsTagged()) {
+ __ SmiToInteger64(key_reg, key_reg);
+ } else if (instr->hydrogen()->IsDehoisted()) {
+ // Sign extend key because it could be a 32 bit negative value
+ // and the dehoisted address computation happens in 64 bits
+ __ movsxlq(key_reg, key_reg);
+ }
+ }
// Load the result.
__ movq(result,
@@ -2989,7 +3297,9 @@ void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
Register result = ToRegister(instr->result());
- __ movq(result, instr->qml_global()?QmlGlobalObjectOperand():GlobalObjectOperand());
+ __ movq(result, instr->qml_global()
+ ? QmlGlobalObjectOperand()
+ : GlobalObjectOperand());
}
@@ -3180,7 +3490,7 @@ void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
DeoptimizeIf(equal, instr->environment());
} else {
Label negative_sign, done;
- // Deoptimize on negative inputs.
+ // Deoptimize on unordered.
__ xorps(xmm_scratch, xmm_scratch); // Zero the register.
__ ucomisd(input_reg, xmm_scratch);
DeoptimizeIf(parity_even, instr->environment());
@@ -3224,45 +3534,59 @@ void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
const XMMRegister xmm_scratch = xmm0;
Register output_reg = ToRegister(instr->result());
XMMRegister input_reg = ToDoubleRegister(instr->value());
+ static int64_t one_half = V8_INT64_C(0x3FE0000000000000); // 0.5
+ static int64_t minus_one_half = V8_INT64_C(0xBFE0000000000000); // -0.5
- Label done;
- // xmm_scratch = 0.5
- __ movq(kScratchRegister, V8_INT64_C(0x3FE0000000000000), RelocInfo::NONE);
+ Label done, round_to_zero, below_one_half, restore;
+ __ movq(kScratchRegister, one_half, RelocInfo::NONE64);
__ movq(xmm_scratch, kScratchRegister);
- Label below_half;
__ ucomisd(xmm_scratch, input_reg);
- // If input_reg is NaN, this doesn't jump.
- __ j(above, &below_half, Label::kNear);
- // input = input + 0.5
- // This addition might give a result that isn't the correct for
- // rounding, due to loss of precision, but only for a number that's
- // so big that the conversion below will overflow anyway.
+ __ j(above, &below_one_half);
+
+ // CVTTSD2SI rounds towards zero, since 0.5 <= x, we use floor(0.5 + x).
__ addsd(xmm_scratch, input_reg);
- // Compute Math.floor(input).
- // Use truncating instruction (OK because input is positive).
__ cvttsd2si(output_reg, xmm_scratch);
// Overflow is signalled with minint.
__ cmpl(output_reg, Immediate(0x80000000));
+ __ RecordComment("D2I conversion overflow");
+ DeoptimizeIf(equal, instr->environment());
+ __ jmp(&done);
+
+ __ bind(&below_one_half);
+ __ movq(kScratchRegister, minus_one_half, RelocInfo::NONE64);
+ __ movq(xmm_scratch, kScratchRegister);
+ __ ucomisd(xmm_scratch, input_reg);
+ __ j(below_equal, &round_to_zero);
+
+ // CVTTSD2SI rounds towards zero, we use ceil(x - (-0.5)) and then
+ // compare and compensate.
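+ // e.g. x = -2.6: x + 0.5 = -2.1 truncates to -2, which differs from
+ // -2.1, so compensate to -3; for x = -2.5, -2.0 truncates exactly and
+ // -2 stands (Math.round rounds half towards +infinity).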
+ __ movq(kScratchRegister, input_reg); // Back up input_reg.
+ __ subsd(input_reg, xmm_scratch);
+ __ cvttsd2si(output_reg, input_reg);
+ // Catch minint due to overflow, and to prevent overflow when compensating.
+ __ cmpl(output_reg, Immediate(0x80000000));
+ __ RecordComment("D2I conversion overflow");
DeoptimizeIf(equal, instr->environment());
+
+ __ cvtlsi2sd(xmm_scratch, output_reg);
+ __ ucomisd(input_reg, xmm_scratch);
+ __ j(equal, &restore, Label::kNear);
+ __ subl(output_reg, Immediate(1));
+ // No overflow because we already ruled out minint.
+ __ bind(&restore);
+ __ movq(input_reg, kScratchRegister); // Restore input_reg.
__ jmp(&done);
- __ bind(&below_half);
+ __ bind(&round_to_zero);
+ // We return 0 for the input range [+0, 0.5[, or [-0.5, 0.5[ if
+ // we can ignore the difference between a result of -0 and +0.
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- // Bailout if negative (including -0).
__ movq(output_reg, input_reg);
__ testq(output_reg, output_reg);
+ __ RecordComment("Minus zero");
DeoptimizeIf(negative, instr->environment());
- } else {
- // Bailout if below -0.5, otherwise round to (positive) zero, even
- // if negative.
- // xmm_scrach = -0.5
- __ movq(kScratchRegister, V8_INT64_C(0xBFE0000000000000), RelocInfo::NONE);
- __ movq(xmm_scratch, kScratchRegister);
- __ ucomisd(input_reg, xmm_scratch);
- DeoptimizeIf(below, instr->environment());
}
- __ xorl(output_reg, output_reg);
-
+ __ Set(output_reg, 0);
__ bind(&done);
}
@@ -3285,7 +3609,7 @@ void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
Label done, sqrt;
// Check base for -Infinity. According to IEEE-754, double-precision
// -Infinity has the highest 12 bits set and the lowest 52 bits cleared.
- __ movq(kScratchRegister, V8_INT64_C(0xFFF0000000000000), RelocInfo::NONE);
+ __ movq(kScratchRegister, V8_INT64_C(0xFFF0000000000000), RelocInfo::NONE64);
__ movq(xmm_scratch, kScratchRegister);
__ ucomisd(xmm_scratch, input_reg);
// Comparing -Infinity with NaN results in "unordered", which sets the
@@ -3390,8 +3714,7 @@ void LCodeGen::DoRandom(LRandom* instr) {
// state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16)
// Only operate on the lower 32 bit of rax.
- __ movl(rdx, rax);
- __ andl(rdx, Immediate(0xFFFF));
+ __ movzxwl(rdx, rax);
__ imull(rdx, rdx, Immediate(18273));
__ shrl(rax, Immediate(16));
__ addl(rax, rdx);
@@ -3399,8 +3722,7 @@ void LCodeGen::DoRandom(LRandom* instr) {
__ movl(FieldOperand(rbx, ByteArray::kHeaderSize), rax);
// state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16)
- __ movl(rdx, rcx);
- __ andl(rdx, Immediate(0xFFFF));
+ __ movzxwl(rdx, rcx);
__ imull(rdx, rdx, Immediate(36969));
__ shrl(rcx, Immediate(16));
__ addl(rcx, rdx);
@@ -3416,10 +3738,10 @@ void LCodeGen::DoRandom(LRandom* instr) {
// Convert 32 random bits in rax to 0.(32 random bits) in a double
// by computing:
// ( 1.(20 0s)(32 random bits) x 2^20 ) - ( 1.0 x 2^20 ).
- __ movl(rcx, Immediate(0x49800000)); // 1.0 x 2^20 as single.
- __ movd(xmm2, rcx);
+ __ movq(rcx, V8_INT64_C(0x4130000000000000),
+ RelocInfo::NONE64); // 1.0 x 2^20 as double
+ __ movq(xmm2, rcx);
__ movd(xmm1, rax);
- __ cvtss2sd(xmm2, xmm2);
__ xorps(xmm1, xmm2);
__ subsd(xmm1, xmm2);
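+ // xor-ing the 32 random bits into the mantissa of 1.0 x 2^20 yields a
+ // double in [2^20, 2^20 + 1), so the subtraction leaves the fraction
+ // 0.(32 random bits).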
}
@@ -3433,11 +3755,21 @@ void LCodeGen::DoDeferredRandom(LRandom* instr) {
}
+void LCodeGen::DoMathExp(LMathExp* instr) {
+ XMMRegister input = ToDoubleRegister(instr->value());
+ XMMRegister result = ToDoubleRegister(instr->result());
+ Register temp1 = ToRegister(instr->temp1());
+ Register temp2 = ToRegister(instr->temp2());
+
+ MathExpGenerator::EmitMathExp(masm(), input, result, xmm0, temp1, temp2);
+}
+
+
void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
TranscendentalCacheStub stub(TranscendentalCache::LOG,
TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}
@@ -3445,7 +3777,7 @@ void LCodeGen::DoMathTan(LUnaryMathOperation* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
TranscendentalCacheStub stub(TranscendentalCache::TAN,
TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}
@@ -3453,7 +3785,7 @@ void LCodeGen::DoMathCos(LUnaryMathOperation* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
TranscendentalCacheStub stub(TranscendentalCache::COS,
TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}
@@ -3461,7 +3793,7 @@ void LCodeGen::DoMathSin(LUnaryMathOperation* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
TranscendentalCacheStub stub(TranscendentalCache::SIN,
TranscendentalCacheStub::UNTAGGED);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}
@@ -3553,7 +3885,7 @@ void LCodeGen::DoCallFunction(LCallFunction* instr) {
int arity = instr->arity();
CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
}
@@ -3584,9 +3916,27 @@ void LCodeGen::DoCallNew(LCallNew* instr) {
ASSERT(ToRegister(instr->constructor()).is(rdi));
ASSERT(ToRegister(instr->result()).is(rax));
+ __ Set(rax, instr->arity());
+ if (FLAG_optimize_constructed_arrays) {
+ // No cell in ebx for construct type feedback in optimized code
+ Handle<Object> undefined_value(isolate()->factory()->undefined_value());
+ __ Move(rbx, undefined_value);
+ }
CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
+}
+
+
+void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
+ ASSERT(ToRegister(instr->constructor()).is(rdi));
+ ASSERT(ToRegister(instr->result()).is(rax));
+ ASSERT(FLAG_optimize_constructed_arrays);
+
__ Set(rax, instr->arity());
- CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+ __ Move(rbx, instr->hydrogen()->property_cell());
+ Handle<Code> array_construct_code =
+ isolate()->builtins()->ArrayConstructCode();
+ CallCode(array_construct_code, RelocInfo::CONSTRUCT_CALL, instr);
}
@@ -3667,28 +4017,9 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
}
-void LCodeGen::DeoptIfTaggedButNotSmi(LEnvironment* environment,
- HValue* value,
- LOperand* operand) {
- if (value->representation().IsTagged() && !value->type().IsSmi()) {
- Condition cc;
- if (operand->IsRegister()) {
- cc = masm()->CheckSmi(ToRegister(operand));
- } else {
- cc = masm()->CheckSmi(ToOperand(operand));
- }
- DeoptimizeIf(NegateCondition(cc), environment);
- }
-}
-
-
void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
- DeoptIfTaggedButNotSmi(instr->environment(),
- instr->hydrogen()->length(),
- instr->length());
- DeoptIfTaggedButNotSmi(instr->environment(),
- instr->hydrogen()->index(),
- instr->index());
+ if (instr->hydrogen()->skip_check()) return;
+
if (instr->length()->IsRegister()) {
Register reg = ToRegister(instr->length());
if (!instr->hydrogen()->length()->representation().IsTagged()) {
@@ -3730,7 +4061,21 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
ElementsKind elements_kind = instr->elements_kind();
LOperand* key = instr->key();
- PrepareKeyForKeyedOp<HStoreKeyed>(instr->hydrogen(), key);
+ if (!key->IsConstantOperand()) {
+ Register key_reg = ToRegister(key);
+ // Even though the HLoad/StoreKeyedFastElement instructions force
+ // the input representation for the key to be an integer, the input
+ // gets replaced during bound check elimination with the index
+ // argument to the bounds check, which can be tagged, so that case
+ // must be handled here, too.
+ if (instr->hydrogen()->key()->representation().IsTagged()) {
+ __ SmiToInteger64(key_reg, key_reg);
+ } else if (instr->hydrogen()->IsDehoisted()) {
+ // Sign extend key because it could be a 32 bit negative value
+ // and the dehoisted address computation happens in 64 bits
+ __ movsxlq(key_reg, key_reg);
+ }
+ }
Operand operand(BuildFastArrayOperand(
instr->elements(),
key,
@@ -3780,7 +4125,22 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
XMMRegister value = ToDoubleRegister(instr->value());
LOperand* key = instr->key();
- PrepareKeyForKeyedOp<HStoreKeyed>(instr->hydrogen(), key);
+ if (!key->IsConstantOperand()) {
+ Register key_reg = ToRegister(key);
+ // Even though the HLoad/StoreKeyedFastElement instructions force
+ // the input representation for the key to be an integer, the
+ // input gets replaced during bound check elimination with the index
+ // argument to the bounds check, which can be tagged, so that case
+ // must be handled here, too.
+ if (instr->hydrogen()->key()->representation().IsTagged()) {
+ __ SmiToInteger64(key_reg, key_reg);
+ } else if (instr->hydrogen()->IsDehoisted()) {
+ // Sign extend key because it could be a 32 bit negative value
+ // and the dehoisted address computation happens in 64 bits
+ __ movsxlq(key_reg, key_reg);
+ }
+ }
+
if (instr->NeedsCanonicalization()) {
Label have_value;
@@ -3809,7 +4169,21 @@ void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
Register value = ToRegister(instr->value());
Register elements = ToRegister(instr->elements());
LOperand* key = instr->key();
- PrepareKeyForKeyedOp<HStoreKeyed>(instr->hydrogen(), key);
+ if (!key->IsConstantOperand()) {
+ Register key_reg = ToRegister(key);
+ // Even though the HLoad/StoreKeyedFastElement instructions force
+ // the input representation for the key to be an integer, the
+ // input gets replaced during bound check elimination with the index
+ // argument to the bounds check, which can be tagged, so that case
+ // must be handled here, too.
+ if (instr->hydrogen()->key()->representation().IsTagged()) {
+ __ SmiToInteger64(key_reg, key_reg);
+ } else if (instr->hydrogen()->IsDehoisted()) {
+ // Sign extend key because it could be a 32 bit negative value
+ // and the dehoisted address computation happens in 64 bits
+ __ movsxlq(key_reg, key_reg);
+ }
+ }
Operand operand =
BuildFastArrayOperand(instr->elements(),
@@ -3864,28 +4238,40 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
Register object_reg = ToRegister(instr->object());
- Register new_map_reg = ToRegister(instr->new_map_temp());
Handle<Map> from_map = instr->original_map();
Handle<Map> to_map = instr->transitioned_map();
- ElementsKind from_kind = from_map->elements_kind();
- ElementsKind to_kind = to_map->elements_kind();
+ ElementsKind from_kind = instr->from_kind();
+ ElementsKind to_kind = instr->to_kind();
Label not_applicable;
__ Cmp(FieldOperand(object_reg, HeapObject::kMapOffset), from_map);
__ j(not_equal, &not_applicable);
- __ movq(new_map_reg, to_map, RelocInfo::EMBEDDED_OBJECT);
if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
+ Register new_map_reg = ToRegister(instr->new_map_temp());
+ __ movq(new_map_reg, to_map, RelocInfo::EMBEDDED_OBJECT);
__ movq(FieldOperand(object_reg, HeapObject::kMapOffset), new_map_reg);
// Write barrier.
ASSERT_NE(instr->temp(), NULL);
__ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
ToRegister(instr->temp()), kDontSaveFPRegs);
+ } else if (FLAG_compiled_transitions) {
+ PushSafepointRegistersScope scope(this);
+ if (!object_reg.is(rax)) {
+ __ movq(rax, object_reg);
+ }
+ __ Move(rbx, to_map);
+ TransitionElementsKindStub stub(from_kind, to_kind);
+ __ CallStub(&stub);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
} else if (IsFastSmiElementsKind(from_kind) &&
- IsFastDoubleElementsKind(to_kind)) {
+ IsFastDoubleElementsKind(to_kind)) {
Register fixed_object_reg = ToRegister(instr->temp());
ASSERT(fixed_object_reg.is(rdx));
+ Register new_map_reg = ToRegister(instr->new_map_temp());
ASSERT(new_map_reg.is(rbx));
+ __ movq(new_map_reg, to_map, RelocInfo::EMBEDDED_OBJECT);
__ movq(fixed_object_reg, object_reg);
CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(),
RelocInfo::CODE_TARGET, instr);
@@ -3893,7 +4279,9 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
IsFastObjectElementsKind(to_kind)) {
Register fixed_object_reg = ToRegister(instr->temp());
ASSERT(fixed_object_reg.is(rdx));
+ Register new_map_reg = ToRegister(instr->new_map_temp());
ASSERT(new_map_reg.is(rbx));
+ __ movq(new_map_reg, to_map, RelocInfo::EMBEDDED_OBJECT);
__ movq(fixed_object_reg, object_reg);
CallCode(isolate()->builtins()->TransitionElementsDoubleToObject(),
RelocInfo::CODE_TARGET, instr);
@@ -3904,11 +4292,19 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
}
+void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
+ Register object = ToRegister(instr->object());
+ Register temp = ToRegister(instr->temp());
+ __ TestJSArrayForAllocationSiteInfo(object, temp);
+ DeoptimizeIf(equal, instr->environment());
+}
+
+
void LCodeGen::DoStringAdd(LStringAdd* instr) {
EmitPushTaggedOperand(instr->left());
EmitPushTaggedOperand(instr->right());
StringAddStub stub(NO_STRING_CHECK_IN_STUB);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}
@@ -3983,7 +4379,7 @@ void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
Register result = ToRegister(instr->result());
ASSERT(!char_code.is(result));
- __ cmpl(char_code, Immediate(String::kMaxAsciiCharCode));
+ __ cmpl(char_code, Immediate(String::kMaxOneByteCharCode));
__ j(above, deferred->entry());
__ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
__ movq(result, FieldOperand(result,
@@ -4130,6 +4526,36 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
Register reg = ToRegister(instr->result());
Register tmp = ToRegister(instr->temp());
+ bool convert_hole = false;
+ HValue* change_input = instr->hydrogen()->value();
+ if (change_input->IsLoadKeyed()) {
+ HLoadKeyed* load = HLoadKeyed::cast(change_input);
+ convert_hole = load->UsesMustHandleHole();
+ }
+
+ Label no_special_nan_handling;
+ Label done;
+ if (convert_hole) {
+ XMMRegister input_reg = ToDoubleRegister(instr->value());
+ __ ucomisd(input_reg, input_reg);
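+ // A self-compare is unordered (sets PF) only for NaN; any non-NaN value
+ // cannot be the hole and skips the special handling.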
+ __ j(parity_odd, &no_special_nan_handling);
+ __ subq(rsp, Immediate(kDoubleSize));
+ __ movsd(MemOperand(rsp, 0), input_reg);
+ __ cmpl(MemOperand(rsp, sizeof(kHoleNanLower32)),
+ Immediate(kHoleNanUpper32));
+ Label canonicalize;
+ __ j(not_equal, &canonicalize);
+ __ addq(rsp, Immediate(kDoubleSize));
+ __ Move(reg, factory()->the_hole_value());
+ __ jmp(&done);
+ __ bind(&canonicalize);
+ __ addq(rsp, Immediate(kDoubleSize));
+ __ Set(kScratchRegister, BitCast<uint64_t>(
+ FixedDoubleArray::canonical_not_the_hole_nan_as_double()));
+ __ movq(input_reg, kScratchRegister);
+ }
+
+ __ bind(&no_special_nan_handling);
DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
if (FLAG_inline_new) {
__ AllocateHeapNumber(reg, tmp, deferred->entry());
@@ -4138,6 +4564,8 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
}
__ bind(deferred->exit());
__ movsd(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
+
+ __ bind(&done);
}
@@ -4183,43 +4611,58 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
XMMRegister result_reg,
bool deoptimize_on_undefined,
bool deoptimize_on_minus_zero,
- LEnvironment* env) {
+ LEnvironment* env,
+ NumberUntagDMode mode) {
Label load_smi, done;
- // Smi check.
- __ JumpIfSmi(input_reg, &load_smi, Label::kNear);
+ if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
+ // Smi check.
+ __ JumpIfSmi(input_reg, &load_smi, Label::kNear);
- // Heap number map check.
- __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
- if (deoptimize_on_undefined) {
- DeoptimizeIf(not_equal, env);
- } else {
- Label heap_number;
- __ j(equal, &heap_number, Label::kNear);
+ // Heap number map check.
+ __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
+ Heap::kHeapNumberMapRootIndex);
+ if (deoptimize_on_undefined) {
+ DeoptimizeIf(not_equal, env);
+ } else {
+ Label heap_number;
+ __ j(equal, &heap_number, Label::kNear);
- __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
- DeoptimizeIf(not_equal, env);
+ __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
+ DeoptimizeIf(not_equal, env);
- // Convert undefined to NaN. Compute NaN as 0/0.
- __ xorps(result_reg, result_reg);
- __ divsd(result_reg, result_reg);
- __ jmp(&done, Label::kNear);
+ // Convert undefined to NaN. Compute NaN as 0/0.
+ __ xorps(result_reg, result_reg);
+ __ divsd(result_reg, result_reg);
+ __ jmp(&done, Label::kNear);
- __ bind(&heap_number);
- }
- // Heap number to XMM conversion.
- __ movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
- if (deoptimize_on_minus_zero) {
- XMMRegister xmm_scratch = xmm0;
- __ xorps(xmm_scratch, xmm_scratch);
- __ ucomisd(xmm_scratch, result_reg);
- __ j(not_equal, &done, Label::kNear);
- __ movmskpd(kScratchRegister, result_reg);
- __ testq(kScratchRegister, Immediate(1));
- DeoptimizeIf(not_zero, env);
+ __ bind(&heap_number);
+ }
+ // Heap number to XMM conversion.
+ __ movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
+ if (deoptimize_on_minus_zero) {
+ XMMRegister xmm_scratch = xmm0;
+ __ xorps(xmm_scratch, xmm_scratch);
+ __ ucomisd(xmm_scratch, result_reg);
+ __ j(not_equal, &done, Label::kNear);
+ __ movmskpd(kScratchRegister, result_reg);
+ __ testq(kScratchRegister, Immediate(1));
+ DeoptimizeIf(not_zero, env);
+ }
+ __ jmp(&done, Label::kNear);
+ } else if (mode == NUMBER_CANDIDATE_IS_SMI_OR_HOLE) {
+ __ testq(input_reg, Immediate(kSmiTagMask));
+ DeoptimizeIf(not_equal, env);
+ } else if (mode == NUMBER_CANDIDATE_IS_SMI_CONVERT_HOLE) {
+ __ testq(input_reg, Immediate(kSmiTagMask));
+ __ j(zero, &load_smi);
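+ // A non-smi can only be the hole sentinel in this mode; materialize the
+ // hole NaN so callers using ALLOW_RETURN_HOLE see it unchanged.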
+ __ Set(kScratchRegister, BitCast<uint64_t>(
+ FixedDoubleArray::hole_nan_as_double()));
+ __ movq(result_reg, kScratchRegister);
+ __ jmp(&done, Label::kNear);
+ } else {
+ ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
}
- __ jmp(&done, Label::kNear);
// Smi to XMM conversion
__ bind(&load_smi);
@@ -4308,10 +4751,28 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
Register input_reg = ToRegister(input);
XMMRegister result_reg = ToDoubleRegister(result);
+ NumberUntagDMode mode = NUMBER_CANDIDATE_IS_ANY_TAGGED;
+ HValue* value = instr->hydrogen()->value();
+ if (value->type().IsSmi()) {
+ if (value->IsLoadKeyed()) {
+ HLoadKeyed* load = HLoadKeyed::cast(value);
+ if (load->UsesMustHandleHole()) {
+ if (load->hole_mode() == ALLOW_RETURN_HOLE) {
+ mode = NUMBER_CANDIDATE_IS_SMI_CONVERT_HOLE;
+ } else {
+ mode = NUMBER_CANDIDATE_IS_SMI_OR_HOLE;
+ }
+ } else {
+ mode = NUMBER_CANDIDATE_IS_SMI;
+ }
+ }
+ }
+
EmitNumberUntagD(input_reg, result_reg,
instr->hydrogen()->deoptimize_on_undefined(),
instr->hydrogen()->deoptimize_on_minus_zero(),
- instr->environment());
+ instr->environment(),
+ mode);
}
@@ -4328,7 +4789,9 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
// Performs a truncating conversion of a floating point number as used by
// the JS bitwise operations.
__ cvttsd2siq(result_reg, input_reg);
- __ movq(kScratchRegister, V8_INT64_C(0x8000000000000000), RelocInfo::NONE);
+ __ movq(kScratchRegister,
+ V8_INT64_C(0x8000000000000000),
+ RelocInfo::NONE64);
__ cmpq(result_reg, kScratchRegister);
DeoptimizeIf(equal, instr->environment());
} else {
@@ -4433,10 +4896,10 @@ void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
void LCodeGen::DoCheckMapCommon(Register reg,
Handle<Map> map,
CompareMapMode mode,
- LEnvironment* env) {
+ LInstruction* instr) {
Label success;
__ CompareMap(reg, map, &success, mode);
- DeoptimizeIf(not_equal, env);
+ DeoptimizeIf(not_equal, instr->environment());
__ bind(&success);
}
@@ -4454,7 +4917,7 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
__ j(equal, &success);
}
Handle<Map> map = map_set->last();
- DoCheckMapCommon(reg, map, REQUIRE_EXACT_MAP, instr->environment());
+ DoCheckMapCommon(reg, map, REQUIRE_EXACT_MAP, instr);
__ bind(&success);
}
@@ -4512,25 +4975,22 @@ void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
ASSERT(instr->temp()->Equals(instr->result()));
Register reg = ToRegister(instr->temp());
- Handle<JSObject> holder = instr->holder();
- Handle<JSObject> current_prototype = instr->prototype();
+ ZoneList<Handle<JSObject> >* prototypes = instr->prototypes();
+ ZoneList<Handle<Map> >* maps = instr->maps();
- // Load prototype object.
- __ LoadHeapObject(reg, current_prototype);
+ ASSERT(prototypes->length() == maps->length());
- // Check prototype maps up to the holder.
- while (!current_prototype.is_identical_to(holder)) {
- DoCheckMapCommon(reg, Handle<Map>(current_prototype->map()),
- ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
- current_prototype =
- Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
- // Load next prototype object.
- __ LoadHeapObject(reg, current_prototype);
+ if (instr->hydrogen()->CanOmitPrototypeChecks()) {
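+ // When the checks can be omitted, just record the maps; a dependency on
+ // them is presumably registered so the code deoptimizes if one changes.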
+ for (int i = 0; i < maps->length(); i++) {
+ prototype_maps_.Add(maps->at(i), info()->zone());
+ }
+ __ LoadHeapObject(reg, prototypes->at(prototypes->length() - 1));
+ } else {
+ for (int i = 0; i < prototypes->length(); i++) {
+ __ LoadHeapObject(reg, prototypes->at(i));
+ DoCheckMapCommon(reg, maps->at(i), ALLOW_ELEMENT_TRANSITION_MAPS, instr);
+ }
}
-
- // Check the holder map.
- DoCheckMapCommon(reg, Handle<Map>(current_prototype->map()),
- ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
}
@@ -4631,10 +5091,63 @@ void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) {
}
+void LCodeGen::DoAllocate(LAllocate* instr) {
+ class DeferredAllocate: public LDeferredCode {
+ public:
+ DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() { codegen()->DoDeferredAllocate(instr_); }
+ virtual LInstruction* instr() { return instr_; }
+ private:
+ LAllocate* instr_;
+ };
+
+ DeferredAllocate* deferred =
+ new(zone()) DeferredAllocate(this, instr);
+
+ Register result = ToRegister(instr->result());
+ Register temp = ToRegister(instr->temp());
+
+ // Allocate memory for the object.
+ AllocationFlags flags = TAG_OBJECT;
+ if (instr->hydrogen()->MustAllocateDoubleAligned()) {
+ flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
+ }
+ if (instr->size()->IsConstantOperand()) {
+ int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+ __ AllocateInNewSpace(size, result, temp, no_reg, deferred->entry(), flags);
+ } else {
+ Register size = ToRegister(instr->size());
+ __ AllocateInNewSpace(size, result, temp, no_reg, deferred->entry(), flags);
+ }
+
+ __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
+ Register size = ToRegister(instr->size());
+ Register result = ToRegister(instr->result());
+
+ // TODO(3095996): Get rid of this. For now, we need to make the
+ // result register contain a valid pointer because it is already
+ // contained in the register pointer map.
+ __ Set(result, 0);
+
+ PushSafepointRegistersScope scope(this);
+ __ Integer32ToSmi(size, size);
+ __ push(size);
+ CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr);
+ __ StoreToSafepointRegisterSlot(result, rax);
+}
+
+
void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
Handle<FixedArray> literals(instr->environment()->closure()->literals());
ElementsKind boilerplate_elements_kind =
instr->hydrogen()->boilerplate_elements_kind();
+ AllocationSiteMode allocation_site_mode =
+ instr->hydrogen()->allocation_site_mode();
// Deopt if the array literal boilerplate ElementsKind is of a type different
// than the expected one. The check isn't necessary if the boilerplate has
@@ -4665,8 +5178,8 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
ASSERT(instr->hydrogen()->depth() == 1);
FastCloneShallowArrayStub::Mode mode =
FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS;
- FastCloneShallowArrayStub stub(mode, length);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ FastCloneShallowArrayStub stub(mode, DONT_TRACK_ALLOCATION_SITE, length);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
} else if (instr->hydrogen()->depth() > 1) {
CallRuntime(Runtime::kCreateArrayLiteral, 3, instr);
} else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
@@ -4674,10 +5187,10 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
} else {
FastCloneShallowArrayStub::Mode mode =
boilerplate_elements_kind == FAST_DOUBLE_ELEMENTS
- ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
- : FastCloneShallowArrayStub::CLONE_ELEMENTS;
- FastCloneShallowArrayStub stub(mode, length);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
+ : FastCloneShallowArrayStub::CLONE_ELEMENTS;
+ FastCloneShallowArrayStub stub(mode, allocation_site_mode, length);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}
}
@@ -4685,10 +5198,14 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
Register result,
Register source,
- int* offset) {
+ int* offset,
+ AllocationSiteMode mode) {
ASSERT(!source.is(rcx));
ASSERT(!result.is(rcx));
+ bool create_allocation_site_info = mode == TRACK_ALLOCATION_SITE &&
+ object->map()->CanTrackAllocationSite();
+
// Only elements backing stores for non-COW arrays need to be copied.
Handle<FixedArrayBase> elements(object->elements());
bool has_elements = elements->length() > 0 &&
@@ -4698,8 +5215,13 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
// this object and its backing store.
int object_offset = *offset;
int object_size = object->map()->instance_size();
- int elements_offset = *offset + object_size;
int elements_size = has_elements ? elements->Size() : 0;
+ int elements_offset = *offset + object_size;
+ if (create_allocation_site_info) {
+ elements_offset += AllocationSiteInfo::kSize;
+ *offset += AllocationSiteInfo::kSize;
+ }
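+ // The AllocationSiteInfo slot is laid out between the object body and
+ // its elements backing store, hence both offsets are bumped past it.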
+
*offset += object_size + elements_size;
// Copy object header.
@@ -4718,22 +5240,31 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
// Copy in-object properties.
for (int i = 0; i < inobject_properties; i++) {
int total_offset = object_offset + object->GetInObjectPropertyOffset(i);
- Handle<Object> value = Handle<Object>(object->InObjectPropertyAt(i));
+ Handle<Object> value = Handle<Object>(object->InObjectPropertyAt(i),
+ isolate());
if (value->IsJSObject()) {
Handle<JSObject> value_object = Handle<JSObject>::cast(value);
__ lea(rcx, Operand(result, *offset));
__ movq(FieldOperand(result, total_offset), rcx);
__ LoadHeapObject(source, value_object);
- EmitDeepCopy(value_object, result, source, offset);
+ EmitDeepCopy(value_object, result, source, offset,
+ DONT_TRACK_ALLOCATION_SITE);
} else if (value->IsHeapObject()) {
__ LoadHeapObject(rcx, Handle<HeapObject>::cast(value));
__ movq(FieldOperand(result, total_offset), rcx);
} else {
- __ movq(rcx, value, RelocInfo::NONE);
+ __ movq(rcx, value, RelocInfo::NONE64);
__ movq(FieldOperand(result, total_offset), rcx);
}
}
+ // Build an AllocationSiteInfo if desired.
+ if (create_allocation_site_info) {
+ __ LoadRoot(kScratchRegister, Heap::kAllocationSiteInfoMapRootIndex);
+ __ movq(FieldOperand(result, object_size), kScratchRegister);
+ __ movq(FieldOperand(result, object_size + kPointerSize), source);
+ }
+
if (has_elements) {
// Copy elements backing store header.
__ LoadHeapObject(source, elements);
@@ -4751,25 +5282,26 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
int64_t value = double_array->get_representation(i);
int total_offset =
elements_offset + FixedDoubleArray::OffsetOfElementAt(i);
- __ movq(rcx, value, RelocInfo::NONE);
+ __ movq(rcx, value, RelocInfo::NONE64);
__ movq(FieldOperand(result, total_offset), rcx);
}
} else if (elements->IsFixedArray()) {
Handle<FixedArray> fast_elements = Handle<FixedArray>::cast(elements);
for (int i = 0; i < elements_length; i++) {
int total_offset = elements_offset + FixedArray::OffsetOfElementAt(i);
- Handle<Object> value(fast_elements->get(i));
+ Handle<Object> value(fast_elements->get(i), isolate());
if (value->IsJSObject()) {
Handle<JSObject> value_object = Handle<JSObject>::cast(value);
__ lea(rcx, Operand(result, *offset));
__ movq(FieldOperand(result, total_offset), rcx);
__ LoadHeapObject(source, value_object);
- EmitDeepCopy(value_object, result, source, offset);
+ EmitDeepCopy(value_object, result, source, offset,
+ DONT_TRACK_ALLOCATION_SITE);
} else if (value->IsHeapObject()) {
__ LoadHeapObject(rcx, Handle<HeapObject>::cast(value));
__ movq(FieldOperand(result, total_offset), rcx);
} else {
- __ movq(rcx, value, RelocInfo::NONE);
+ __ movq(rcx, value, RelocInfo::NONE64);
__ movq(FieldOperand(result, total_offset), rcx);
}
}
@@ -4814,7 +5346,8 @@ void LCodeGen::DoFastLiteral(LFastLiteral* instr) {
__ bind(&allocated);
int offset = 0;
__ LoadHeapObject(rbx, instr->hydrogen()->boilerplate());
- EmitDeepCopy(instr->hydrogen()->boilerplate(), rax, rbx, &offset);
+ EmitDeepCopy(instr->hydrogen()->boilerplate(), rax, rbx, &offset,
+ instr->hydrogen()->allocation_site_mode());
ASSERT_EQ(size, offset);
}
@@ -4824,28 +5357,36 @@ void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
Handle<FixedArray> constant_properties =
instr->hydrogen()->constant_properties();
- // Set up the parameters to the stub/runtime call.
- __ PushHeapObject(literals);
- __ Push(Smi::FromInt(instr->hydrogen()->literal_index()));
- __ Push(constant_properties);
int flags = instr->hydrogen()->fast_elements()
? ObjectLiteral::kFastElements
: ObjectLiteral::kNoFlags;
flags |= instr->hydrogen()->has_function()
? ObjectLiteral::kHasFunction
: ObjectLiteral::kNoFlags;
- __ Push(Smi::FromInt(flags));
- // Pick the right runtime function or stub to call.
+ // Set up the parameters to the stub/runtime call and pick the right
+ // runtime function or stub to call.
int properties_count = constant_properties->length() / 2;
if (instr->hydrogen()->depth() > 1) {
+ __ PushHeapObject(literals);
+ __ Push(Smi::FromInt(instr->hydrogen()->literal_index()));
+ __ Push(constant_properties);
+ __ Push(Smi::FromInt(flags));
CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
} else if (flags != ObjectLiteral::kFastElements ||
properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
+ __ PushHeapObject(literals);
+ __ Push(Smi::FromInt(instr->hydrogen()->literal_index()));
+ __ Push(constant_properties);
+ __ Push(Smi::FromInt(flags));
CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr);
} else {
+ __ LoadHeapObject(rax, literals);
+ __ Move(rbx, Smi::FromInt(instr->hydrogen()->literal_index()));
+ __ Move(rcx, constant_properties);
+ __ Move(rdx, Smi::FromInt(flags));
FastCloneShallowObjectStub stub(properties_count);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}
}
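
// DoObjectLiteral now forks three ways, and only the stub path passes its
// arguments in registers (rax/rbx/rcx/rdx) rather than pushing them. The
// decision depends only on literal depth, flags, and property count; a
// distilled sketch, with an illustrative constant standing in for
// FastCloneShallowObjectStub::kMaximumClonedProperties:
enum Path { kRuntimeDeep, kRuntimeShallow, kStub };

constexpr int kFastElementsOnly = 1;  // ObjectLiteral::kFastElements analogue
constexpr int kMaxCloned = 6;         // illustrative stub limit

Path ChoosePath(int depth, int flags, int properties_count) {
  if (depth > 1) return kRuntimeDeep;  // nested literals go to the runtime
  if (flags != kFastElementsOnly || properties_count > kMaxCloned)
    return kRuntimeShallow;
  return kStub;  // shallow, fast-elements, and small enough to clone inline
}
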
@@ -4915,7 +5456,7 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
if (!pretenure && shared_info->num_literals() == 0) {
FastNewClosureStub stub(shared_info->language_mode());
__ Push(shared_info);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
} else {
__ push(rsi);
__ Push(shared_info);
@@ -4971,14 +5512,14 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
Register input,
Handle<String> type_name) {
Condition final_branch_condition = no_condition;
- if (type_name->Equals(heap()->number_symbol())) {
+ if (type_name->Equals(heap()->number_string())) {
__ JumpIfSmi(input, true_label);
__ CompareRoot(FieldOperand(input, HeapObject::kMapOffset),
Heap::kHeapNumberMapRootIndex);
final_branch_condition = equal;
- } else if (type_name->Equals(heap()->string_symbol())) {
+ } else if (type_name->Equals(heap()->string_string())) {
__ JumpIfSmi(input, false_label);
__ CmpObjectType(input, FIRST_NONSTRING_TYPE, input);
__ j(above_equal, false_label);
@@ -4986,17 +5527,17 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
Immediate(1 << Map::kIsUndetectable));
final_branch_condition = zero;
- } else if (type_name->Equals(heap()->boolean_symbol())) {
+ } else if (type_name->Equals(heap()->boolean_string())) {
__ CompareRoot(input, Heap::kTrueValueRootIndex);
__ j(equal, true_label);
__ CompareRoot(input, Heap::kFalseValueRootIndex);
final_branch_condition = equal;
- } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_symbol())) {
+ } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_string())) {
__ CompareRoot(input, Heap::kNullValueRootIndex);
final_branch_condition = equal;
- } else if (type_name->Equals(heap()->undefined_symbol())) {
+ } else if (type_name->Equals(heap()->undefined_string())) {
__ CompareRoot(input, Heap::kUndefinedValueRootIndex);
__ j(equal, true_label);
__ JumpIfSmi(input, false_label);
@@ -5006,7 +5547,7 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
Immediate(1 << Map::kIsUndetectable));
final_branch_condition = not_zero;
- } else if (type_name->Equals(heap()->function_symbol())) {
+ } else if (type_name->Equals(heap()->function_string())) {
STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
__ JumpIfSmi(input, false_label);
__ CmpObjectType(input, JS_FUNCTION_TYPE, input);
@@ -5014,13 +5555,19 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
__ CmpInstanceType(input, JS_FUNCTION_PROXY_TYPE);
final_branch_condition = equal;
- } else if (type_name->Equals(heap()->object_symbol())) {
+ } else if (type_name->Equals(heap()->object_string())) {
__ JumpIfSmi(input, false_label);
if (!FLAG_harmony_typeof) {
__ CompareRoot(input, Heap::kNullValueRootIndex);
__ j(equal, true_label);
}
- __ CmpObjectType(input, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, input);
+ if (FLAG_harmony_symbols) {
+ __ CmpObjectType(input, SYMBOL_TYPE, input);
+ __ j(equal, true_label);
+ __ CmpInstanceType(input, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
+ } else {
+ __ CmpObjectType(input, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, input);
+ }
__ j(below, false_label);
__ CmpInstanceType(input, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
__ j(above, false_label);
@@ -5066,6 +5613,7 @@ void LCodeGen::EmitIsConstructCall(Register temp) {
void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
+ if (info()->IsStub()) return;
// Ensure that we have enough space after the previous lazy-bailout
// instruction for patching the code here.
int current_pc = masm()->pc_offset();
@@ -5091,6 +5639,11 @@ void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
}
+void LCodeGen::DoDummyUse(LDummyUse* instr) {
+ // Nothing to see here, move on!
+}
+
+
void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
LOperand* obj = instr->object();
LOperand* key = instr->key();
@@ -5155,7 +5708,7 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
__ CompareRoot(rsp, Heap::kStackLimitRootIndex);
__ j(above_equal, &done, Label::kNear);
StackCheckStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
last_lazy_deopt_pc_ = masm()->pc_offset();
__ bind(&done);
diff --git a/src/3rdparty/v8/src/x64/lithium-codegen-x64.h b/src/3rdparty/v8/src/x64/lithium-codegen-x64.h
index 0f8a62a..66880aa 100644
--- a/src/3rdparty/v8/src/x64/lithium-codegen-x64.h
+++ b/src/3rdparty/v8/src/x64/lithium-codegen-x64.h
@@ -56,6 +56,7 @@ class LCodeGen BASE_EMBEDDED {
deoptimizations_(4, info->zone()),
jump_table_(4, info->zone()),
deoptimization_literals_(8, info->zone()),
+ prototype_maps_(0, info->zone()),
inlined_function_count_(0),
scope_(info->scope()),
status_(UNUSED),
@@ -63,6 +64,7 @@ class LCodeGen BASE_EMBEDDED {
deferred_(8, info->zone()),
osr_pc_offset_(-1),
last_lazy_deopt_pc_(0),
+ frame_is_built_(false),
safepoints_(info->zone()),
resolver_(this),
expected_safepoint_kind_(Safepoint::kSimple) {
@@ -77,6 +79,15 @@ class LCodeGen BASE_EMBEDDED {
Heap* heap() const { return isolate()->heap(); }
Zone* zone() const { return zone_; }
+ bool NeedsEagerFrame() const {
+ return GetStackSlotCount() > 0 ||
+ info()->is_non_deferred_calling() ||
+ !info()->IsStub();
+ }
+ bool NeedsDeferredFrame() const {
+ return !NeedsEagerFrame() && info()->is_deferred_calling();
+ }
+
// Support for converting LOperands to assembler types.
Register ToRegister(LOperand* op) const;
XMMRegister ToDoubleRegister(LOperand* op) const;
@@ -106,11 +117,12 @@ class LCodeGen BASE_EMBEDDED {
void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
void DoDeferredAllocateObject(LAllocateObject* instr);
+ void DoDeferredAllocate(LAllocate* instr);
void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
Label* map_check);
void DoCheckMapCommon(Register reg, Handle<Map> map,
- CompareMapMode mode, LEnvironment* env);
+ CompareMapMode mode, LInstruction* instr);
// Parallel move support.
void DoParallelMove(LParallelMove* move);
@@ -158,7 +170,7 @@ class LCodeGen BASE_EMBEDDED {
Register scratch);
int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
- int GetParameterCount() const { return scope()->num_parameters(); }
+ int GetParameterCount() const { return info()->num_parameters(); }
void Abort(const char* reason);
void Comment(const char* format, ...);
@@ -229,8 +241,10 @@ class LCodeGen BASE_EMBEDDED {
LOperand* op,
bool is_tagged,
bool is_uint32,
+ bool arguments_known,
int arguments_index,
int arguments_count);
+ void RegisterDependentCodeForEmbeddedMaps(Handle<Code> code);
void PopulateDeoptimizationData(Handle<Code> code);
int DefineDeoptimizationLiteral(Handle<Object> literal);
@@ -272,16 +286,13 @@ class LCodeGen BASE_EMBEDDED {
static Condition TokenToCondition(Token::Value op, bool is_unsigned);
void EmitGoto(int block);
void EmitBranch(int left_block, int right_block, Condition cc);
- void EmitNumberUntagD(Register input,
- XMMRegister result,
- bool deoptimize_on_undefined,
- bool deoptimize_on_minus_zero,
- LEnvironment* env);
-
-
- void DeoptIfTaggedButNotSmi(LEnvironment* environment,
- HValue* value,
- LOperand* operand);
+ void EmitNumberUntagD(
+ Register input,
+ XMMRegister result,
+ bool deoptimize_on_undefined,
+ bool deoptimize_on_minus_zero,
+ LEnvironment* env,
+ NumberUntagDMode mode = NUMBER_CANDIDATE_IS_ANY_TAGGED);
// Emits optimized code for typeof x == "y". Modifies input register.
// Returns the condition on which a final split to
@@ -324,14 +335,19 @@ class LCodeGen BASE_EMBEDDED {
void EmitDeepCopy(Handle<JSObject> object,
Register result,
Register source,
- int* offset);
+ int* offset,
+ AllocationSiteMode mode);
struct JumpTableEntry {
- explicit inline JumpTableEntry(Address entry)
+ inline JumpTableEntry(Address entry, bool frame, bool is_lazy)
: label(),
- address(entry) { }
+ address(entry),
+ needs_frame(frame),
+ is_lazy_deopt(is_lazy) { }
Label label;
Address address;
+ bool needs_frame;
+ bool is_lazy_deopt;
};
void EnsureSpaceForLazyDeopt(int space_needed);
@@ -341,8 +357,6 @@ class LCodeGen BASE_EMBEDDED {
void DoStoreKeyedExternalArray(LStoreKeyed* instr);
void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr);
void DoStoreKeyedFixedArray(LStoreKeyed* instr);
- template <class T>
- void PrepareKeyForKeyedOp(T* hydrogen_instr, LOperand* key);
Zone* zone_;
LPlatformChunk* const chunk_;
@@ -355,6 +369,7 @@ class LCodeGen BASE_EMBEDDED {
ZoneList<LEnvironment*> deoptimizations_;
ZoneList<JumpTableEntry> jump_table_;
ZoneList<Handle<Object> > deoptimization_literals_;
+ ZoneList<Handle<Map> > prototype_maps_;
int inlined_function_count_;
Scope* const scope_;
Status status_;
@@ -362,6 +377,7 @@ class LCodeGen BASE_EMBEDDED {
ZoneList<LDeferredCode*> deferred_;
int osr_pc_offset_;
int last_lazy_deopt_pc_;
+ bool frame_is_built_;
// Builder that keeps track of safepoints in the code. The table
// itself is emitted at the end of the generated code.
@@ -376,6 +392,7 @@ class LCodeGen BASE_EMBEDDED {
public:
explicit PushSafepointRegistersScope(LCodeGen* codegen)
: codegen_(codegen) {
+ ASSERT(codegen_->info()->is_calling());
ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
codegen_->masm_->PushSafepointRegisters();
codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;
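
// The new NeedsEagerFrame()/NeedsDeferredFrame() pair splits code objects
// into three cases: frame built up front, frame built only on a deferred
// path, or no frame at all; by construction the two predicates are never
// both true. A sketch with illustrative fields standing in for the
// CompilationInfo queries:
struct FrameRequirements {
  int stack_slots;
  bool is_stub;
  bool calls_non_deferred;  // info()->is_non_deferred_calling()
  bool calls_deferred;      // info()->is_deferred_calling()

  bool NeedsEagerFrame() const {
    return stack_slots > 0 || calls_non_deferred || !is_stub;
  }
  bool NeedsDeferredFrame() const {
    return !NeedsEagerFrame() && calls_deferred;
  }
};
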
diff --git a/src/3rdparty/v8/src/x64/lithium-x64.cc b/src/3rdparty/v8/src/x64/lithium-x64.cc
index 3985dc0..f591437 100644
--- a/src/3rdparty/v8/src/x64/lithium-x64.cc
+++ b/src/3rdparty/v8/src/x64/lithium-x64.cc
@@ -44,10 +44,10 @@ LITHIUM_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE)
#undef DEFINE_COMPILE
LOsrEntry::LOsrEntry() {
- for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
+ for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) {
register_spills_[i] = NULL;
}
- for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; ++i) {
+ for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); ++i) {
double_register_spills_[i] = NULL;
}
}
@@ -114,7 +114,11 @@ void LInstruction::PrintDataTo(StringStream* stream) {
stream->Add("= ");
for (int i = 0; i < InputCount(); i++) {
if (i > 0) stream->Add(" ");
- InputAt(i)->PrintTo(stream);
+ if (InputAt(i) == NULL) {
+ stream->Add("NULL");
+ } else {
+ InputAt(i)->PrintTo(stream);
+ }
}
}
@@ -299,6 +303,11 @@ void LUnaryMathOperation::PrintDataTo(StringStream* stream) {
}
+void LMathExp::PrintDataTo(StringStream* stream) {
+ value()->PrintTo(stream);
+}
+
+
void LLoadContextSlot::PrintDataTo(StringStream* stream) {
context()->PrintTo(stream);
stream->Add("[%d]", slot_index());
@@ -348,6 +357,17 @@ void LCallNew::PrintDataTo(StringStream* stream) {
}
+void LCallNewArray::PrintDataTo(StringStream* stream) {
+ stream->Add("= ");
+ constructor()->PrintTo(stream);
+ stream->Add(" #%d / ", arity());
+ ASSERT(hydrogen()->property_cell()->value()->IsSmi());
+ ElementsKind kind = static_cast<ElementsKind>(
+ Smi::cast(hydrogen()->property_cell()->value())->value());
+ stream->Add(" (%s) ", ElementsKindToString(kind));
+}
+
+
void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
arguments()->PrintTo(stream);
@@ -395,11 +415,27 @@ void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
}
+void LLoadKeyed::PrintDataTo(StringStream* stream) {
+ elements()->PrintTo(stream);
+ stream->Add("[");
+ key()->PrintTo(stream);
+ if (hydrogen()->IsDehoisted()) {
+ stream->Add(" + %d]", additional_index());
+ } else {
+ stream->Add("]");
+ }
+}
+
+
void LStoreKeyed::PrintDataTo(StringStream* stream) {
elements()->PrintTo(stream);
stream->Add("[");
key()->PrintTo(stream);
- stream->Add("] <- ");
+ if (hydrogen()->IsDehoisted()) {
+ stream->Add(" + %d] <-", additional_index());
+ } else {
+ stream->Add("] <- ");
+ }
value()->PrintTo(stream);
}
@@ -598,6 +634,8 @@ LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
HInstruction* hinstr,
CanDeoptimize can_deoptimize) {
+ info()->MarkAsNonDeferredCalling();
+
#ifdef DEBUG
instr->VerifyCall();
#endif
@@ -663,6 +701,11 @@ LInstruction* LChunkBuilder::DoBlockEntry(HBlockEntry* instr) {
}
+LInstruction* LChunkBuilder::DoDummyUse(HDummyUse* instr) {
+ return DefineAsRegister(new(zone()) LDummyUse(UseAny(instr->value())));
+}
+
+
LInstruction* LChunkBuilder::DoSoftDeoptimize(HSoftDeoptimize* instr) {
return AssignEnvironment(new(zone()) LDeoptimize);
}
@@ -947,6 +990,12 @@ LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
}
+LInstruction* LChunkBuilder::DoInstanceSize(HInstanceSize* instr) {
+ LOperand* object = UseRegisterAtStart(instr->object());
+ return DefineAsRegister(new(zone()) LInstanceSize(object));
+}
+
+
LInstruction* LChunkBuilder::DoWrapReceiver(HWrapReceiver* instr) {
LOperand* receiver = UseRegister(instr->receiver());
LOperand* function = UseRegisterAtStart(instr->function());
@@ -983,7 +1032,14 @@ LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) {
LInstruction* LChunkBuilder::DoContext(HContext* instr) {
- return instr->HasNoUses() ? NULL : DefineAsRegister(new(zone()) LContext);
+ // If there is a non-return use, the context must be allocated in a register.
+ for (HUseIterator it(instr->uses()); !it.Done(); it.Advance()) {
+ if (!it.value()->IsReturn()) {
+ return DefineAsRegister(new(zone()) LContext);
+ }
+ }
+
+ return NULL;
}
@@ -1030,6 +1086,14 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
LOperand* input = UseFixedDouble(instr->value(), xmm1);
LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(input);
return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
+ } else if (op == kMathExp) {
+ ASSERT(instr->representation().IsDouble());
+ ASSERT(instr->value()->representation().IsDouble());
+ LOperand* value = UseTempRegister(instr->value());
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempRegister();
+ LMathExp* result = new(zone()) LMathExp(value, temp1, temp2);
+ return DefineAsRegister(result);
} else {
LOperand* input = UseRegisterAtStart(instr->value());
LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(input);
@@ -1069,7 +1133,8 @@ LInstruction* LChunkBuilder::DoCallNamed(HCallNamed* instr) {
LInstruction* LChunkBuilder::DoCallGlobal(HCallGlobal* instr) {
argument_count_ -= instr->argument_count();
- return MarkAsCall(DefineFixed(new(zone()) LCallGlobal(instr->qml_global()), rax), instr);
+ LCallGlobal* result = new(zone()) LCallGlobal(instr->qml_global());
+ return MarkAsCall(DefineFixed(result, rax), instr);
}
@@ -1087,6 +1152,15 @@ LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
}
+LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
+ ASSERT(FLAG_optimize_constructed_arrays);
+ LOperand* constructor = UseFixed(instr->constructor(), rdi);
+ argument_count_ -= instr->argument_count();
+ LCallNewArray* result = new(zone()) LCallNewArray(constructor);
+ return MarkAsCall(DefineFixed(result, rax), instr);
+}
+
+
LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
LOperand* function = UseFixed(instr->function(), rdi);
argument_count_ -= instr->argument_count();
@@ -1156,6 +1230,13 @@ LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::DIV, instr);
} else if (instr->representation().IsInteger32()) {
+ if (instr->HasPowerOf2Divisor()) {
+ ASSERT(!instr->CheckFlag(HValue::kCanBeDivByZero));
+ LOperand* value = UseRegisterAtStart(instr->left());
+ LDivI* div =
+ new(zone()) LDivI(value, UseOrConstant(instr->right()), NULL);
+ return AssignEnvironment(DefineSameAsFirst(div));
+ }
// The temporary operand is necessary to ensure that right is not allocated
// into rdx.
LOperand* temp = FixedTemp(rdx);
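
// The new HasPowerOf2Divisor() path lets the backend replace idiv with a
// shift. For signed dividends the shift has to be biased so the result still
// truncates toward zero like JavaScript's int32 division; a reference sketch
// (k is log2 of the divisor):
#include <cassert>
#include <cstdint>

int32_t DivByPowerOfTwo(int32_t x, int k) {
  // Add (2^k - 1) to negative inputs so the arithmetic shift rounds toward
  // zero, matching truncating division.
  int32_t bias = (x >> 31) & ((1 << k) - 1);
  return (x + bias) >> k;
}

int main() {
  assert(DivByPowerOfTwo(7, 1) == 3);
  assert(DivByPowerOfTwo(-7, 1) == -3);  // a plain shift would give -4
}
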
@@ -1190,12 +1271,31 @@ HValue* LChunkBuilder::SimplifiedDivisorForMathFloorOfDiv(HValue* divisor) {
return constant_val->CopyToRepresentation(Representation::Integer32(),
divisor->block()->zone());
}
+ // A value with an integer representation does not need to be transformed.
+ if (divisor->representation().IsInteger32()) {
+ return divisor;
+ // A change from an integer32 can be replaced by the integer32 value.
+ } else if (divisor->IsChange() &&
+ HChange::cast(divisor)->from().IsInteger32()) {
+ return HChange::cast(divisor)->value();
+ }
return NULL;
}
LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
HValue* right = instr->right();
+ if (!right->IsConstant()) {
+ ASSERT(right->representation().IsInteger32());
+ // The temporary operand is necessary to ensure that right is not allocated
+ // into rdx.
+ LOperand* temp = FixedTemp(rdx);
+ LOperand* dividend = UseFixed(instr->left(), rax);
+ LOperand* divisor = UseRegister(instr->right());
+ LDivI* flooring_div = new(zone()) LDivI(dividend, divisor, temp);
+ return AssignEnvironment(DefineFixed(flooring_div, rax));
+ }
+
ASSERT(right->IsConstant() && HConstant::cast(right)->HasInteger32Value());
LOperand* divisor = chunk_->DefineConstantOperand(HConstant::cast(right));
int32_t divisor_si = HConstant::cast(right)->Integer32Value();
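
// For a non-constant divisor, flooring division now reuses LDivI (see the
// is_flooring() accessor added in lithium-x64.h below). Flooring differs
// from x64's truncating idiv exactly when the remainder is nonzero and the
// operands' signs differ; reference semantics:
#include <cassert>
#include <cstdint>

int32_t FloorDiv(int32_t a, int32_t b) {
  int32_t q = a / b;  // truncates toward zero, like idiv
  int32_t r = a % b;
  // Step one down when the exact quotient was negative and non-integral.
  if (r != 0 && ((r < 0) != (b < 0))) --q;
  return q;
}

int main() {
  assert(FloorDiv(7, 2) == 3);
  assert(FloorDiv(-7, 2) == -4);  // truncating division gives -3
}
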
@@ -1389,7 +1489,7 @@ LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
LInstruction* LChunkBuilder::DoCompareIDAndBranch(
HCompareIDAndBranch* instr) {
- Representation r = instr->GetInputRepresentation();
+ Representation r = instr->representation();
if (r.IsInteger32()) {
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
@@ -1552,6 +1652,28 @@ LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
}
+LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) {
+ LOperand* string = UseRegister(instr->string());
+ LOperand* index = UseRegister(instr->index());
+ ASSERT(rcx.is_byte_register());
+ LOperand* value = UseFixed(instr->value(), rcx);
+ LSeqStringSetChar* result =
+ new(zone()) LSeqStringSetChar(instr->encoding(), string, index, value);
+ return DefineSameAsFirst(result);
+}
+
+
+LInstruction* LChunkBuilder::DoNumericConstraint(HNumericConstraint* instr) {
+ return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoInductionVariableAnnotation(
+ HInductionVariableAnnotation* instr) {
+ return NULL;
+}
+
+
LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
LOperand* value = UseRegisterOrConstantAtStart(instr->index());
LOperand* length = Use(instr->length());
@@ -1588,8 +1710,12 @@ LInstruction* LChunkBuilder::DoForceRepresentation(HForceRepresentation* bad) {
LInstruction* LChunkBuilder::DoChange(HChange* instr) {
Representation from = instr->from();
Representation to = instr->to();
+ // Only mark conversions that might need to allocate as calling rather than
+ // all changes. This way, simple, non-allocating conversions do not force
+ // building a stack frame.
if (from.IsTagged()) {
if (to.IsDouble()) {
+ info()->MarkAsDeferredCalling();
LOperand* value = UseRegister(instr->value());
LNumberUntagD* res = new(zone()) LNumberUntagD(value);
return AssignEnvironment(DefineAsRegister(res));
@@ -1607,6 +1733,7 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
}
} else if (from.IsDouble()) {
if (to.IsTagged()) {
+ info()->MarkAsDeferredCalling();
LOperand* value = UseRegister(instr->value());
LOperand* temp = TempRegister();
@@ -1620,6 +1747,7 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
return AssignEnvironment(DefineAsRegister(new(zone()) LDoubleToI(value)));
}
} else if (from.IsInteger32()) {
+ info()->MarkAsDeferredCalling();
if (to.IsTagged()) {
HValue* val = instr->value();
LOperand* value = UseRegister(val);
@@ -1676,6 +1804,12 @@ LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
}
+LInstruction* LChunkBuilder::DoCheckSmiOrInt32(HCheckSmiOrInt32* instr) {
+ LOperand* value = UseRegisterAtStart(instr->value());
+ return AssignEnvironment(new(zone()) LCheckSmi(value));
+}
+
+
LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) {
LOperand* value = UseRegisterAtStart(instr->value());
return AssignEnvironment(new(zone()) LCheckFunction(value));
@@ -1844,15 +1978,16 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
ASSERT(instr->key()->representation().IsInteger32() ||
instr->key()->representation().IsTagged());
ElementsKind elements_kind = instr->elements_kind();
- bool clobbers_key = ArrayOpClobbersKey<HLoadKeyed>(instr);
+ bool clobbers_key = instr->key()->representation().IsTagged();
LOperand* key = clobbers_key
? UseTempRegister(instr->key())
: UseRegisterOrConstantAtStart(instr->key());
- LOperand* elements = UseRegisterAtStart(instr->elements());
- LLoadKeyed* result = new(zone()) LLoadKeyed(elements, key);
+ LLoadKeyed* result = NULL;
-#ifdef DEBUG
- if (instr->is_external()) {
+ if (!instr->is_external()) {
+ LOperand* obj = UseRegisterAtStart(instr->elements());
+ result = new(zone()) LLoadKeyed(obj, key);
+ } else {
ASSERT(
(instr->representation().IsInteger32() &&
(elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
@@ -1860,8 +1995,9 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
(instr->representation().IsDouble() &&
((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
(elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
+ LOperand* external_pointer = UseRegister(instr->elements());
+ result = new(zone()) LLoadKeyed(external_pointer, key);
}
-#endif
DefineAsRegister(result);
bool can_deoptimize = instr->RequiresHoleCheck() ||
@@ -1882,35 +2018,51 @@ LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
- bool needs_write_barrier = instr->NeedsWriteBarrier();
- bool clobbers_key = ArrayOpClobbersKey<HStoreKeyed>(instr);
- LOperand* key = (clobbers_key || needs_write_barrier)
- ? UseTempRegister(instr->key())
- : UseRegisterOrConstantAtStart(instr->key());
- LOperand* val = needs_write_barrier
- ? UseTempRegister(instr->value())
- : UseRegisterAtStart(instr->value());
- LOperand* elements = UseRegisterAtStart(instr->elements());
+ ElementsKind elements_kind = instr->elements_kind();
+ bool clobbers_key = instr->key()->representation().IsTagged();
-#ifdef DEBUG
if (!instr->is_external()) {
ASSERT(instr->elements()->representation().IsTagged());
- } else {
- ElementsKind elements_kind = instr->elements_kind();
- ASSERT(
- (instr->value()->representation().IsInteger32() &&
- (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
- (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
- (instr->value()->representation().IsDouble() &&
- ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
- (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
- ASSERT(instr->elements()->representation().IsExternal());
+ bool needs_write_barrier = instr->NeedsWriteBarrier();
+ LOperand* object = NULL;
+ LOperand* key = NULL;
+ LOperand* val = NULL;
+
+ if (instr->value()->representation().IsDouble()) {
+ object = UseRegisterAtStart(instr->elements());
+ val = UseTempRegister(instr->value());
+ key = clobbers_key ? UseTempRegister(instr->key())
+ : UseRegisterOrConstantAtStart(instr->key());
+ } else {
+ ASSERT(instr->value()->representation().IsTagged());
+ object = UseTempRegister(instr->elements());
+ val = needs_write_barrier ? UseTempRegister(instr->value())
+ : UseRegisterAtStart(instr->value());
+ key = (clobbers_key || needs_write_barrier)
+ ? UseTempRegister(instr->key())
+ : UseRegisterOrConstantAtStart(instr->key());
+ }
+
+ return new(zone()) LStoreKeyed(object, key, val);
}
-#endif
- LStoreKeyed* result = new(zone()) LStoreKeyed(elements, key, val);
- ASSERT(result != NULL);
- return result;
+ ASSERT(
+ (instr->value()->representation().IsInteger32() &&
+ (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
+ (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
+ (instr->value()->representation().IsDouble() &&
+ ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
+ (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
+ ASSERT(instr->elements()->representation().IsExternal());
+ bool val_is_temp_register =
+ elements_kind == EXTERNAL_PIXEL_ELEMENTS ||
+ elements_kind == EXTERNAL_FLOAT_ELEMENTS;
+ LOperand* val = val_is_temp_register ? UseTempRegister(instr->value())
+ : UseRegister(instr->value());
+ LOperand* key = clobbers_key ? UseTempRegister(instr->key())
+ : UseRegisterOrConstantAtStart(instr->key());
+ LOperand* external_pointer = UseRegister(instr->elements());
+ return new(zone()) LStoreKeyed(external_pointer, key, val);
}
@@ -1931,15 +2083,18 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
LInstruction* LChunkBuilder::DoTransitionElementsKind(
HTransitionElementsKind* instr) {
- ElementsKind from_kind = instr->original_map()->elements_kind();
- ElementsKind to_kind = instr->transitioned_map()->elements_kind();
- if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
+ LOperand* object = UseRegister(instr->object());
+ if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) {
LOperand* object = UseRegister(instr->object());
LOperand* new_map_reg = TempRegister();
LOperand* temp_reg = TempRegister();
LTransitionElementsKind* result =
new(zone()) LTransitionElementsKind(object, new_map_reg, temp_reg);
- return DefineSameAsFirst(result);
+ return result;
+ } else if (FLAG_compiled_transitions) {
+ LTransitionElementsKind* result =
+ new(zone()) LTransitionElementsKind(object, NULL, NULL);
+ return AssignPointerMap(result);
} else {
LOperand* object = UseFixed(instr->object(), rax);
LOperand* fixed_object_reg = FixedTemp(rdx);
@@ -1948,11 +2103,21 @@ LInstruction* LChunkBuilder::DoTransitionElementsKind(
new(zone()) LTransitionElementsKind(object,
new_map_reg,
fixed_object_reg);
- return MarkAsCall(DefineFixed(result, rax), instr);
+ return MarkAsCall(result, instr);
}
}
+LInstruction* LChunkBuilder::DoTrapAllocationMemento(
+ HTrapAllocationMemento* instr) {
+ LOperand* object = UseRegister(instr->object());
+ LOperand* temp = TempRegister();
+ LTrapAllocationMemento* result =
+ new(zone()) LTrapAllocationMemento(object, temp);
+ return AssignEnvironment(result);
+}
+
+
LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
bool needs_write_barrier = instr->NeedsWriteBarrier();
bool needs_write_barrier_for_map = !instr->transition().is_null() &&
@@ -2021,11 +2186,21 @@ LInstruction* LChunkBuilder::DoStringLength(HStringLength* instr) {
LInstruction* LChunkBuilder::DoAllocateObject(HAllocateObject* instr) {
+ info()->MarkAsDeferredCalling();
LAllocateObject* result = new(zone()) LAllocateObject(TempRegister());
return AssignPointerMap(DefineAsRegister(result));
}
+LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
+ info()->MarkAsDeferredCalling();
+ LOperand* size = UseTempRegister(instr->size());
+ LOperand* temp = TempRegister();
+ LAllocate* result = new(zone()) LAllocate(size, temp);
+ return AssignPointerMap(DefineAsRegister(result));
+}
+
+
LInstruction* LChunkBuilder::DoFastLiteral(HFastLiteral* instr) {
return MarkAsCall(DefineFixed(new(zone()) LFastLiteral, rax), instr);
}
@@ -2068,8 +2243,17 @@ LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
- int spill_index = chunk()->GetParameterStackSlot(instr->index());
- return DefineAsSpilled(new(zone()) LParameter, spill_index);
+ LParameter* result = new(zone()) LParameter;
+ if (instr->kind() == HParameter::STACK_PARAMETER) {
+ int spill_index = chunk()->GetParameterStackSlot(instr->index());
+ return DefineAsSpilled(result, spill_index);
+ } else {
+ ASSERT(info()->IsStub());
+ CodeStubInterfaceDescriptor* descriptor =
+ info()->code_stub()->GetInterfaceDescriptor(info()->isolate());
+ Register reg = descriptor->register_params_[instr->index()];
+ return DefineFixed(result, reg);
+ }
}
@@ -2137,7 +2321,7 @@ LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
env->set_ast_id(instr->ast_id());
env->Drop(instr->pop_count());
- for (int i = 0; i < instr->values()->length(); ++i) {
+ for (int i = instr->values()->length() - 1; i >= 0; --i) {
HValue* value = instr->values()->at(i);
if (instr->HasAssignedIndexAt(i)) {
env->Bind(instr->GetAssignedIndexAt(i), value);
@@ -2165,6 +2349,7 @@ LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
+ info()->MarkAsDeferredCalling();
if (instr->is_function_entry()) {
return MarkAsCall(new(zone()) LStackCheck, instr);
} else {
@@ -2181,8 +2366,8 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
instr->arguments_count(),
instr->function(),
undefined,
- instr->call_kind(),
- instr->inlining_kind());
+ instr->inlining_kind(),
+ instr->undefined_receiver());
if (instr->arguments_var() != NULL) {
inner->Bind(instr->arguments_var(), graph()->GetArgumentsObject());
}
diff --git a/src/3rdparty/v8/src/x64/lithium-x64.h b/src/3rdparty/v8/src/x64/lithium-x64.h
index a437a2b..0133578 100644
--- a/src/3rdparty/v8/src/x64/lithium-x64.h
+++ b/src/3rdparty/v8/src/x64/lithium-x64.h
@@ -49,6 +49,7 @@ class LCodeGen;
#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \
V(AccessArgumentsAt) \
V(AddI) \
+ V(Allocate) \
V(AllocateObject) \
V(ApplyArguments) \
V(ArgumentsElements) \
@@ -67,6 +68,7 @@ class LCodeGen;
V(CallKnownGlobal) \
V(CallNamed) \
V(CallNew) \
+ V(CallNewArray) \
V(CallRuntime) \
V(CallStub) \
V(CheckFunction) \
@@ -93,6 +95,7 @@ class LCodeGen;
V(Deoptimize) \
V(DivI) \
V(DoubleToI) \
+ V(DummyUse) \
V(ElementsKind) \
V(FastLiteral) \
V(FixedArrayBaseLength) \
@@ -107,6 +110,7 @@ class LCodeGen;
V(In) \
V(InstanceOf) \
V(InstanceOfKnownGlobal) \
+ V(InstanceSize) \
V(InstructionGap) \
V(Integer32ToDouble) \
V(Uint32ToDouble) \
@@ -131,6 +135,7 @@ class LCodeGen;
V(LoadNamedField) \
V(LoadNamedFieldPolymorphic) \
V(LoadNamedGeneric) \
+ V(MathExp) \
V(MathFloorOfDiv) \
V(MathMinMax) \
V(ModI) \
@@ -148,6 +153,7 @@ class LCodeGen;
V(Random) \
V(RegExpLiteral) \
V(Return) \
+ V(SeqStringSetChar) \
V(ShiftI) \
V(SmiTag) \
V(SmiUntag) \
@@ -170,6 +176,7 @@ class LCodeGen;
V(Throw) \
V(ToFastProperties) \
V(TransitionElementsKind) \
+ V(TrapAllocationMemento) \
V(Typeof) \
V(TypeofIsAndBranch) \
V(UnaryMathOperation) \
@@ -250,6 +257,11 @@ class LInstruction: public ZoneObject {
void MarkAsCall() { is_call_ = true; }
+ // Interface to the register allocator and iterators.
+ bool ClobbersTemps() const { return is_call_; }
+ bool ClobbersRegisters() const { return is_call_; }
+ bool ClobbersDoubleRegisters() const { return is_call_; }
+
virtual void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) { }
// Interface to the register allocator and iterators.
@@ -397,6 +409,15 @@ class LLazyBailout: public LTemplateInstruction<0, 0, 0> {
};
+class LDummyUse: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LDummyUse(LOperand* value) {
+ inputs_[0] = value;
+ }
+ DECLARE_CONCRETE_INSTRUCTION(DummyUse, "dummy-use")
+};
+
+
class LDeoptimize: public LTemplateInstruction<0, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
@@ -566,6 +587,8 @@ class LDivI: public LTemplateInstruction<1, 2, 1> {
LOperand* right() { return inputs_[1]; }
LOperand* temp() { return temps_[0]; }
+ bool is_flooring() { return hydrogen_value()->IsMathFloorOfDiv(); }
+
DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
DECLARE_HYDROGEN_ACCESSOR(Div)
};
@@ -620,7 +643,7 @@ class LCmpIDAndBranch: public LControlInstruction<2, 0> {
Token::Value op() const { return hydrogen()->token(); }
bool is_double() const {
- return hydrogen()->GetInputRepresentation().IsDouble();
+ return hydrogen()->representation().IsDouble();
}
virtual void PrintDataTo(StringStream* stream);
@@ -643,6 +666,25 @@ class LUnaryMathOperation: public LTemplateInstruction<1, 1, 0> {
};
+class LMathExp: public LTemplateInstruction<1, 1, 2> {
+ public:
+ LMathExp(LOperand* value, LOperand* temp1, LOperand* temp2) {
+ inputs_[0] = value;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ ExternalReference::InitializeMathExpData();
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp1() { return temps_[0]; }
+ LOperand* temp2() { return temps_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathExp, "math-exp")
+
+ virtual void PrintDataTo(StringStream* stream);
+};
+
+
class LCmpObjectEqAndBranch: public LControlInstruction<2, 0> {
public:
LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
@@ -914,6 +956,19 @@ class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 1, 1> {
};
+class LInstanceSize: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LInstanceSize(LOperand* object) {
+ inputs_[0] = object;
+ }
+
+ LOperand* object() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(InstanceSize, "instance-size")
+ DECLARE_HYDROGEN_ACCESSOR(InstanceSize)
+};
+
+
class LBoundsCheck: public LTemplateInstruction<0, 2, 0> {
public:
LBoundsCheck(LOperand* index, LOperand* length) {
@@ -1135,6 +1190,30 @@ class LDateField: public LTemplateInstruction<1, 1, 0> {
};
+class LSeqStringSetChar: public LTemplateInstruction<1, 3, 0> {
+ public:
+ LSeqStringSetChar(String::Encoding encoding,
+ LOperand* string,
+ LOperand* index,
+ LOperand* value) : encoding_(encoding) {
+ inputs_[0] = string;
+ inputs_[1] = index;
+ inputs_[2] = value;
+ }
+
+ String::Encoding encoding() { return encoding_; }
+ LOperand* string() { return inputs_[0]; }
+ LOperand* index() { return inputs_[1]; }
+ LOperand* value() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(SeqStringSetChar, "seq-string-set-char")
+ DECLARE_HYDROGEN_ACCESSOR(SeqStringSetChar)
+
+ private:
+ String::Encoding encoding_;
+};
+
+
class LThrow: public LTemplateInstruction<0, 1, 0> {
public:
explicit LThrow(LOperand* value) {
@@ -1364,6 +1443,7 @@ class LLoadKeyed: public LTemplateInstruction<1, 2, 0> {
}
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
+ virtual void PrintDataTo(StringStream* stream);
uint32_t additional_index() const { return hydrogen()->index_offset(); }
ElementsKind elements_kind() const {
return hydrogen()->elements_kind();
@@ -1371,14 +1451,6 @@ class LLoadKeyed: public LTemplateInstruction<1, 2, 0> {
};
-template <class T>
-inline static bool ArrayOpClobbersKey(T *value) {
- CHECK(value->IsLoadKeyed() || value->IsStoreKeyed());
- return !value->IsConstant() && (value->key()->representation().IsTagged()
- || value->IsDehoisted());
-}
-
-
class LLoadKeyedGeneric: public LTemplateInstruction<1, 2, 0> {
public:
LLoadKeyedGeneric(LOperand* obj, LOperand* key) {
@@ -1522,6 +1594,7 @@ class LThisFunction: public LTemplateInstruction<1, 0, 0> {
class LContext: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(Context, "context")
+ DECLARE_HYDROGEN_ACCESSOR(Context)
};
@@ -1688,6 +1761,23 @@ class LCallNew: public LTemplateInstruction<1, 1, 0> {
};
+class LCallNewArray: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LCallNewArray(LOperand* constructor) {
+ inputs_[0] = constructor;
+ }
+
+ LOperand* constructor() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array")
+ DECLARE_HYDROGEN_ACCESSOR(CallNewArray)
+
+ virtual void PrintDataTo(StringStream* stream);
+
+ int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
class LCallRuntime: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
@@ -1761,6 +1851,7 @@ class LNumberTagD: public LTemplateInstruction<1, 1, 1> {
LOperand* temp() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(NumberTagD, "number-tag-d")
+ DECLARE_HYDROGEN_ACCESSOR(Change)
};
@@ -1950,6 +2041,24 @@ class LTransitionElementsKind: public LTemplateInstruction<1, 1, 2> {
Handle<Map> original_map() { return hydrogen()->original_map(); }
Handle<Map> transitioned_map() { return hydrogen()->transitioned_map(); }
+ ElementsKind from_kind() { return hydrogen()->from_kind(); }
+ ElementsKind to_kind() { return hydrogen()->to_kind(); }
+};
+
+
+class LTrapAllocationMemento : public LTemplateInstruction<0, 1, 1> {
+ public:
+ LTrapAllocationMemento(LOperand* object,
+ LOperand* temp) {
+ inputs_[0] = object;
+ temps_[0] = temp;
+ }
+
+ LOperand* object() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(TrapAllocationMemento,
+ "trap-allocation-memento")
};
@@ -2059,8 +2168,10 @@ class LCheckPrototypeMaps: public LTemplateInstruction<1, 0, 1> {
DECLARE_CONCRETE_INSTRUCTION(CheckPrototypeMaps, "check-prototype-maps")
DECLARE_HYDROGEN_ACCESSOR(CheckPrototypeMaps)
- Handle<JSObject> prototype() const { return hydrogen()->prototype(); }
- Handle<JSObject> holder() const { return hydrogen()->holder(); }
+ ZoneList<Handle<JSObject> >* prototypes() const {
+ return hydrogen()->prototypes();
+ }
+ ZoneList<Handle<Map> >* maps() const { return hydrogen()->maps(); }
};
@@ -2140,6 +2251,21 @@ class LAllocateObject: public LTemplateInstruction<1, 0, 1> {
};
+class LAllocate: public LTemplateInstruction<1, 1, 1> {
+ public:
+ LAllocate(LOperand* size, LOperand* temp) {
+ inputs_[0] = size;
+ temps_[0] = temp;
+ }
+
+ LOperand* size() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(Allocate, "allocate")
+ DECLARE_HYDROGEN_ACCESSOR(Allocate)
+};
+
+
class LFastLiteral: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(FastLiteral, "fast-literal")
@@ -2265,8 +2391,9 @@ class LOsrEntry: public LTemplateInstruction<0, 0, 0> {
// slot, i.e., that must also be restored to the spill slot on OSR entry.
// NULL if the register has no assigned spill slot. Indexed by allocation
// index.
- LOperand* register_spills_[Register::kNumAllocatableRegisters];
- LOperand* double_register_spills_[DoubleRegister::kNumAllocatableRegisters];
+ LOperand* register_spills_[Register::kMaxNumAllocatableRegisters];
+ LOperand* double_register_spills_[
+ DoubleRegister::kMaxNumAllocatableRegisters];
};
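
// A theme running through this file: per-register-class counts stop being
// compile-time constants (kNumAllocatableRegisters) and become runtime
// queries (NumAllocatableRegisters()), while fixed storage such as the spill
// arrays above is sized by a compile-time upper bound
// (kMaxNumAllocatableRegisters). A sketch of the idiom, with an invented
// feature flag in place of the real CPU probe:
struct RegisterConfigSketch {
  static const int kMaxNumAllocatable = 16;  // upper bound for array sizing
  static bool cpu_has_extra_regs;            // e.g. a CPU-feature probe
  static int NumAllocatable() { return cpu_has_extra_regs ? 16 : 8; }
};
bool RegisterConfigSketch::cpu_has_extra_regs = false;

static void* spills[RegisterConfigSketch::kMaxNumAllocatable];  // fixed size

void ClearSpills() {
  // Iterate only over the registers actually available at runtime.
  for (int i = 0; i < RegisterConfigSketch::NumAllocatable(); ++i) {
    spills[i] = nullptr;
  }
}
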
diff --git a/src/3rdparty/v8/src/x64/macro-assembler-x64.cc b/src/3rdparty/v8/src/x64/macro-assembler-x64.cc
index 962c2e8..5f467e3 100644
--- a/src/3rdparty/v8/src/x64/macro-assembler-x64.cc
+++ b/src/3rdparty/v8/src/x64/macro-assembler-x64.cc
@@ -162,7 +162,7 @@ void MacroAssembler::PushAddress(ExternalReference source) {
int64_t address = reinterpret_cast<int64_t>(source.address());
if (is_int32(address) && !Serializer::enabled()) {
if (emit_debug_code()) {
- movq(kScratchRegister, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
+ movq(kScratchRegister, BitCast<int64_t>(kZapValue), RelocInfo::NONE64);
}
push(Immediate(static_cast<int32_t>(address)));
return;
@@ -287,7 +287,7 @@ void MacroAssembler::InNewSpace(Register object,
ASSERT(is_int32(static_cast<int64_t>(HEAP->NewSpaceMask())));
intptr_t new_space_start =
reinterpret_cast<intptr_t>(HEAP->NewSpaceStart());
- movq(kScratchRegister, -new_space_start, RelocInfo::NONE);
+ movq(kScratchRegister, -new_space_start, RelocInfo::NONE64);
if (scratch.is(object)) {
addq(scratch, kScratchRegister);
} else {
@@ -342,8 +342,8 @@ void MacroAssembler::RecordWriteField(
// Clobber clobbered input registers when running with the debug-code flag
// turned on to provoke errors.
if (emit_debug_code()) {
- movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
- movq(dst, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
+ movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE64);
+ movq(dst, BitCast<int64_t>(kZapValue), RelocInfo::NONE64);
}
}
@@ -376,8 +376,8 @@ void MacroAssembler::RecordWriteArray(Register object,
// Clobber clobbered input registers when running with the debug-code flag
// turned on to provoke errors.
if (emit_debug_code()) {
- movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
- movq(index, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
+ movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE64);
+ movq(index, BitCast<int64_t>(kZapValue), RelocInfo::NONE64);
}
}
@@ -442,8 +442,8 @@ void MacroAssembler::RecordWrite(Register object,
// Clobber clobbered registers when running with the debug-code flag
// turned on to provoke errors.
if (emit_debug_code()) {
- movq(address, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
- movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE);
+ movq(address, BitCast<int64_t>(kZapValue), RelocInfo::NONE64);
+ movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE64);
}
}
@@ -524,11 +524,11 @@ void MacroAssembler::Abort(const char* msg) {
}
#endif
push(rax);
- movq(kScratchRegister, p0, RelocInfo::NONE);
+ movq(kScratchRegister, p0, RelocInfo::NONE64);
push(kScratchRegister);
movq(kScratchRegister,
reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(p1 - p0))),
- RelocInfo::NONE);
+ RelocInfo::NONE64);
push(kScratchRegister);
if (!has_frame_) {
@@ -546,13 +546,13 @@ void MacroAssembler::Abort(const char* msg) {
void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
ASSERT(AllowThisStubCall(stub)); // Calls are not allowed in some stubs
- Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
+ Call(stub->GetCode(isolate()), RelocInfo::CODE_TARGET, ast_id);
}
void MacroAssembler::TailCallStub(CodeStub* stub) {
ASSERT(allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe());
- Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
+ Jump(stub->GetCode(isolate()), RelocInfo::CODE_TARGET);
}
@@ -701,13 +701,13 @@ void MacroAssembler::CallApiFunctionAndReturn(Address function_address,
Factory* factory = isolate()->factory();
ExternalReference next_address =
- ExternalReference::handle_scope_next_address();
+ ExternalReference::handle_scope_next_address(isolate());
const int kNextOffset = 0;
const int kLimitOffset = Offset(
- ExternalReference::handle_scope_limit_address(),
+ ExternalReference::handle_scope_limit_address(isolate()),
next_address);
const int kLevelOffset = Offset(
- ExternalReference::handle_scope_level_address(),
+ ExternalReference::handle_scope_level_address(isolate()),
next_address);
ExternalReference scheduled_exception_address =
ExternalReference::scheduled_exception_address(isolate());
@@ -720,11 +720,28 @@ void MacroAssembler::CallApiFunctionAndReturn(Address function_address,
movq(prev_next_address_reg, Operand(base_reg, kNextOffset));
movq(prev_limit_reg, Operand(base_reg, kLimitOffset));
addl(Operand(base_reg, kLevelOffset), Immediate(1));
+
+ if (FLAG_log_timer_events) {
+ FrameScope frame(this, StackFrame::MANUAL);
+ PushSafepointRegisters();
+ PrepareCallCFunction(0);
+ CallCFunction(ExternalReference::log_enter_external_function(isolate()), 0);
+ PopSafepointRegisters();
+ }
+
// Call the api function!
movq(rax, reinterpret_cast<int64_t>(function_address),
- RelocInfo::RUNTIME_ENTRY);
+ RelocInfo::EXTERNAL_REFERENCE);
call(rax);
+ if (FLAG_log_timer_events) {
+ FrameScope frame(this, StackFrame::MANUAL);
+ PushSafepointRegisters();
+ PrepareCallCFunction(0);
+ CallCFunction(ExternalReference::log_leave_external_function(isolate()), 0);
+ PopSafepointRegisters();
+ }
+
#if defined(_WIN64) && !defined(__MINGW64__)
// rax keeps a pointer to v8::Handle, unpack it.
movq(rax, Operand(rax, 0));
@@ -817,7 +834,7 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& ext,
// Set the entry point and jump to the C entry runtime stub.
LoadAddress(rbx, ext);
CEntryStub ces(result_size);
- jmp(ces.GetCode(), RelocInfo::CODE_TARGET);
+ jmp(ces.GetCode(isolate()), RelocInfo::CODE_TARGET);
}
@@ -882,8 +899,8 @@ void MacroAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
// R12 to r15 are callee save on all platforms.
if (fp_mode == kSaveFPRegs) {
CpuFeatures::Scope scope(SSE2);
- subq(rsp, Immediate(kDoubleSize * XMMRegister::kNumRegisters));
- for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
+ subq(rsp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters));
+ for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
XMMRegister reg = XMMRegister::from_code(i);
movsd(Operand(rsp, i * kDoubleSize), reg);
}
@@ -897,11 +914,11 @@ void MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode,
Register exclusion3) {
if (fp_mode == kSaveFPRegs) {
CpuFeatures::Scope scope(SSE2);
- for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
+ for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
XMMRegister reg = XMMRegister::from_code(i);
movsd(reg, Operand(rsp, i * kDoubleSize));
}
- addq(rsp, Immediate(kDoubleSize * XMMRegister::kNumRegisters));
+ addq(rsp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters));
}
for (int i = kNumberOfSavedRegs - 1; i >= 0; i--) {
Register reg = saved_regs[i];
@@ -920,7 +937,7 @@ void MacroAssembler::Set(Register dst, int64_t x) {
} else if (is_int32(x)) {
movq(dst, Immediate(static_cast<int32_t>(x)));
} else {
- movq(dst, x, RelocInfo::NONE);
+ movq(dst, x, RelocInfo::NONE64);
}
}
@@ -985,7 +1002,7 @@ void MacroAssembler::LoadSmiConstant(Register dst, Smi* source) {
if (emit_debug_code()) {
movq(dst,
reinterpret_cast<uint64_t>(Smi::FromInt(kSmiConstantRegisterValue)),
- RelocInfo::NONE);
+ RelocInfo::NONE64);
cmpq(dst, kSmiConstantRegister);
if (allow_stub_calls()) {
Assert(equal, "Uninitialized kSmiConstantRegister");
@@ -1032,7 +1049,7 @@ void MacroAssembler::LoadSmiConstant(Register dst, Smi* source) {
UNREACHABLE();
return;
default:
- movq(dst, reinterpret_cast<uint64_t>(source), RelocInfo::NONE);
+ movq(dst, reinterpret_cast<uint64_t>(source), RelocInfo::NONE64);
return;
}
if (negative) {
@@ -2769,7 +2786,8 @@ void MacroAssembler::StoreNumberToDoubleElements(
Register elements,
Register index,
XMMRegister xmm_scratch,
- Label* fail) {
+ Label* fail,
+ int elements_offset) {
Label smi_value, is_nan, maybe_nan, not_nan, have_double_value, done;
JumpIfSmi(maybe_number, &smi_value, Label::kNear);
@@ -2788,7 +2806,8 @@ void MacroAssembler::StoreNumberToDoubleElements(
bind(&not_nan);
movsd(xmm_scratch, FieldOperand(maybe_number, HeapNumber::kValueOffset));
bind(&have_double_value);
- movsd(FieldOperand(elements, index, times_8, FixedDoubleArray::kHeaderSize),
+ movsd(FieldOperand(elements, index, times_8,
+ FixedDoubleArray::kHeaderSize - elements_offset),
xmm_scratch);
jmp(&done);
@@ -2811,7 +2830,8 @@ void MacroAssembler::StoreNumberToDoubleElements(
// Preserve original value.
SmiToInteger32(kScratchRegister, maybe_number);
cvtlsi2sd(xmm_scratch, kScratchRegister);
- movsd(FieldOperand(elements, index, times_8, FixedDoubleArray::kHeaderSize),
+ movsd(FieldOperand(elements, index, times_8,
+ FixedDoubleArray::kHeaderSize - elements_offset),
xmm_scratch);
bind(&done);
}
@@ -2891,23 +2911,14 @@ void MacroAssembler::ClampDoubleToUint8(XMMRegister input_reg,
}
-static double kUint32Bias =
- static_cast<double>(static_cast<uint32_t>(0xFFFFFFFF)) + 1;
-
-
void MacroAssembler::LoadUint32(XMMRegister dst,
Register src,
XMMRegister scratch) {
- Label done;
- cmpl(src, Immediate(0));
- movq(kScratchRegister,
- reinterpret_cast<int64_t>(&kUint32Bias),
- RelocInfo::NONE);
- movsd(scratch, Operand(kScratchRegister, 0));
- cvtlsi2sd(dst, src);
- j(not_sign, &done, Label::kNear);
- addsd(dst, scratch);
- bind(&done);
+ if (FLAG_debug_code) {
+ cmpq(src, Immediate(0xffffffff));
+ Assert(below_equal, "input GPR is expected to have upper32 cleared");
+ }
+ cvtqsi2sd(dst, src);
}
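
// The rewritten LoadUint32 drops the kUint32Bias constant entirely: once the
// upper 32 bits of the GPR are known to be clear (hence the new debug
// Assert), the value is already a correct non-negative 64-bit integer and a
// single cvtqsi2sd suffices. Both strategies, checked against each other:
#include <cassert>
#include <cstdint>

double Uint32ToDoubleBiased(uint32_t v) {  // old sequence
  double d = static_cast<double>(static_cast<int32_t>(v));  // cvtlsi2sd
  if (static_cast<int32_t>(v) < 0) d += 4294967296.0;       // addsd 2^32 bias
  return d;
}

double Uint32ToDoubleWidened(uint32_t v) {  // new sequence
  return static_cast<double>(static_cast<int64_t>(v));      // cvtqsi2sd
}

int main() {
  for (uint32_t v : {0u, 1u, 0x7fffffffu, 0x80000000u, 0xffffffffu}) {
    assert(Uint32ToDoubleBiased(v) == Uint32ToDoubleWidened(v));
  }
}
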
@@ -2986,7 +2997,7 @@ void MacroAssembler::AssertSmi(const Operand& object) {
void MacroAssembler::AssertZeroExtended(Register int32_register) {
if (emit_debug_code()) {
ASSERT(!int32_register.is(kScratchRegister));
- movq(kScratchRegister, 0x100000000l, RelocInfo::NONE);
+ movq(kScratchRegister, 0x100000000l, RelocInfo::NONE64);
cmpq(kScratchRegister, int32_register);
Check(above_equal, "32 bit value in register is not zero-extended");
}
@@ -3128,7 +3139,7 @@ void MacroAssembler::DebugBreak() {
LoadAddress(rbx, ExternalReference(Runtime::kDebugBreak, isolate()));
CEntryStub ces(1);
ASSERT(AllowThisStubCall(&ces));
- Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
+ Call(ces.GetCode(isolate()), RelocInfo::DEBUG_BREAK);
}
#endif // ENABLE_DEBUGGER_SUPPORT
@@ -3403,11 +3414,11 @@ void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space,
#endif
// Optionally save all XMM registers.
if (save_doubles) {
- int space = XMMRegister::kNumRegisters * kDoubleSize +
+ int space = XMMRegister::kMaxNumRegisters * kDoubleSize +
arg_stack_space * kPointerSize;
subq(rsp, Immediate(space));
int offset = -2 * kPointerSize;
- for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; i++) {
+ for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); i++) {
XMMRegister reg = XMMRegister::FromAllocationIndex(i);
movsd(Operand(rbp, offset - ((i + 1) * kDoubleSize)), reg);
}
@@ -3451,7 +3462,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles) {
// r15 : argv
if (save_doubles) {
int offset = -2 * kPointerSize;
- for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; i++) {
+ for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); i++) {
XMMRegister reg = XMMRegister::FromAllocationIndex(i);
movsd(reg, Operand(rbp, offset - ((i + 1) * kDoubleSize)));
}
@@ -3733,6 +3744,7 @@ void MacroAssembler::AllocateInNewSpace(int object_size,
Register scratch,
Label* gc_required,
AllocationFlags flags) {
+ ASSERT((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
@@ -3752,6 +3764,13 @@ void MacroAssembler::AllocateInNewSpace(int object_size,
// Load address of new object into result.
LoadAllocationTopHelper(result, scratch, flags);
+ // Align the next allocation. Storing the filler map without checking top is
+ // always safe because the limit of the heap is always aligned.
+ if (((flags & DOUBLE_ALIGNMENT) != 0) && FLAG_debug_code) {
+ testq(result, Immediate(kDoubleAlignmentMask));
+ Check(zero, "Allocation is not double aligned");
+ }
+
// Calculate new top and bail out if new space is exhausted.
ExternalReference new_space_allocation_limit =
ExternalReference::new_space_allocation_limit_address(isolate());
@@ -3770,15 +3789,17 @@ void MacroAssembler::AllocateInNewSpace(int object_size,
// Update allocation top.
UpdateAllocationTopHelper(top_reg, scratch);
+ bool tag_result = (flags & TAG_OBJECT) != 0;
if (top_reg.is(result)) {
- if ((flags & TAG_OBJECT) != 0) {
+ if (tag_result) {
subq(result, Immediate(object_size - kHeapObjectTag));
} else {
subq(result, Immediate(object_size));
}
- } else if ((flags & TAG_OBJECT) != 0) {
+ } else if (tag_result) {
// Tag the result if requested.
- addq(result, Immediate(kHeapObjectTag));
+ ASSERT(kHeapObjectTag == 1);
+ incq(result);
}
}
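
// Replacing addq(result, Immediate(kHeapObjectTag)) with incq is only valid
// because kHeapObjectTag == 1 -- hence the new ASSERT. The tag rides in the
// low bit that object alignment guarantees is free. Host-side sketch of the
// invariant (constants illustrative):
#include <cassert>
#include <cstdint>

constexpr uintptr_t kTag = 1;        // stands in for kHeapObjectTag
constexpr uintptr_t kAlignMask = 3;  // objects at least 4-byte aligned

uintptr_t Tag(uintptr_t raw) {
  assert((raw & kAlignMask) == 0);   // alignment keeps the tag bit clear
  return raw + kTag;                 // the incq in the hunk above
}

uintptr_t Untag(uintptr_t tagged) { return tagged - kTag; }

int main() {
  alignas(8) static int slot;
  uintptr_t raw = reinterpret_cast<uintptr_t>(&slot);
  assert(Untag(Tag(raw)) == raw);
}
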
@@ -3791,6 +3812,7 @@ void MacroAssembler::AllocateInNewSpace(int header_size,
Register scratch,
Label* gc_required,
AllocationFlags flags) {
+ ASSERT((flags & SIZE_IN_WORDS) == 0);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
@@ -3809,6 +3831,13 @@ void MacroAssembler::AllocateInNewSpace(int header_size,
// Load address of new object into result.
LoadAllocationTopHelper(result, scratch, flags);
+ // Align the next allocation. Storing the filler map without checking top is
+ // always safe because the limit of the heap is always aligned.
+ if (((flags & DOUBLE_ALIGNMENT) != 0) && FLAG_debug_code) {
+ testq(result, Immediate(kDoubleAlignmentMask));
+ Check(zero, "Allocation is not double aligned");
+ }
+
// Calculate new top and bail out if new space is exhausted.
ExternalReference new_space_allocation_limit =
ExternalReference::new_space_allocation_limit_address(isolate());
@@ -3827,7 +3856,8 @@ void MacroAssembler::AllocateInNewSpace(int header_size,
// Tag the result if requested.
if ((flags & TAG_OBJECT) != 0) {
- addq(result, Immediate(kHeapObjectTag));
+ ASSERT(kHeapObjectTag == 1);
+ incq(result);
}
}
@@ -3838,6 +3868,7 @@ void MacroAssembler::AllocateInNewSpace(Register object_size,
Register scratch,
Label* gc_required,
AllocationFlags flags) {
+ ASSERT((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
@@ -3871,6 +3902,13 @@ void MacroAssembler::AllocateInNewSpace(Register object_size,
// Update allocation top.
UpdateAllocationTopHelper(result_end, scratch);
+ // Align the next allocation. Storing the filler map without checking top is
+ // always safe because the limit of the heap is always aligned.
+ if (((flags & DOUBLE_ALIGNMENT) != 0) && FLAG_debug_code) {
+ testq(result, Immediate(kDoubleAlignmentMask));
+ Check(zero, "Allocation is not double aligned");
+ }
+
// Tag the result if requested.
if ((flags & TAG_OBJECT) != 0) {
addq(result, Immediate(kHeapObjectTag));
@@ -3957,7 +3995,7 @@ void MacroAssembler::AllocateAsciiString(Register result,
Label* gc_required) {
// Calculate the number of bytes needed for the characters in the string while
// observing object alignment.
- const int kHeaderAlignment = SeqAsciiString::kHeaderSize &
+ const int kHeaderAlignment = SeqOneByteString::kHeaderSize &
kObjectAlignmentMask;
movl(scratch1, length);
ASSERT(kCharSize == 1);
@@ -3968,7 +4006,7 @@ void MacroAssembler::AllocateAsciiString(Register result,
}
// Allocate ASCII string in new space.
- AllocateInNewSpace(SeqAsciiString::kHeaderSize,
+ AllocateInNewSpace(SeqOneByteString::kHeaderSize,
times_1,
scratch1,
result,
@@ -4231,6 +4269,15 @@ void MacroAssembler::LoadGlobalFunction(int index, Register function) {
}
+void MacroAssembler::LoadArrayFunction(Register function) {
+ movq(function,
+ Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ movq(function, FieldOperand(function, GlobalObject::kGlobalContextOffset));
+ movq(function,
+ Operand(function, Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
+}
+
+
void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
Register map) {
// Load the initial map. The global functions all have initial maps.
@@ -4563,6 +4610,27 @@ void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
j(not_equal, &next);
}
+void MacroAssembler::TestJSArrayForAllocationSiteInfo(
+ Register receiver_reg,
+ Register scratch_reg) {
+ Label no_info_available;
+ ExternalReference new_space_start =
+ ExternalReference::new_space_start(isolate());
+ ExternalReference new_space_allocation_top =
+ ExternalReference::new_space_allocation_top_address(isolate());
+
+ lea(scratch_reg, Operand(receiver_reg,
+ JSArray::kSize + AllocationSiteInfo::kSize - kHeapObjectTag));
+ movq(kScratchRegister, new_space_start);
+ cmpq(scratch_reg, kScratchRegister);
+ j(less, &no_info_available);
+ cmpq(scratch_reg, ExternalOperand(new_space_allocation_top));
+ j(greater, &no_info_available);
+ CompareRoot(MemOperand(scratch_reg, -AllocationSiteInfo::kSize),
+ Heap::kAllocationSiteInfoMapRootIndex);
+ bind(&no_info_available);
+}
+
} } // namespace v8::internal
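
// TestJSArrayForAllocationSiteInfo probes the word immediately behind a
// JSArray: only if that address still lies inside new space (between start
// and the current allocation top) can it hold an AllocationSiteInfo map, and
// only then is the map compared. A buffer-based sketch of the same
// bounds-then-marker test; the sizes and marker value are invented:
#include <cstdint>

constexpr uint64_t kMementoMarker = 0xdeadbeef;  // AllocationSiteInfo map analogue

struct NewSpaceSketch {
  const uint64_t* start;
  const uint64_t* top;  // next free slot; everything below it is allocated
};

bool HasMemento(const NewSpaceSketch& space, const uint64_t* object,
                int size_words) {
  const uint64_t* candidate = object + size_words;  // word just past the array
  if (candidate < space.start || candidate >= space.top) return false;
  return *candidate == kMementoMarker;  // the CompareRoot analogue
}

int main() {
  uint64_t heap[4] = {7, 7, kMementoMarker, 0};
  NewSpaceSketch space{heap, heap + 3};
  return HasMemento(space, heap, 2) ? 0 : 1;  // memento sits after words 0..1
}
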
diff --git a/src/3rdparty/v8/src/x64/macro-assembler-x64.h b/src/3rdparty/v8/src/x64/macro-assembler-x64.h
index fdddc13..43b6bfb 100644
--- a/src/3rdparty/v8/src/x64/macro-assembler-x64.h
+++ b/src/3rdparty/v8/src/x64/macro-assembler-x64.h
@@ -35,18 +35,6 @@
namespace v8 {
namespace internal {
-// Flags used for the AllocateInNewSpace functions.
-enum AllocationFlags {
- // No special flags.
- NO_ALLOCATION_FLAGS = 0,
- // Return the pointer to the allocated already tagged as a heap object.
- TAG_OBJECT = 1 << 0,
- // The content of the result register already contains the allocation top in
- // new space.
- RESULT_CONTAINS_TOP = 1 << 1
-};
-
-
// Default scratch register used by MacroAssembler (and other code that needs
// a spare register). The register isn't callee save, and not used by the
// function calling convention.
@@ -385,7 +373,7 @@ class MacroAssembler: public Assembler {
void InitializeSmiConstantRegister() {
movq(kSmiConstantRegister,
reinterpret_cast<uint64_t>(Smi::FromInt(kSmiConstantRegisterValue)),
- RelocInfo::NONE);
+ RelocInfo::NONE64);
}
// Conversions between tagged smi values and non-tagged integer values.
@@ -895,7 +883,8 @@ class MacroAssembler: public Assembler {
Register elements,
Register index,
XMMRegister xmm_scratch,
- Label* fail);
+ Label* fail,
+ int elements_offset = 0);
// Compare an object's map with the specified map and its transitioned
// elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. FLAGS are set with
@@ -1165,6 +1154,7 @@ class MacroAssembler: public Assembler {
// Load the global function with the given index.
void LoadGlobalFunction(int index, Register function);
+ void LoadArrayFunction(Register function);
// Load the initial map from the global function. The registers
// function and map can be the same.
@@ -1326,6 +1316,15 @@ class MacroAssembler: public Assembler {
void CheckEnumCache(Register null_value,
Label* call_runtime);
+ // AllocationSiteInfo support. Arrays may have an associated
+ // AllocationSiteInfo object that can be checked for in order to pretransition
+ // to another type.
+ // On entry, receiver_reg should point to the array object.
+ // scratch_reg gets clobbered.
+  // If allocation info is present, the condition flags are set to equal.
+ void TestJSArrayForAllocationSiteInfo(Register receiver_reg,
+ Register scratch_reg);
+
private:
// Order general registers are pushed by Pushad.
// rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r14, r15.
@@ -1413,9 +1412,9 @@ class MacroAssembler: public Assembler {
return kNumSafepointRegisters - kSafepointPushRegisterIndices[reg_code] - 1;
}
- // Needs access to SafepointRegisterStackIndex for optimized frame
+ // Needs access to SafepointRegisterStackIndex for compiled frame
// traversal.
- friend class OptimizedFrame;
+ friend class StandardFrame;
};
@@ -1489,17 +1488,16 @@ extern void LogGeneratedCodeCoverage(const char* file_line);
#define CODE_COVERAGE_STRINGIFY(x) #x
#define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
#define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
-#define ACCESS_MASM(masm) { \
- byte* x64_coverage_function = \
- reinterpret_cast<byte*>(FUNCTION_ADDR(LogGeneratedCodeCoverage)); \
- masm->pushfd(); \
- masm->pushad(); \
- masm->push(Immediate(reinterpret_cast<int>(&__FILE_LINE__))); \
- masm->call(x64_coverage_function, RelocInfo::RUNTIME_ENTRY); \
- masm->pop(rax); \
- masm->popad(); \
- masm->popfd(); \
- } \
+#define ACCESS_MASM(masm) { \
+ Address x64_coverage_function = FUNCTION_ADDR(LogGeneratedCodeCoverage); \
+ masm->pushfq(); \
+ masm->Pushad(); \
+ masm->push(Immediate(reinterpret_cast<int>(&__FILE_LINE__))); \
+ masm->Call(x64_coverage_function, RelocInfo::EXTERNAL_REFERENCE); \
+ masm->pop(rax); \
+ masm->Popad(); \
+ masm->popfq(); \
+ } \
masm->
#else
#define ACCESS_MASM(masm) masm->
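
The rewritten macro keeps the long-standing trick of making every assembler call emitted through it log its source location first. A simplified, compilable illustration of the prefix mechanism only; the real macro above wraps the logging call in a statement block that saves and restores machine state.

    #include <cstdio>

    struct Masm {
      void push(int imm) { std::printf("push %d\n", imm); }
    };

    void LogCoverage(const char* file_line) { std::printf("%s\n", file_line); }

    #define STRINGIFY(x) #x
    #define TOSTRING(x) STRINGIFY(x)
    #define FILE_LINE __FILE__ ":" TOSTRING(__LINE__)
    // Evaluates to "masm->" after logging, so call sites need no edits.
    #define ACCESS_MASM(masm) (LogCoverage(FILE_LINE), (masm))->

    int main() {
      Masm m;
      Masm* masm = &m;
      ACCESS_MASM(masm) push(7);  // logs this file:line, then emits push
      return 0;
    }
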
diff --git a/src/3rdparty/v8/src/x64/regexp-macro-assembler-x64.cc b/src/3rdparty/v8/src/x64/regexp-macro-assembler-x64.cc
index 86f7bfe..f5b5e95 100644
--- a/src/3rdparty/v8/src/x64/regexp-macro-assembler-x64.cc
+++ b/src/3rdparty/v8/src/x64/regexp-macro-assembler-x64.cc
@@ -234,7 +234,7 @@ void RegExpMacroAssemblerX64::CheckCharacters(Vector<const uc16> str,
// If input is ASCII, don't even bother calling here if the string to
// match contains a non-ASCII character.
if (mode_ == ASCII) {
- ASSERT(String::IsAscii(str.start(), str.length()));
+ ASSERT(String::IsOneByte(str.start(), str.length()));
}
#endif
int byte_length = str.length() * char_size();
@@ -280,7 +280,7 @@ void RegExpMacroAssemblerX64::CheckCharacters(Vector<const uc16> str,
(static_cast<uint64_t>(str[i + 5]) << 40) ||
(static_cast<uint64_t>(str[i + 6]) << 48) ||
(static_cast<uint64_t>(str[i + 7]) << 56);
- __ movq(rax, combined_chars, RelocInfo::NONE);
+ __ movq(rax, combined_chars, RelocInfo::NONE64);
__ cmpq(rax, Operand(rbx, byte_offset + i));
i += 8;
} else if (i + 4 <= n) {
@@ -300,7 +300,7 @@ void RegExpMacroAssemblerX64::CheckCharacters(Vector<const uc16> str,
ASSERT(mode_ == UC16);
if (i + 4 <= n) {
uint64_t combined_chars = *reinterpret_cast<const uint64_t*>(&str[i]);
- __ movq(rax, combined_chars, RelocInfo::NONE);
+ __ movq(rax, combined_chars, RelocInfo::NONE64);
__ cmpq(rax,
Operand(rsi, rdi, times_1, byte_offset + i * sizeof(uc16)));
i += 4;
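
What these NONE64 moves implement is a block comparison: up to eight pattern bytes (or four UC16 units) are folded into a single 64-bit immediate and compared against the subject with one cmpq. A hedged C++ restatement of the idea, with memcpy standing in for the movq/cmpq pair:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    // Compare eight subject bytes against eight pattern bytes at once.
    bool Match8Bytes(const char* subject, const char* pattern) {
      uint64_t a, b;
      std::memcpy(&a, subject, sizeof(a));
      std::memcpy(&b, pattern, sizeof(b));
      return a == b;
    }

    int main() {
      assert(Match8Bytes("abcdefgh", "abcdefgh"));
      assert(!Match8Bytes("abcdefgh", "abcdefgX"));
      return 0;
    }
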
@@ -393,8 +393,17 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
__ j(not_equal, on_no_match); // Definitely not equal.
__ subb(rax, Immediate('a'));
__ cmpb(rax, Immediate('z' - 'a'));
+#ifndef ENABLE_LATIN_1
__ j(above, on_no_match); // Weren't letters anyway.
-
+#else
+ __ j(below_equal, &loop_increment); // In range 'a'-'z'.
+ // Latin-1: Check for values in range [224,254] but not 247.
+ __ subb(rax, Immediate(224 - 'a'));
+ __ cmpb(rax, Immediate(254 - 224));
+ __ j(above, on_no_match); // Weren't Latin-1 letters.
+ __ cmpb(rax, Immediate(247 - 224)); // Check for 247.
+ __ j(equal, on_no_match);
+#endif
__ bind(&loop_increment);
// Increment pointers into match and capture strings.
__ addq(r11, Immediate(1));
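
A direct restatement of the new Latin-1 branch in plain C++; the ranges come straight from the immediates above: 'a'..'z', plus 224..254 excluding 247 (the division sign, which has no case pair).

    #include <cassert>
    #include <cstdint>

    // Accepts the case-folded characters the back-reference check treats
    // as letters under ENABLE_LATIN_1.
    bool IsLatin1LowercaseLetter(uint8_t c) {
      if (c >= 'a' && c <= 'z') return true;
      return c >= 224 && c <= 254 && c != 247;
    }

    int main() {
      assert(IsLatin1LowercaseLetter('q'));
      assert(IsLatin1LowercaseLetter(0xE9));   // e-acute
      assert(!IsLatin1LowercaseLetter(0xF7));  // division sign
      return 0;
    }
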
@@ -610,7 +619,7 @@ void RegExpMacroAssemblerX64::CheckBitInTable(
Label* on_bit_set) {
__ Move(rax, table);
Register index = current_character();
- if (mode_ != ASCII || kTableMask != String::kMaxAsciiCharCode) {
+ if (mode_ != ASCII || kTableMask != String::kMaxOneByteCharCode) {
__ movq(rbx, current_character());
__ and_(rbx, Immediate(kTableMask));
index = rbx;
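
CheckBitInTable consults a byte table indexed by the current character, masking the index whenever characters can exceed the table; the condition changes from kMaxAsciiCharCode to kMaxOneByteCharCode because Latin-1 widens the one-byte range to 255. A sketch, with the table size assumed to be 128 as in the surrounding code:

    #include <cassert>
    #include <cstdint>

    const int kTableSize = 128;
    const int kTableMask = kTableSize - 1;

    // Mask the index only when the character range can exceed the table.
    bool CharacterInTable(const uint8_t* table, uint32_t current_char,
                          bool needs_mask) {
      uint32_t index = needs_mask ? (current_char & kTableMask) : current_char;
      return table[index] != 0;
    }

    int main() {
      uint8_t table[kTableSize] = {0};
      table['a'] = 1;
      assert(CharacterInTable(table, 'a', true));
      assert(!CharacterInTable(table, 'b', true));
      return 0;
    }
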
@@ -1305,7 +1314,7 @@ int RegExpMacroAssemblerX64::CheckStackGuardState(Address* return_address,
Handle<String> subject(frame_entry<String*>(re_frame, kInputString));
// Current string.
- bool is_ascii = subject->IsAsciiRepresentationUnderneath();
+ bool is_ascii = subject->IsOneByteRepresentationUnderneath();
ASSERT(re_code->instruction_start() <= *return_address);
ASSERT(*return_address <=
@@ -1336,7 +1345,7 @@ int RegExpMacroAssemblerX64::CheckStackGuardState(Address* return_address,
}
// String might have changed.
- if (subject_tmp->IsAsciiRepresentation() != is_ascii) {
+ if (subject_tmp->IsOneByteRepresentation() != is_ascii) {
// If we changed between an ASCII and a UC16 string, the specialized
// code cannot be used, and we need to restart regexp matching from
// scratch (including, potentially, compiling a new version of the code).
diff --git a/src/3rdparty/v8/src/x64/stub-cache-x64.cc b/src/3rdparty/v8/src/x64/stub-cache-x64.cc
index b120efb..c471569 100644
--- a/src/3rdparty/v8/src/x64/stub-cache-x64.cc
+++ b/src/3rdparty/v8/src/x64/stub-cache-x64.cc
@@ -110,14 +110,14 @@ static void ProbeTable(Isolate* isolate,
// the property. This function may return false negatives, so miss_label
// must always call a backup property check that is complete.
// This function is safe to call if the receiver has fast properties.
-// Name must be a symbol and receiver must be a heap object.
+// Name must be an internalized string and receiver must be a heap object.
static void GenerateDictionaryNegativeLookup(MacroAssembler* masm,
Label* miss_label,
Register receiver,
Handle<String> name,
Register r0,
Register r1) {
- ASSERT(name->IsSymbol());
+ ASSERT(name->IsInternalizedString());
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->negative_lookups(), 1);
__ IncrementCounter(counters->negative_lookups_miss(), 1);
@@ -350,19 +350,25 @@ void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
Register dst,
Register src,
Handle<JSObject> holder,
- int index) {
- // Adjust for the number of properties stored in the holder.
- index -= holder->map()->inobject_properties();
- if (index < 0) {
- // Get the property straight out of the holder.
- int offset = holder->map()->instance_size() + (index * kPointerSize);
- __ movq(dst, FieldOperand(src, offset));
- } else {
+ PropertyIndex index) {
+ DoGenerateFastPropertyLoad(
+ masm, dst, src, index.is_inobject(holder), index.translate(holder));
+}
+
+
+void StubCompiler::DoGenerateFastPropertyLoad(MacroAssembler* masm,
+ Register dst,
+ Register src,
+ bool inobject,
+ int index) {
+ int offset = index * kPointerSize;
+ if (!inobject) {
// Calculate the offset into the properties array.
- int offset = index * kPointerSize + FixedArray::kHeaderSize;
+ offset = offset + FixedArray::kHeaderSize;
__ movq(dst, FieldOperand(src, JSObject::kPropertiesOffset));
- __ movq(dst, FieldOperand(dst, offset));
+ src = dst;
}
+ __ movq(dst, FieldOperand(src, offset));
}
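
This refactoring splits property loads into a translation step (PropertyIndex to an in-object flag plus array index) and a generic loader. A sketch of the two addressing cases over plain C++ data; the structures are stand-ins, not V8's object layouts:

    #include <cassert>
    #include <cstdint>
    #include <vector>

    struct Object {
      std::vector<intptr_t> inobject_fields;  // stored inside the object
      std::vector<intptr_t> properties;       // overflow backing store
    };

    // In-object: the field lives at a fixed offset in the object itself.
    // Out-of-object: one extra indirection through the properties array,
    // which is what the "src = dst" re-aim above expresses.
    intptr_t FastPropertyLoad(const Object& obj, bool inobject, int index) {
      return inobject ? obj.inobject_fields[index] : obj.properties[index];
    }

    int main() {
      Object o;
      o.inobject_fields.push_back(10);
      o.inobject_fields.push_back(11);
      o.properties.push_back(20);
      assert(FastPropertyLoad(o, true, 1) == 11);
      assert(FastPropertyLoad(o, false, 0) == 20);
      return 0;
    }
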
@@ -467,7 +473,7 @@ static void GenerateFastApiCall(MacroAssembler* masm,
// Pass the additional arguments.
__ movq(Operand(rsp, 2 * kPointerSize), rdi);
Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
- Handle<Object> call_data(api_call_info->data());
+ Handle<Object> call_data(api_call_info->data(), masm->isolate());
if (masm->isolate()->heap()->InNewSpace(*call_data)) {
__ Move(rcx, api_call_info);
__ movq(rbx, FieldOperand(rcx, CallHandlerInfo::kDataOffset));
@@ -718,6 +724,15 @@ void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) {
}
+void StubCompiler::GenerateStoreMiss(MacroAssembler* masm, Code::Kind kind) {
+ ASSERT(kind == Code::STORE_IC || kind == Code::KEYED_STORE_IC);
+ Handle<Code> code = (kind == Code::STORE_IC)
+ ? masm->isolate()->builtins()->StoreIC_Miss()
+ : masm->isolate()->builtins()->KeyedStoreIC_Miss();
+ __ Jump(code, RelocInfo::CODE_TARGET);
+}
+
+
void StubCompiler::GenerateKeyedLoadMissForceGeneric(MacroAssembler* masm) {
Handle<Code> code =
masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
@@ -897,6 +912,11 @@ static void GenerateCheckPropertyCells(MacroAssembler* masm,
#define __ ACCESS_MASM((masm()))
+void StubCompiler::GenerateTailCall(Handle<Code> code) {
+ __ jmp(code, RelocInfo::CODE_TARGET);
+}
+
+
Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
Register object_reg,
Handle<JSObject> holder,
@@ -905,7 +925,9 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
Register scratch2,
Handle<String> name,
int save_at_depth,
- Label* miss) {
+ Label* miss,
+ PrototypeCheckType check) {
+ Handle<JSObject> first = object;
// Make sure there's no overlap between holder and object registers.
ASSERT(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
ASSERT(!scratch2.is(object_reg) && !scratch2.is(holder_reg)
@@ -935,8 +957,8 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
if (!current->HasFastProperties() &&
!current->IsJSGlobalObject() &&
!current->IsJSGlobalProxy()) {
- if (!name->IsSymbol()) {
- name = factory()->LookupSymbol(name);
+ if (!name->IsInternalizedString()) {
+ name = factory()->InternalizeString(name);
}
ASSERT(current->property_dictionary()->FindEntry(*name) ==
StringDictionary::kNotFound);
@@ -954,8 +976,10 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
// Save the map in scratch1 for later.
__ movq(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
}
- __ CheckMap(reg, Handle<Map>(current_map),
- miss, DONT_DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
+ if (!current.is_identical_to(first) || check == CHECK_ALL_MAPS) {
+ __ CheckMap(reg, current_map, miss, DONT_DO_SMI_CHECK,
+ ALLOW_ELEMENT_TRANSITION_MAPS);
+ }
// Check access rights to the global object. This has to happen after
// the map check so that we know that the object is actually a global
@@ -987,9 +1011,11 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
// Log the check depth.
LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
- // Check the holder map.
- __ CheckMap(reg, Handle<Map>(holder->map()),
- miss, DONT_DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
+ if (!holder.is_identical_to(first) || check == CHECK_ALL_MAPS) {
+ // Check the holder map.
+ __ CheckMap(reg, Handle<Map>(holder->map()),
+ miss, DONT_DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
+ }
// Perform security check for access to the global object.
ASSERT(current->IsJSGlobalProxy() || !current->IsAccessCheckNeeded());
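
The new PrototypeCheckType parameter lets CheckPrototypes skip the receiver's own map check when the dispatching IC has already performed it. A behavioral sketch under that assumption; the enumerator names mirror the diff, everything else is illustrative:

    #include <cassert>
    #include <cstddef>
    #include <vector>

    enum PrototypeCheckType { CHECK_ALL_MAPS, SKIP_RECEIVER };

    struct Obj {
      int map;
      Obj* prototype;
    };

    // Walk the chain, checking each map against its expected value; the
    // first (receiver) check is elided unless CHECK_ALL_MAPS is requested.
    bool CheckPrototypeMaps(Obj* object, const std::vector<int>& expected,
                            PrototypeCheckType check) {
      Obj* current = object;
      for (std::size_t i = 0; i < expected.size(); ++i) {
        bool is_first = (current == object);
        if (!is_first || check == CHECK_ALL_MAPS) {
          if (current->map != expected[i]) return false;  // jump to miss
        }
        current = current->prototype;
      }
      return true;
    }

    int main() {
      Obj proto = {2, 0};
      Obj receiver = {99, &proto};  // receiver map deliberately "wrong"
      std::vector<int> maps;
      maps.push_back(1);  // expected receiver map
      maps.push_back(2);  // expected prototype map
      assert(CheckPrototypeMaps(&receiver, maps, SKIP_RECEIVER));
      assert(!CheckPrototypeMaps(&receiver, maps, CHECK_ALL_MAPS));
      return 0;
    }
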
@@ -1007,110 +1033,123 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
}
-void StubCompiler::GenerateLoadField(Handle<JSObject> object,
- Handle<JSObject> holder,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- int index,
- Handle<String> name,
- Label* miss) {
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, miss);
+void BaseLoadStubCompiler::HandlerFrontendFooter(Label* success,
+ Label* miss) {
+ if (!miss->is_unused()) {
+ __ jmp(success);
+ __ bind(miss);
+ GenerateLoadMiss(masm(), kind());
+ }
+}
- // Check the prototype chain.
- Register reg = CheckPrototypes(
- object, receiver, holder, scratch1, scratch2, scratch3, name, miss);
- // Get the value from the properties.
- GenerateFastPropertyLoad(masm(), rax, reg, holder, index);
- __ ret(0);
-}
+Register BaseLoadStubCompiler::CallbackHandlerFrontend(
+ Handle<JSObject> object,
+ Register object_reg,
+ Handle<JSObject> holder,
+ Handle<String> name,
+ Label* success,
+ Handle<ExecutableAccessorInfo> callback) {
+ Label miss;
+ Register reg = HandlerFrontendHeader(object, object_reg, holder, name, &miss);
-void StubCompiler::GenerateDictionaryLoadCallback(Register receiver,
- Register name_reg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Handle<AccessorInfo> callback,
- Handle<String> name,
- Label* miss) {
- ASSERT(!receiver.is(scratch1));
- ASSERT(!receiver.is(scratch2));
- ASSERT(!receiver.is(scratch3));
-
- // Load the properties dictionary.
- Register dictionary = scratch1;
- __ movq(dictionary, FieldOperand(receiver, JSObject::kPropertiesOffset));
-
- // Probe the dictionary.
- Label probe_done;
- StringDictionaryLookupStub::GeneratePositiveLookup(masm(),
- miss,
- &probe_done,
- dictionary,
- name_reg,
- scratch2,
- scratch3);
- __ bind(&probe_done);
-
- // If probing finds an entry in the dictionary, scratch3 contains the
- // index into the dictionary. Check that the value is the callback.
- Register index = scratch3;
- const int kElementsStartOffset =
- StringDictionary::kHeaderSize +
- StringDictionary::kElementsStartIndex * kPointerSize;
- const int kValueOffset = kElementsStartOffset + kPointerSize;
- __ movq(scratch2,
- Operand(dictionary, index, times_pointer_size,
- kValueOffset - kHeapObjectTag));
- __ movq(scratch3, callback, RelocInfo::EMBEDDED_OBJECT);
- __ cmpq(scratch2, scratch3);
- __ j(not_equal, miss);
+ if (!holder->HasFastProperties() && !holder->IsJSGlobalObject()) {
+ ASSERT(!reg.is(scratch2()));
+ ASSERT(!reg.is(scratch3()));
+ ASSERT(!reg.is(scratch4()));
+
+ // Load the properties dictionary.
+ Register dictionary = scratch4();
+ __ movq(dictionary, FieldOperand(reg, JSObject::kPropertiesOffset));
+
+ // Probe the dictionary.
+ Label probe_done;
+ StringDictionaryLookupStub::GeneratePositiveLookup(masm(),
+ &miss,
+ &probe_done,
+ dictionary,
+ this->name(),
+ scratch2(),
+ scratch3());
+ __ bind(&probe_done);
+
+ // If probing finds an entry in the dictionary, scratch3 contains the
+ // index into the dictionary. Check that the value is the callback.
+ Register index = scratch3();
+ const int kElementsStartOffset =
+ StringDictionary::kHeaderSize +
+ StringDictionary::kElementsStartIndex * kPointerSize;
+ const int kValueOffset = kElementsStartOffset + kPointerSize;
+ __ movq(scratch2(),
+ Operand(dictionary, index, times_pointer_size,
+ kValueOffset - kHeapObjectTag));
+ __ movq(scratch3(), callback, RelocInfo::EMBEDDED_OBJECT);
+ __ cmpq(scratch2(), scratch3());
+ __ j(not_equal, &miss);
+ }
+
+ HandlerFrontendFooter(success, &miss);
+ return reg;
}
-void StubCompiler::GenerateLoadCallback(Handle<JSObject> object,
- Handle<JSObject> holder,
- Register receiver,
- Register name_reg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- Handle<AccessorInfo> callback,
- Handle<String> name,
- Label* miss) {
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, miss);
+void BaseLoadStubCompiler::NonexistentHandlerFrontend(
+ Handle<JSObject> object,
+ Handle<JSObject> last,
+ Handle<String> name,
+ Label* success,
+ Handle<GlobalObject> global) {
+ Label miss;
- // Check that the maps haven't changed.
- Register reg = CheckPrototypes(object, receiver, holder, scratch1,
- scratch2, scratch3, name, miss);
+ Register reg = HandlerFrontendHeader(object, receiver(), last, name, &miss);
- if (!holder->HasFastProperties() && !holder->IsJSGlobalObject()) {
- GenerateDictionaryLoadCallback(
- reg, name_reg, scratch2, scratch3, scratch4, callback, name, miss);
+ // If the last object in the prototype chain is a global object,
+ // check that the global property cell is empty.
+ if (!global.is_null()) {
+ GenerateCheckPropertyCell(masm(), global, name, scratch2(), &miss);
}
+ if (!last->HasFastProperties()) {
+ __ movq(scratch2(), FieldOperand(reg, HeapObject::kMapOffset));
+ __ movq(scratch2(), FieldOperand(scratch2(), Map::kPrototypeOffset));
+ __ Cmp(scratch2(), isolate()->factory()->null_value());
+ __ j(not_equal, &miss);
+ }
+
+ HandlerFrontendFooter(success, &miss);
+}
+
+
+void BaseLoadStubCompiler::GenerateLoadField(Register reg,
+ Handle<JSObject> holder,
+ PropertyIndex index) {
+ // Get the value from the properties.
+ GenerateFastPropertyLoad(masm(), rax, reg, holder, index);
+ __ ret(0);
+}
+
+
+void BaseLoadStubCompiler::GenerateLoadCallback(
+ Register reg,
+ Handle<ExecutableAccessorInfo> callback) {
// Insert additional parameters into the stack frame above return address.
- ASSERT(!scratch2.is(reg));
- __ pop(scratch2); // Get return address to place it below.
+ ASSERT(!scratch2().is(reg));
+ __ pop(scratch2()); // Get return address to place it below.
- __ push(receiver); // receiver
+ __ push(receiver()); // receiver
__ push(reg); // holder
if (heap()->InNewSpace(callback->data())) {
- __ Move(scratch1, callback);
- __ push(FieldOperand(scratch1, AccessorInfo::kDataOffset)); // data
+ __ Move(scratch1(), callback);
+ __ push(FieldOperand(scratch1(),
+ ExecutableAccessorInfo::kDataOffset)); // data
} else {
- __ Push(Handle<Object>(callback->data()));
+ __ Push(Handle<Object>(callback->data(), isolate()));
}
__ PushAddress(ExternalReference::isolate_address()); // isolate
- __ push(name_reg); // name
- // Save a pointer to where we pushed the arguments pointer.
- // This will be passed as the const AccessorInfo& to the C++ callback.
+ __ push(name()); // name
+ // Save a pointer to where we pushed the arguments pointer. This will be
+ // passed as the const ExecutableAccessorInfo& to the C++ callback.
#if defined(__MINGW64__)
Register accessor_info_arg = rdx;
@@ -1124,9 +1163,9 @@ void StubCompiler::GenerateLoadCallback(Handle<JSObject> object,
Register name_arg = rdi;
#endif
- ASSERT(!name_arg.is(scratch2));
+ ASSERT(!name_arg.is(scratch2()));
__ movq(name_arg, rsp);
- __ push(scratch2); // Restore return address.
+ __ push(scratch2()); // Restore return address.
// A 4-element array for v8::Arguments::values_ and a handler for name.
const int kStackSpace = 5;
@@ -1149,44 +1188,22 @@ void StubCompiler::GenerateLoadCallback(Handle<JSObject> object,
}
-void StubCompiler::GenerateLoadConstant(Handle<JSObject> object,
- Handle<JSObject> holder,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Handle<JSFunction> value,
- Handle<String> name,
- Label* miss) {
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, miss);
-
- // Check that the maps haven't changed.
- CheckPrototypes(
- object, receiver, holder, scratch1, scratch2, scratch3, name, miss);
-
+void BaseLoadStubCompiler::GenerateLoadConstant(Handle<JSFunction> value) {
// Return the constant value.
__ LoadHeapObject(rax, value);
__ ret(0);
}
-void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
- Handle<JSObject> interceptor_holder,
- LookupResult* lookup,
- Register receiver,
- Register name_reg,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Handle<String> name,
- Label* miss) {
+void BaseLoadStubCompiler::GenerateLoadInterceptor(
+ Register holder_reg,
+ Handle<JSObject> object,
+ Handle<JSObject> interceptor_holder,
+ LookupResult* lookup,
+ Handle<String> name) {
ASSERT(interceptor_holder->HasNamedInterceptor());
ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined());
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, miss);
-
// So far the most popular follow-ups for interceptor loads are FIELD
// and CALLBACKS, so inline only those; other cases may be added
// later.
@@ -1195,8 +1212,9 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
if (lookup->IsField()) {
compile_followup_inline = true;
} else if (lookup->type() == CALLBACKS &&
- lookup->GetCallbackObject()->IsAccessorInfo()) {
- AccessorInfo* callback = AccessorInfo::cast(lookup->GetCallbackObject());
+ lookup->GetCallbackObject()->IsExecutableAccessorInfo()) {
+ ExecutableAccessorInfo* callback =
+ ExecutableAccessorInfo::cast(lookup->GetCallbackObject());
compile_followup_inline = callback->getter() != NULL &&
callback->IsCompatibleReceiver(*object);
}
@@ -1206,17 +1224,14 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
// Compile the interceptor call, followed by inline code to load the
// property from further up the prototype chain if the call fails.
// Check that the maps haven't changed.
- Register holder_reg = CheckPrototypes(object, receiver, interceptor_holder,
- scratch1, scratch2, scratch3,
- name, miss);
- ASSERT(holder_reg.is(receiver) || holder_reg.is(scratch1));
+ ASSERT(holder_reg.is(receiver()) || holder_reg.is(scratch1()));
// Preserve the receiver register explicitly whenever it is different from
// the holder and it is needed should the interceptor return without any
// result. The CALLBACKS case needs the receiver to be passed into C++ code;
// the FIELD case might cause a miss during the prototype check.
bool must_perfrom_prototype_check = *interceptor_holder != lookup->holder();
- bool must_preserve_receiver_reg = !receiver.is(holder_reg) &&
+ bool must_preserve_receiver_reg = !receiver().is(holder_reg) &&
(lookup->type() == CALLBACKS || must_perfrom_prototype_check);
// Save necessary data before invoking an interceptor.
@@ -1225,18 +1240,18 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
FrameScope frame_scope(masm(), StackFrame::INTERNAL);
if (must_preserve_receiver_reg) {
- __ push(receiver);
+ __ push(receiver());
}
__ push(holder_reg);
- __ push(name_reg);
+ __ push(this->name());
// Invoke an interceptor. Note: map checks from the receiver to the
// interceptor's holder have been compiled before (see a caller of this
// method).
CompileCallLoadPropertyWithInterceptor(masm(),
- receiver,
+ receiver(),
holder_reg,
- name_reg,
+ this->name(),
interceptor_holder);
// Check if interceptor provided a value for property. If it's
@@ -1248,71 +1263,23 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
__ ret(0);
__ bind(&interceptor_failed);
- __ pop(name_reg);
+ __ pop(this->name());
__ pop(holder_reg);
if (must_preserve_receiver_reg) {
- __ pop(receiver);
+ __ pop(receiver());
}
// Leave the internal frame.
}
- // Check that the maps from interceptor's holder to lookup's holder
- // haven't changed. And load lookup's holder into |holder| register.
- if (must_perfrom_prototype_check) {
- holder_reg = CheckPrototypes(interceptor_holder,
- holder_reg,
- Handle<JSObject>(lookup->holder()),
- scratch1,
- scratch2,
- scratch3,
- name,
- miss);
- }
-
- if (lookup->IsField()) {
- // We found FIELD property in prototype chain of interceptor's holder.
- // Retrieve a field from field's holder.
- GenerateFastPropertyLoad(masm(), rax, holder_reg,
- Handle<JSObject>(lookup->holder()),
- lookup->GetFieldIndex());
- __ ret(0);
- } else {
- // We found CALLBACKS property in prototype chain of interceptor's
- // holder.
- ASSERT(lookup->type() == CALLBACKS);
- Handle<AccessorInfo> callback(
- AccessorInfo::cast(lookup->GetCallbackObject()));
- ASSERT(callback->getter() != NULL);
-
- // Tail call to runtime.
- // Important invariant in CALLBACKS case: the code above must be
- // structured to never clobber |receiver| register.
- __ pop(scratch2); // return address
- __ push(receiver);
- __ push(holder_reg);
- __ Move(holder_reg, callback);
- __ push(FieldOperand(holder_reg, AccessorInfo::kDataOffset));
- __ PushAddress(ExternalReference::isolate_address());
- __ push(holder_reg);
- __ push(name_reg);
- __ push(scratch2); // restore return address
-
- ExternalReference ref =
- ExternalReference(IC_Utility(IC::kLoadCallbackProperty),
- isolate());
- __ TailCallExternalReference(ref, 6, 1);
- }
+ GenerateLoadPostInterceptor(holder_reg, interceptor_holder, name, lookup);
} else { // !compile_followup_inline
// Call the runtime system to load the interceptor.
// Check that the maps haven't changed.
- Register holder_reg = CheckPrototypes(object, receiver, interceptor_holder,
- scratch1, scratch2, scratch3,
- name, miss);
- __ pop(scratch2); // save old return address
- PushInterceptorArguments(masm(), receiver, holder_reg,
- name_reg, interceptor_holder);
- __ push(scratch2); // restore old return address
+ __ pop(scratch2()); // save old return address
+ PushInterceptorArguments(masm(), receiver(), holder_reg,
+ this->name(), interceptor_holder);
+ __ push(scratch2()); // restore old return address
ExternalReference ref = ExternalReference(
IC_Utility(IC::kLoadPropertyWithInterceptorForLoad), isolate());
@@ -1388,7 +1355,7 @@ void CallStubCompiler::GenerateMissBranch() {
Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
Handle<JSObject> holder,
- int index,
+ PropertyIndex index,
Handle<String> name) {
// ----------- S t a t e -------------
// rcx : function name
@@ -1482,7 +1449,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
Label call_builtin;
if (argc == 1) { // Otherwise fall through to call builtin.
- Label attempt_to_grow_elements, with_write_barrier;
+ Label attempt_to_grow_elements, with_write_barrier, check_double;
// Get the elements array of the object.
__ movq(rdi, FieldOperand(rdx, JSArray::kElementsOffset));
@@ -1490,7 +1457,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
// Check that the elements are in fast mode and writable.
__ Cmp(FieldOperand(rdi, HeapObject::kMapOffset),
factory()->fixed_array_map());
- __ j(not_equal, &call_builtin);
+ __ j(not_equal, &check_double);
// Get the array's length into rax and calculate new length.
__ SmiToInteger32(rax, FieldOperand(rdx, JSArray::kLengthOffset));
@@ -1521,6 +1488,34 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
__ Integer32ToSmi(rax, rax); // Return new length as smi.
__ ret((argc + 1) * kPointerSize);
+ __ bind(&check_double);
+
+ // Check that the elements are in double mode.
+ __ Cmp(FieldOperand(rdi, HeapObject::kMapOffset),
+ factory()->fixed_double_array_map());
+ __ j(not_equal, &call_builtin);
+
+ // Get the array's length into rax and calculate new length.
+ __ SmiToInteger32(rax, FieldOperand(rdx, JSArray::kLengthOffset));
+ STATIC_ASSERT(FixedArray::kMaxLength < Smi::kMaxValue);
+ __ addl(rax, Immediate(argc));
+
+ // Get the elements' length into rcx.
+ __ SmiToInteger32(rcx, FieldOperand(rdi, FixedArray::kLengthOffset));
+
+ // Check if we could survive without allocation.
+ __ cmpl(rax, rcx);
+ __ j(greater, &call_builtin);
+
+ __ movq(rcx, Operand(rsp, argc * kPointerSize));
+ __ StoreNumberToDoubleElements(
+ rcx, rdi, rax, xmm0, &call_builtin, argc * kDoubleSize);
+
+ // Save new length.
+ __ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rax);
+ __ Integer32ToSmi(rax, rax); // Return new length as smi.
+ __ ret((argc + 1) * kPointerSize);
+
__ bind(&with_write_barrier);
__ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
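
The added double-elements fast path mirrors the existing smi/object path: push in place while the backing store has room, otherwise fall through to the builtin, which may grow or transition the array. A compilable sketch of that control flow:

    #include <cassert>
    #include <cstddef>

    const std::size_t kCapacity = 4;

    struct DoubleArray {
      std::size_t length;           // JSArray length
      std::size_t capacity;         // FixedDoubleArray length
      double elements[kCapacity];   // backing store
    };

    // Returns false when the push must bail out to the builtin.
    bool FastDoublePush(DoubleArray* a, double value) {
      std::size_t new_length = a->length + 1;
      if (new_length > a->capacity) return false;  // -> call_builtin
      a->elements[a->length] = value;
      a->length = new_length;  // store the new length last
      return true;
    }

    int main() {
      DoubleArray a = {0, kCapacity, {0}};
      for (int i = 0; i < 4; ++i) {
        bool ok = FastDoublePush(&a, i * 1.5);
        assert(ok);
      }
      assert(!FastDoublePush(&a, 9.0));  // full: defer to the builtin
      return 0;
    }
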
@@ -1532,6 +1527,9 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
// In case of fast smi-only, convert to fast object, otherwise bail out.
__ bind(&not_fast_object);
__ CheckFastSmiElements(rbx, &call_builtin);
+ __ Cmp(FieldOperand(rcx, HeapObject::kMapOffset),
+ factory()->heap_number_map());
+ __ j(equal, &call_builtin);
// rdx: receiver
// rbx: map
@@ -1543,7 +1541,9 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
&try_holey_map);
ElementsTransitionGenerator::
- GenerateMapChangeElementsTransition(masm());
+ GenerateMapChangeElementsTransition(masm(),
+ DONT_TRACK_ALLOCATION_SITE,
+ NULL);
// Restore edi.
__ movq(rdi, FieldOperand(rdx, JSArray::kElementsOffset));
__ jmp(&fast_object);
@@ -1555,7 +1555,9 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
rdi,
&call_builtin);
ElementsTransitionGenerator::
- GenerateMapChangeElementsTransition(masm());
+ GenerateMapChangeElementsTransition(masm(),
+ DONT_TRACK_ALLOCATION_SITE,
+ NULL);
__ movq(rdi, FieldOperand(rdx, JSArray::kElementsOffset));
__ bind(&fast_object);
} else {
@@ -1776,8 +1778,9 @@ Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall(
rax,
&miss);
ASSERT(!object.is_identical_to(holder));
- CheckPrototypes(Handle<JSObject>(JSObject::cast(object->GetPrototype())),
- rax, holder, rbx, rdx, rdi, name, &miss);
+ CheckPrototypes(
+ Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
+ rax, holder, rbx, rdx, rdi, name, &miss);
Register receiver = rbx;
Register index = rdi;
@@ -1854,8 +1857,9 @@ Handle<Code> CallStubCompiler::CompileStringCharAtCall(
rax,
&miss);
ASSERT(!object.is_identical_to(holder));
- CheckPrototypes(Handle<JSObject>(JSObject::cast(object->GetPrototype())),
- rax, holder, rbx, rdx, rdi, name, &miss);
+ CheckPrototypes(
+ Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
+ rax, holder, rbx, rdx, rdi, name, &miss);
Register receiver = rax;
Register index = rdi;
@@ -1884,7 +1888,7 @@ Handle<Code> CallStubCompiler::CompileStringCharAtCall(
if (index_out_of_range.is_linked()) {
__ bind(&index_out_of_range);
- __ LoadRoot(rax, Heap::kEmptyStringRootIndex);
+ __ LoadRoot(rax, Heap::kempty_stringRootIndex);
__ ret((argc + 1) * kPointerSize);
}
__ bind(&miss);
@@ -2052,7 +2056,7 @@ Handle<Code> CallStubCompiler::CompileMathAbsCall(
const int sign_mask_shift =
(HeapNumber::kExponentOffset - HeapNumber::kValueOffset) * kBitsPerByte;
__ movq(rdi, static_cast<int64_t>(HeapNumber::kSignMask) << sign_mask_shift,
- RelocInfo::NONE);
+ RelocInfo::NONE64);
__ testq(rbx, rdi);
__ j(not_zero, &negative_sign);
__ ret(2 * kPointerSize);
@@ -2139,11 +2143,11 @@ Handle<Code> CallStubCompiler::CompileFastApiCall(
}
-Handle<Code> CallStubCompiler::CompileCallConstant(Handle<Object> object,
- Handle<JSObject> holder,
- Handle<JSFunction> function,
- Handle<String> name,
- CheckType check) {
+void CallStubCompiler::CompileHandlerFrontend(Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<String> name,
+ CheckType check,
+ Label* success) {
// ----------- S t a t e -------------
// rcx : function name
// rsp[0] : return address
@@ -2153,15 +2157,6 @@ Handle<Code> CallStubCompiler::CompileCallConstant(Handle<Object> object,
// rsp[argc * 8] : argument 1
// rsp[(argc + 1) * 8] : argument 0 = receiver
// -----------------------------------
-
- if (HasCustomCallGenerator(function)) {
- Handle<Code> code = CompileCustomCall(object, holder,
- Handle<JSGlobalPropertyCell>::null(),
- function, name);
- // A null handle means bail out to the regular compiler code below.
- if (!code.is_null()) return code;
- }
-
Label miss;
GenerateNameCheck(name, &miss);
@@ -2196,76 +2191,92 @@ Handle<Code> CallStubCompiler::CompileCallConstant(Handle<Object> object,
break;
case STRING_CHECK:
- if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
- // Check that the object is a two-byte string or a symbol.
- __ CmpObjectType(rdx, FIRST_NONSTRING_TYPE, rax);
- __ j(above_equal, &miss);
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::STRING_FUNCTION_INDEX, rax, &miss);
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype())),
- rax, holder, rbx, rdx, rdi, name, &miss);
- } else {
- // Calling non-strict non-builtins with a value as the receiver
- // requires boxing.
- __ jmp(&miss);
- }
+ // Check that the object is a string.
+ __ CmpObjectType(rdx, FIRST_NONSTRING_TYPE, rax);
+ __ j(above_equal, &miss);
+ // Check that the maps starting from the prototype haven't changed.
+ GenerateDirectLoadGlobalFunctionPrototype(
+ masm(), Context::STRING_FUNCTION_INDEX, rax, &miss);
+ CheckPrototypes(
+ Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
+ rax, holder, rbx, rdx, rdi, name, &miss);
break;
- case NUMBER_CHECK:
- if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
- Label fast;
- // Check that the object is a smi or a heap number.
- __ JumpIfSmi(rdx, &fast);
- __ CmpObjectType(rdx, HEAP_NUMBER_TYPE, rax);
- __ j(not_equal, &miss);
- __ bind(&fast);
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::NUMBER_FUNCTION_INDEX, rax, &miss);
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype())),
- rax, holder, rbx, rdx, rdi, name, &miss);
- } else {
- // Calling non-strict non-builtins with a value as the receiver
- // requires boxing.
- __ jmp(&miss);
- }
+ case SYMBOL_CHECK:
+ // Check that the object is a symbol.
+ __ CmpObjectType(rdx, SYMBOL_TYPE, rax);
+ __ j(not_equal, &miss);
break;
- case BOOLEAN_CHECK:
- if (function->IsBuiltin() || !function->shared()->is_classic_mode()) {
- Label fast;
- // Check that the object is a boolean.
- __ CompareRoot(rdx, Heap::kTrueValueRootIndex);
- __ j(equal, &fast);
- __ CompareRoot(rdx, Heap::kFalseValueRootIndex);
- __ j(not_equal, &miss);
- __ bind(&fast);
- // Check that the maps starting from the prototype haven't changed.
- GenerateDirectLoadGlobalFunctionPrototype(
- masm(), Context::BOOLEAN_FUNCTION_INDEX, rax, &miss);
- CheckPrototypes(
- Handle<JSObject>(JSObject::cast(object->GetPrototype())),
- rax, holder, rbx, rdx, rdi, name, &miss);
- } else {
- // Calling non-strict non-builtins with a value as the receiver
- // requires boxing.
- __ jmp(&miss);
- }
+ case NUMBER_CHECK: {
+ Label fast;
+ // Check that the object is a smi or a heap number.
+ __ JumpIfSmi(rdx, &fast);
+ __ CmpObjectType(rdx, HEAP_NUMBER_TYPE, rax);
+ __ j(not_equal, &miss);
+ __ bind(&fast);
+ // Check that the maps starting from the prototype haven't changed.
+ GenerateDirectLoadGlobalFunctionPrototype(
+ masm(), Context::NUMBER_FUNCTION_INDEX, rax, &miss);
+ CheckPrototypes(
+ Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
+ rax, holder, rbx, rdx, rdi, name, &miss);
+ break;
+ }
+ case BOOLEAN_CHECK: {
+ Label fast;
+ // Check that the object is a boolean.
+ __ CompareRoot(rdx, Heap::kTrueValueRootIndex);
+ __ j(equal, &fast);
+ __ CompareRoot(rdx, Heap::kFalseValueRootIndex);
+ __ j(not_equal, &miss);
+ __ bind(&fast);
+ // Check that the maps starting from the prototype haven't changed.
+ GenerateDirectLoadGlobalFunctionPrototype(
+ masm(), Context::BOOLEAN_FUNCTION_INDEX, rax, &miss);
+ CheckPrototypes(
+ Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
+ rax, holder, rbx, rdx, rdi, name, &miss);
break;
+ }
}
+ __ jmp(success);
+
+ // Handle call cache miss.
+ __ bind(&miss);
+ GenerateMissBranch();
+}
+
+
+void CallStubCompiler::CompileHandlerBackend(Handle<JSFunction> function) {
CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
? CALL_AS_FUNCTION
: CALL_AS_METHOD;
__ InvokeFunction(function, arguments(), JUMP_FUNCTION,
NullCallWrapper(), call_kind);
+}
- // Handle call cache miss.
- __ bind(&miss);
- GenerateMissBranch();
+
+Handle<Code> CallStubCompiler::CompileCallConstant(
+ Handle<Object> object,
+ Handle<JSObject> holder,
+ Handle<String> name,
+ CheckType check,
+ Handle<JSFunction> function) {
+ if (HasCustomCallGenerator(function)) {
+ Handle<Code> code = CompileCustomCall(object, holder,
+ Handle<JSGlobalPropertyCell>::null(),
+ function, name);
+ // A null handle means bail out to the regular compiler code below.
+ if (!code.is_null()) return code;
+ }
+
+ Label success;
+
+ CompileHandlerFrontend(object, holder, name, check, &success);
+ __ bind(&success);
+ CompileHandlerBackend(function);
// Return the generated code.
return GetCode(function);
@@ -2432,7 +2443,7 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
Handle<String> name,
Handle<JSObject> receiver,
Handle<JSObject> holder,
- Handle<AccessorInfo> callback) {
+ Handle<ExecutableAccessorInfo> callback) {
// ----------- S t a t e -------------
// -- rax : value
// -- rcx : name
@@ -2690,7 +2701,9 @@ Handle<Code> KeyedStoreStubCompiler::CompileStoreElement(
ElementsKind elements_kind = receiver_map->elements_kind();
bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
Handle<Code> stub =
- KeyedStoreElementStub(is_js_array, elements_kind, grow_mode_).GetCode();
+ KeyedStoreElementStub(is_js_array,
+ elements_kind,
+ grow_mode_).GetCode(isolate());
__ DispatchMap(rdx, receiver_map, stub, DO_SMI_CHECK);
@@ -2736,86 +2749,49 @@ Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
__ jmp(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
- return GetCode(Code::NORMAL, factory()->empty_string(), MEGAMORPHIC);
+ return GetCode(Code::NORMAL, factory()->empty_string(), POLYMORPHIC);
}
-Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<String> name,
- Handle<JSObject> object,
- Handle<JSObject> last) {
- // ----------- S t a t e -------------
- // -- rax : receiver
- // -- rcx : name
- // -- rsp[0] : return address
- // -----------------------------------
- Label miss;
-
- // Check that receiver is not a smi.
- __ JumpIfSmi(rax, &miss);
-
- // Check the maps of the full prototype chain. Also check that
- // global property cells up to (but not including) the last object
- // in the prototype chain are empty.
- CheckPrototypes(object, rax, last, rbx, rdx, rdi, name, &miss);
+Handle<Code> LoadStubCompiler::CompileLoadNonexistent(
+ Handle<JSObject> object,
+ Handle<JSObject> last,
+ Handle<String> name,
+ Handle<GlobalObject> global) {
+ Label success;
- // If the last object in the prototype chain is a global object,
- // check that the global property cell is empty.
- if (last->IsGlobalObject()) {
- GenerateCheckPropertyCell(
- masm(), Handle<GlobalObject>::cast(last), name, rdx, &miss);
- }
+ NonexistentHandlerFrontend(object, last, name, &success, global);
+ __ bind(&success);
// Return undefined if maps of the full prototype chain are still the
// same and no global property with this name contains a value.
__ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
__ ret(0);
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::LOAD_IC);
-
// Return the generated code.
- return GetCode(Code::NONEXISTENT, factory()->empty_string());
+ return GetCode(Code::HANDLER_FRAGMENT, Code::NONEXISTENT, name);
}
-Handle<Code> LoadStubCompiler::CompileLoadField(Handle<JSObject> object,
- Handle<JSObject> holder,
- int index,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- rax : receiver
- // -- rcx : name
- // -- rsp[0] : return address
- // -----------------------------------
- Label miss;
+Register* LoadStubCompiler::registers() {
+ // receiver, name, scratch1, scratch2, scratch3, scratch4.
+ static Register registers[] = { rax, rcx, rdx, rbx, rdi, r8 };
+ return registers;
+}
- GenerateLoadField(object, holder, rax, rbx, rdx, rdi, index, name, &miss);
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::LOAD_IC);
- // Return the generated code.
- return GetCode(Code::FIELD, name);
+Register* KeyedLoadStubCompiler::registers() {
+ // receiver, name, scratch1, scratch2, scratch3, scratch4.
+ static Register registers[] = { rdx, rax, rbx, rcx, rdi, r8 };
+ return registers;
}
-Handle<Code> LoadStubCompiler::CompileLoadCallback(
- Handle<String> name,
- Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<AccessorInfo> callback) {
- // ----------- S t a t e -------------
- // -- rax : receiver
- // -- rcx : name
- // -- rsp[0] : return address
- // -----------------------------------
- Label miss;
- GenerateLoadCallback(object, holder, rax, rcx, rdx, rbx, rdi, r8, callback,
- name, &miss);
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::LOAD_IC);
-
- // Return the generated code.
- return GetCode(Code::CALLBACKS, name);
+void KeyedLoadStubCompiler::GenerateNameCheck(Handle<String> name,
+ Register name_reg,
+ Label* miss) {
+ __ Cmp(name_reg, name);
+ __ j(not_equal, miss);
}
@@ -2856,92 +2832,20 @@ void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
#define __ ACCESS_MASM(masm())
-Handle<Code> LoadStubCompiler::CompileLoadViaGetter(
- Handle<String> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- Handle<JSFunction> getter) {
- // ----------- S t a t e -------------
- // -- rax : receiver
- // -- rcx : name
- // -- rsp[0] : return address
- // -----------------------------------
- Label miss;
-
- // Check that the maps haven't changed.
- __ JumpIfSmi(rax, &miss);
- CheckPrototypes(receiver, rax, holder, rbx, rdx, rdi, name, &miss);
-
- GenerateLoadViaGetter(masm(), getter),
-
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::LOAD_IC);
-
- // Return the generated code.
- return GetCode(Code::CALLBACKS, name);
-}
-
-
-Handle<Code> LoadStubCompiler::CompileLoadConstant(Handle<JSObject> object,
- Handle<JSObject> holder,
- Handle<JSFunction> value,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- rax : receiver
- // -- rcx : name
- // -- rsp[0] : return address
- // -----------------------------------
- Label miss;
-
- GenerateLoadConstant(object, holder, rax, rbx, rdx, rdi, value, name, &miss);
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::LOAD_IC);
-
- // Return the generated code.
- return GetCode(Code::CONSTANT_FUNCTION, name);
-}
-
-
-Handle<Code> LoadStubCompiler::CompileLoadInterceptor(Handle<JSObject> receiver,
- Handle<JSObject> holder,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- rax : receiver
- // -- rcx : name
- // -- rsp[0] : return address
- // -----------------------------------
- Label miss;
- LookupResult lookup(isolate());
- LookupPostInterceptor(holder, name, &lookup);
-
- // TODO(368): Compile in the whole chain: all the interceptors in
- // prototypes and ultimate answer.
- GenerateLoadInterceptor(receiver, holder, &lookup, rax, rcx, rdx, rbx, rdi,
- name, &miss);
- __ bind(&miss);
- GenerateLoadMiss(masm(), Code::LOAD_IC);
-
- // Return the generated code.
- return GetCode(Code::INTERCEPTOR, name);
-}
-
-
Handle<Code> LoadStubCompiler::CompileLoadGlobal(
Handle<JSObject> object,
- Handle<GlobalObject> holder,
+ Handle<GlobalObject> global,
Handle<JSGlobalPropertyCell> cell,
Handle<String> name,
bool is_dont_delete) {
- // ----------- S t a t e -------------
- // -- rax : receiver
- // -- rcx : name
- // -- rsp[0] : return address
- // -----------------------------------
- Label miss;
+ Label success, miss;
+ // TODO(verwaest): Directly store to rax. Currently we cannot do this, since
+ // rax is used as receiver(), which we would otherwise clobber before a
+ // potential miss.
- // Check that the maps haven't changed.
- __ JumpIfSmi(rax, &miss);
- CheckPrototypes(object, rax, holder, rbx, rdx, rdi, name, &miss);
+ __ CheckMap(receiver(), Handle<Map>(object->map()), &miss, DO_SMI_CHECK);
+ HandlerFrontendHeader(
+ object, receiver(), Handle<JSObject>::cast(global), name, &miss);
// Get the value from the cell.
__ Move(rbx, cell);
@@ -2956,213 +2860,16 @@ Handle<Code> LoadStubCompiler::CompileLoadGlobal(
__ Check(not_equal, "DontDelete cells can't contain the hole");
}
+ HandlerFrontendFooter(&success, &miss);
+ __ bind(&success);
+
Counters* counters = isolate()->counters();
__ IncrementCounter(counters->named_load_global_stub(), 1);
__ movq(rax, rbx);
__ ret(0);
- __ bind(&miss);
- __ IncrementCounter(counters->named_load_global_stub_miss(), 1);
- GenerateLoadMiss(masm(), Code::LOAD_IC);
-
// Return the generated code.
- return GetCode(Code::NORMAL, name);
-}
-
-
-Handle<Code> KeyedLoadStubCompiler::CompileLoadField(Handle<String> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- int index) {
- // ----------- S t a t e -------------
- // -- rax : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Label miss;
-
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->keyed_load_field(), 1);
-
- // Check that the name has not changed.
- __ Cmp(rax, name);
- __ j(not_equal, &miss);
-
- GenerateLoadField(receiver, holder, rdx, rbx, rcx, rdi, index, name, &miss);
-
- __ bind(&miss);
- __ DecrementCounter(counters->keyed_load_field(), 1);
- GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
-
- // Return the generated code.
- return GetCode(Code::FIELD, name);
-}
-
-
-Handle<Code> KeyedLoadStubCompiler::CompileLoadCallback(
- Handle<String> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- Handle<AccessorInfo> callback) {
- // ----------- S t a t e -------------
- // -- rax : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Label miss;
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->keyed_load_callback(), 1);
-
- // Check that the name has not changed.
- __ Cmp(rax, name);
- __ j(not_equal, &miss);
-
- GenerateLoadCallback(receiver, holder, rdx, rax, rbx, rcx, rdi, r8, callback,
- name, &miss);
- __ bind(&miss);
- __ DecrementCounter(counters->keyed_load_callback(), 1);
- GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
-
- // Return the generated code.
- return GetCode(Code::CALLBACKS, name);
-}
-
-
-Handle<Code> KeyedLoadStubCompiler::CompileLoadConstant(
- Handle<String> name,
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- Handle<JSFunction> value) {
- // ----------- S t a t e -------------
- // -- rax : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Label miss;
-
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->keyed_load_constant_function(), 1);
-
- // Check that the name has not changed.
- __ Cmp(rax, name);
- __ j(not_equal, &miss);
-
- GenerateLoadConstant(receiver, holder, rdx, rbx, rcx, rdi,
- value, name, &miss);
- __ bind(&miss);
- __ DecrementCounter(counters->keyed_load_constant_function(), 1);
- GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
-
- // Return the generated code.
- return GetCode(Code::CONSTANT_FUNCTION, name);
-}
-
-
-Handle<Code> KeyedLoadStubCompiler::CompileLoadInterceptor(
- Handle<JSObject> receiver,
- Handle<JSObject> holder,
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- rax : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Label miss;
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->keyed_load_interceptor(), 1);
-
- // Check that the name has not changed.
- __ Cmp(rax, name);
- __ j(not_equal, &miss);
-
- LookupResult lookup(isolate());
- LookupPostInterceptor(holder, name, &lookup);
- GenerateLoadInterceptor(receiver, holder, &lookup, rdx, rax, rcx, rbx, rdi,
- name, &miss);
- __ bind(&miss);
- __ DecrementCounter(counters->keyed_load_interceptor(), 1);
- GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
-
- // Return the generated code.
- return GetCode(Code::INTERCEPTOR, name);
-}
-
-
-Handle<Code> KeyedLoadStubCompiler::CompileLoadArrayLength(
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- rax : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Label miss;
-
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->keyed_load_array_length(), 1);
-
- // Check that the name has not changed.
- __ Cmp(rax, name);
- __ j(not_equal, &miss);
-
- GenerateLoadArrayLength(masm(), rdx, rcx, &miss);
- __ bind(&miss);
- __ DecrementCounter(counters->keyed_load_array_length(), 1);
- GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
-
- // Return the generated code.
- return GetCode(Code::CALLBACKS, name);
-}
-
-
-Handle<Code> KeyedLoadStubCompiler::CompileLoadStringLength(
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- rax : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Label miss;
-
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->keyed_load_string_length(), 1);
-
- // Check that the name has not changed.
- __ Cmp(rax, name);
- __ j(not_equal, &miss);
-
- GenerateLoadStringLength(masm(), rdx, rcx, rbx, &miss, true);
- __ bind(&miss);
- __ DecrementCounter(counters->keyed_load_string_length(), 1);
- GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
-
- // Return the generated code.
- return GetCode(Code::CALLBACKS, name);
-}
-
-
-Handle<Code> KeyedLoadStubCompiler::CompileLoadFunctionPrototype(
- Handle<String> name) {
- // ----------- S t a t e -------------
- // -- rax : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Label miss;
-
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->keyed_load_function_prototype(), 1);
-
- // Check that the name has not changed.
- __ Cmp(rax, name);
- __ j(not_equal, &miss);
-
- GenerateLoadFunctionPrototype(masm(), rdx, rcx, rbx, &miss);
- __ bind(&miss);
- __ DecrementCounter(counters->keyed_load_function_prototype(), 1);
- GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
-
- // Return the generated code.
- return GetCode(Code::CALLBACKS, name);
+ return GetCode(Code::IC_FRAGMENT, Code::NORMAL, name);
}
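
CompileLoadGlobal now routes through the handler frontend/footer helpers, but the core load is unchanged: read the cell's value and, unless the property is DontDelete, treat the hole as a miss. A sketch with a stand-in hole sentinel:

    #include <cassert>
    #include <cstdint>

    const intptr_t kTheHole = -1;  // stand-in for the hole value

    struct PropertyCell { intptr_t value; };

    // Returns false (the miss path) if a deletable property was deleted.
    bool LoadGlobal(const PropertyCell* cell, bool is_dont_delete,
                    intptr_t* result) {
      intptr_t v = cell->value;
      if (!is_dont_delete && v == kTheHole) return false;  // -> miss
      *result = v;
      return true;
    }

    int main() {
      PropertyCell live = {42}, deleted = {kTheHole};
      intptr_t out = 0;
      assert(LoadGlobal(&live, false, &out) && out == 42);
      assert(!LoadGlobal(&deleted, false, &out));
      return 0;
    }
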
@@ -3174,43 +2881,54 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadElement(
// -- rsp[0] : return address
// -----------------------------------
ElementsKind elements_kind = receiver_map->elements_kind();
- Handle<Code> stub = KeyedLoadElementStub(elements_kind).GetCode();
-
- __ DispatchMap(rdx, receiver_map, stub, DO_SMI_CHECK);
+ if (receiver_map->has_fast_elements() ||
+ receiver_map->has_external_array_elements()) {
+ Handle<Code> stub = KeyedLoadFastElementStub(
+ receiver_map->instance_type() == JS_ARRAY_TYPE,
+ elements_kind).GetCode(isolate());
+ __ DispatchMap(rdx, receiver_map, stub, DO_SMI_CHECK);
+ } else {
+ Handle<Code> stub =
+ KeyedLoadDictionaryElementStub().GetCode(isolate());
+ __ DispatchMap(rdx, receiver_map, stub, DO_SMI_CHECK);
+ }
- Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Miss();
- __ jmp(ic, RelocInfo::CODE_TARGET);
+ GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
// Return the generated code.
- return GetCode(Code::NORMAL, factory()->empty_string());
+ return GetCode(Code::IC_FRAGMENT, Code::NORMAL, factory()->empty_string());
}
-Handle<Code> KeyedLoadStubCompiler::CompileLoadPolymorphic(
+Handle<Code> BaseLoadStubCompiler::CompilePolymorphicIC(
MapHandleList* receiver_maps,
- CodeHandleList* handler_ics) {
- // ----------- S t a t e -------------
- // -- rax : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
+ CodeHandleList* handlers,
+ Handle<String> name,
+ Code::StubType type,
+ IcCheckType check) {
Label miss;
- __ JumpIfSmi(rdx, &miss);
- Register map_reg = rbx;
- __ movq(map_reg, FieldOperand(rdx, HeapObject::kMapOffset));
+ if (check == PROPERTY) {
+ GenerateNameCheck(name, this->name(), &miss);
+ }
+
+ __ JumpIfSmi(receiver(), &miss);
+ Register map_reg = scratch1();
+ __ movq(map_reg, FieldOperand(receiver(), HeapObject::kMapOffset));
int receiver_count = receiver_maps->length();
for (int current = 0; current < receiver_count; ++current) {
// Check map and tail call if there's a match
__ Cmp(map_reg, receiver_maps->at(current));
- __ j(equal, handler_ics->at(current), RelocInfo::CODE_TARGET);
+ __ j(equal, handlers->at(current), RelocInfo::CODE_TARGET);
}
__ bind(&miss);
- GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+ GenerateLoadMiss(masm(), kind());
// Return the generated code.
- return GetCode(Code::NORMAL, factory()->empty_string(), MEGAMORPHIC);
+ InlineCacheState state =
+ receiver_maps->length() > 1 ? POLYMORPHIC : MONOMORPHIC;
+ return GetCode(Code::IC_FRAGMENT, type, name, state);
}
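
CompilePolymorphicIC generalizes the old keyed-load version: an optional name check, then one map load followed by a compare-and-jump per cached handler, with the IC state reported as MONOMORPHIC or POLYMORPHIC depending on the map count. The dispatch shape in plain C++:

    #include <cassert>
    #include <cstddef>
    #include <vector>

    typedef int Map;
    typedef int (*Handler)();

    int HandlerA() { return 1; }
    int HandlerB() { return 2; }

    // One map load, then a compare per cached (map, handler) pair;
    // falling off the end is the miss case.
    int Dispatch(Map receiver_map, const std::vector<Map>& maps,
                 const std::vector<Handler>& handlers) {
      for (std::size_t i = 0; i < maps.size(); ++i) {
        if (receiver_map == maps[i]) return handlers[i]();  // tail jump
      }
      return -1;  // miss: fall back to the generic IC
    }

    int main() {
      std::vector<Map> maps;
      std::vector<Handler> handlers;
      maps.push_back(10); handlers.push_back(HandlerA);
      maps.push_back(20); handlers.push_back(HandlerB);
      assert(Dispatch(20, maps, handlers) == 2);
      assert(Dispatch(30, maps, handlers) == -1);
      return 0;
    }
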
@@ -3305,7 +3023,8 @@ Handle<Code> ConstructStubCompiler::CompileConstructStub(
__ movq(Operand(r9, i * kPointerSize), rbx);
} else {
// Set the property to the constant value.
- Handle<Object> constant(shared->GetThisPropertyAssignmentConstant(i));
+ Handle<Object> constant(shared->GetThisPropertyAssignmentConstant(i),
+ isolate());
__ Move(Operand(r9, i * kPointerSize), constant);
}
}
@@ -3421,140 +3140,6 @@ static void GenerateSmiKeyCheck(MacroAssembler* masm,
}
-void KeyedLoadStubCompiler::GenerateLoadExternalArray(
- MacroAssembler* masm,
- ElementsKind elements_kind) {
- // ----------- S t a t e -------------
- // -- rax : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Label slow, miss_force_generic;
-
- // This stub is meant to be tail-jumped to, the receiver must already
- // have been verified by the caller to not be a smi.
-
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, rax, rcx, xmm0, xmm1, &miss_force_generic);
-
- // Check that the index is in range.
- __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
- __ SmiToInteger32(rcx, rax);
- __ cmpq(rax, FieldOperand(rbx, ExternalArray::kLengthOffset));
- // Unsigned comparison catches both negative and too-large values.
- __ j(above_equal, &miss_force_generic);
-
- // rax: index (as a smi)
- // rdx: receiver (JSObject)
- // rcx: untagged index
- // rbx: elements array
- __ movq(rbx, FieldOperand(rbx, ExternalArray::kExternalPointerOffset));
- // rbx: base pointer of external storage
- switch (elements_kind) {
- case EXTERNAL_BYTE_ELEMENTS:
- __ movsxbq(rcx, Operand(rbx, rcx, times_1, 0));
- break;
- case EXTERNAL_PIXEL_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- __ movzxbq(rcx, Operand(rbx, rcx, times_1, 0));
- break;
- case EXTERNAL_SHORT_ELEMENTS:
- __ movsxwq(rcx, Operand(rbx, rcx, times_2, 0));
- break;
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- __ movzxwq(rcx, Operand(rbx, rcx, times_2, 0));
- break;
- case EXTERNAL_INT_ELEMENTS:
- __ movsxlq(rcx, Operand(rbx, rcx, times_4, 0));
- break;
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- __ movl(rcx, Operand(rbx, rcx, times_4, 0));
- break;
- case EXTERNAL_FLOAT_ELEMENTS:
- __ cvtss2sd(xmm0, Operand(rbx, rcx, times_4, 0));
- break;
- case EXTERNAL_DOUBLE_ELEMENTS:
- __ movsd(xmm0, Operand(rbx, rcx, times_8, 0));
- break;
- default:
- UNREACHABLE();
- break;
- }
-
- // rax: index
- // rdx: receiver
- // For integer array types:
- // rcx: value
- // For floating-point array type:
- // xmm0: value as double.
-
- ASSERT(kSmiValueSize == 32);
- if (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) {
- // For the UnsignedInt array type, we need to see whether
- // the value can be represented in a Smi. If not, we need to convert
- // it to a HeapNumber.
- Label box_int;
-
- __ JumpIfUIntNotValidSmiValue(rcx, &box_int, Label::kNear);
-
- __ Integer32ToSmi(rax, rcx);
- __ ret(0);
-
- __ bind(&box_int);
-
- // Allocate a HeapNumber for the int and perform int-to-double
- // conversion.
- // The value is zero-extended since we loaded the value from memory
- // with movl.
- __ cvtqsi2sd(xmm0, rcx);
-
- __ AllocateHeapNumber(rcx, rbx, &slow);
- // Set the value.
- __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0);
- __ movq(rax, rcx);
- __ ret(0);
- } else if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
- elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
- // For the floating-point array type, we need to always allocate a
- // HeapNumber.
- __ AllocateHeapNumber(rcx, rbx, &slow);
- // Set the value.
- __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0);
- __ movq(rax, rcx);
- __ ret(0);
- } else {
- __ Integer32ToSmi(rax, rcx);
- __ ret(0);
- }
-
- // Slow case: Jump to runtime.
- __ bind(&slow);
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->keyed_load_external_array_slow(), 1);
-
- // ----------- S t a t e -------------
- // -- rax : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
-
- Handle<Code> ic = masm->isolate()->builtins()->KeyedLoadIC_Slow();
- __ jmp(ic, RelocInfo::CODE_TARGET);
-
- // Miss case: Jump to runtime.
- __ bind(&miss_force_generic);
-
- // ----------- S t a t e -------------
- // -- rax : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Handle<Code> miss_ic =
- masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
- __ jmp(miss_ic, RelocInfo::CODE_TARGET);
-}
-
-
void KeyedStoreStubCompiler::GenerateStoreExternalArray(
MacroAssembler* masm,
ElementsKind elements_kind) {
@@ -3744,98 +3329,6 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
}
-void KeyedLoadStubCompiler::GenerateLoadFastElement(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rax : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Label miss_force_generic;
-
- // This stub is meant to be tail-jumped to, the receiver must already
- // have been verified by the caller to not be a smi.
-
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, rax, rcx, xmm0, xmm1, &miss_force_generic);
-
- // Get the elements array.
- __ movq(rcx, FieldOperand(rdx, JSObject::kElementsOffset));
- __ AssertFastElements(rcx);
-
- // Check that the key is within bounds.
- __ SmiCompare(rax, FieldOperand(rcx, FixedArray::kLengthOffset));
- __ j(above_equal, &miss_force_generic);
-
- // Load the result and make sure it's not the hole.
- SmiIndex index = masm->SmiToIndex(rbx, rax, kPointerSizeLog2);
- __ movq(rbx, FieldOperand(rcx,
- index.reg,
- index.scale,
- FixedArray::kHeaderSize));
- __ CompareRoot(rbx, Heap::kTheHoleValueRootIndex);
- __ j(equal, &miss_force_generic);
- __ movq(rax, rbx);
- __ ret(0);
-
- __ bind(&miss_force_generic);
- Code* code = masm->isolate()->builtins()->builtin(
- Builtins::kKeyedLoadIC_MissForceGeneric);
- Handle<Code> ic(code);
- __ jmp(ic, RelocInfo::CODE_TARGET);
-}
-
-
-void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(
- MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- rax : key
- // -- rdx : receiver
- // -- rsp[0] : return address
- // -----------------------------------
- Label miss_force_generic, slow_allocate_heapnumber;
-
- // This stub is meant to be tail-jumped to, the receiver must already
- // have been verified by the caller to not be a smi.
-
- // Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, rax, rcx, xmm0, xmm1, &miss_force_generic);
-
- // Get the elements array.
- __ movq(rcx, FieldOperand(rdx, JSObject::kElementsOffset));
- __ AssertFastElements(rcx);
-
- // Check that the key is within bounds.
- __ SmiCompare(rax, FieldOperand(rcx, FixedArray::kLengthOffset));
- __ j(above_equal, &miss_force_generic);
-
- // Check for the hole
- __ SmiToInteger32(kScratchRegister, rax);
- uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
- __ cmpl(FieldOperand(rcx, kScratchRegister, times_8, offset),
- Immediate(kHoleNanUpper32));
- __ j(equal, &miss_force_generic);
-
- // Always allocate a heap number for the result.
- __ movsd(xmm0, FieldOperand(rcx, kScratchRegister, times_8,
- FixedDoubleArray::kHeaderSize));
- __ AllocateHeapNumber(rcx, rbx, &slow_allocate_heapnumber);
- // Set the value.
- __ movq(rax, rcx);
- __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0);
- __ ret(0);
-
- __ bind(&slow_allocate_heapnumber);
- Handle<Code> slow_ic =
- masm->isolate()->builtins()->KeyedLoadIC_Slow();
- __ jmp(slow_ic, RelocInfo::CODE_TARGET);
-
- __ bind(&miss_force_generic);
- Handle<Code> miss_ic =
- masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
- __ jmp(miss_ic, RelocInfo::CODE_TARGET);
-}
-
-
void KeyedStoreStubCompiler::GenerateStoreFastElement(
MacroAssembler* masm,
bool is_js_array,
@@ -3990,7 +3483,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
// -- rsp[0] : return address
// -----------------------------------
Label miss_force_generic, transition_elements_kind, finish_store;
- Label grow, slow, check_capacity;
+ Label grow, slow, check_capacity, restore_key_transition_elements_kind;
// This stub is meant to be tail-jumped to, the receiver must already
// have been verified by the caller to not be a smi.
@@ -4019,7 +3512,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
__ bind(&finish_store);
__ SmiToInteger32(rcx, rcx);
__ StoreNumberToDoubleElements(rax, rdi, rcx, xmm0,
- &transition_elements_kind);
+ &restore_key_transition_elements_kind);
__ ret(0);
// Handle store cache miss, replacing the ic with the generic stub.
@@ -4028,9 +3521,10 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
__ jmp(ic_force_generic, RelocInfo::CODE_TARGET);
- __ bind(&transition_elements_kind);
+ __ bind(&restore_key_transition_elements_kind);
// Restore smi-tagging of rcx.
__ Integer32ToSmi(rcx, rcx);
+ __ bind(&transition_elements_kind);
Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
__ jmp(ic_miss, RelocInfo::CODE_TARGET);
@@ -4071,6 +3565,16 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
__ Move(FieldOperand(rdi, FixedDoubleArray::kLengthOffset),
Smi::FromInt(JSArray::kPreallocatedArrayElements));
+ // Store the value as element zero and fill the remaining
+ // preallocated slots with holes.
+ __ SmiToInteger32(rcx, rcx);
+ __ StoreNumberToDoubleElements(rax, rdi, rcx, xmm0,
+ &restore_key_transition_elements_kind);
+
+ __ movq(r8, BitCast<int64_t, uint64_t>(kHoleNanInt64), RelocInfo::NONE64);
+ for (int i = 1; i < JSArray::kPreallocatedArrayElements; i++) {
+ __ movq(FieldOperand(rdi, FixedDoubleArray::OffsetOfElementAt(i)), r8);
+ }
+
// Install the new backing store in the JSArray.
__ movq(FieldOperand(rdx, JSObject::kElementsOffset), rdi);
__ RecordWriteField(rdx, JSObject::kElementsOffset, rdi, rbx,
@@ -4079,7 +3583,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
// Increment the length of the array.
__ Move(FieldOperand(rdx, JSArray::kLengthOffset), Smi::FromInt(1));
__ movq(rdi, FieldOperand(rdx, JSObject::kElementsOffset));
- __ jmp(&finish_store);
+ __ ret(0);
__ bind(&check_capacity);
// rax: value
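
The grow path added above writes the incoming value at element zero, then fills the remaining preallocated slots with kHoleNanInt64, the NaN bit pattern V8 reserves as "the hole" in double arrays; the removed GenerateLoadFastDoubleElement recognizes it by comparing only the upper 32 bits (kHoleNanUpper32). A hedged sketch of the idea, using an illustrative stand-in constant rather than the real one from objects.h:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

// Illustrative stand-in for V8's kHoleNanInt64; the point is only that
// "the hole" is a NaN with a payload no arithmetic result can produce.
static const uint64_t kHoleBits = 0x7ff7fffffff7ffffULL;

static bool IsHole(double d) {
  uint64_t bits;
  memcpy(&bits, &d, sizeof bits);
  // Mirrors the load stub's kHoleNanUpper32 check: comparing the upper
  // 32 bits is enough to recognize the sentinel.
  return (bits >> 32) == (kHoleBits >> 32);
}

int main() {
  double hole;
  memcpy(&hole, &kHoleBits, sizeof hole);        // a freshly grown slot
  printf("%d %d\n", IsHole(hole), IsHole(1.5));  // prints: 1 0
  return 0;
}
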
diff --git a/src/3rdparty/v8/test/cctest/SConscript b/src/3rdparty/v8/test/cctest/SConscript
index bcd1e98..8ed3f52 100644
--- a/src/3rdparty/v8/test/cctest/SConscript
+++ b/src/3rdparty/v8/test/cctest/SConscript
@@ -94,6 +94,7 @@ SOURCES = {
'test-sockets.cc',
'test-spaces.cc',
'test-strings.cc',
+ 'test-symbols.cc',
'test-strtod.cc',
'test-thread-termination.cc',
'test-threads.cc',
diff --git a/src/3rdparty/v8/test/cctest/cctest.cc b/src/3rdparty/v8/test/cctest/cctest.cc
index f638ed4..f173760 100644
--- a/src/3rdparty/v8/test/cctest/cctest.cc
+++ b/src/3rdparty/v8/test/cctest/cctest.cc
@@ -69,8 +69,12 @@ static void PrintTestList(CcTest* current) {
}
+v8::Isolate* CcTest::default_isolate_;
+
int main(int argc, char* argv[]) {
v8::internal::FlagList::SetFlagsFromCommandLine(&argc, argv, true);
+ CcTest::set_default_isolate(v8::Isolate::GetCurrent());
+ CHECK(CcTest::default_isolate() != NULL);
int tests_run = 0;
bool print_run_count = true;
for (int i = 1; i < argc; i++) {
diff --git a/src/3rdparty/v8/test/cctest/cctest.gyp b/src/3rdparty/v8/test/cctest/cctest.gyp
index 80eecfd..eb0d907 100644
--- a/src/3rdparty/v8/test/cctest/cctest.gyp
+++ b/src/3rdparty/v8/test/cctest/cctest.gyp
@@ -69,6 +69,7 @@
'test-fixed-dtoa.cc',
'test-flags.cc',
'test-func-name-inference.cc',
+ 'test-global-object.cc',
'test-hashing.cc',
'test-hashmap.cc',
'test-heap.cc',
@@ -81,6 +82,7 @@
'test-mark-compact.cc',
'test-object-observe.cc',
'test-parsing.cc',
+ 'test-platform.cc',
'test-platform-tls.cc',
'test-profile-generator.cc',
'test-random.cc',
@@ -90,6 +92,7 @@
'test-sockets.cc',
'test-spaces.cc',
'test-strings.cc',
+ 'test-symbols.cc',
'test-strtod.cc',
'test-thread-termination.cc',
'test-threads.cc',
diff --git a/src/3rdparty/v8/test/cctest/cctest.h b/src/3rdparty/v8/test/cctest/cctest.h
index f3961a4..30d9d7e 100644
--- a/src/3rdparty/v8/test/cctest/cctest.h
+++ b/src/3rdparty/v8/test/cctest/cctest.h
@@ -57,13 +57,17 @@ class CcTest {
CcTest(TestFunction* callback, const char* file, const char* name,
const char* dependency, bool enabled);
void Run() { callback_(); }
- static int test_count();
static CcTest* last() { return last_; }
CcTest* prev() { return prev_; }
const char* file() { return file_; }
const char* name() { return name_; }
const char* dependency() { return dependency_; }
bool enabled() { return enabled_; }
+ static void set_default_isolate(v8::Isolate* default_isolate) {
+ default_isolate_ = default_isolate;
+ }
+ static v8::Isolate* default_isolate() { return default_isolate_; }
+
private:
TestFunction* callback_;
const char* file_;
@@ -72,6 +76,7 @@ class CcTest {
bool enabled_;
static CcTest* last_;
CcTest* prev_;
+ static v8::Isolate* default_isolate_;
};
// Switches between all the Api tests using the threading support.
@@ -87,13 +92,6 @@ class CcTest {
class ApiTestFuzzer: public v8::internal::Thread {
public:
void CallTest();
- explicit ApiTestFuzzer(int num)
- : Thread("ApiTestFuzzer"),
- test_number_(num),
- gate_(v8::internal::OS::CreateSemaphore(0)),
- active_(true) {
- }
- ~ApiTestFuzzer() { delete gate_; }
// The ApiTestFuzzer is also a Thread, so it has a Run method.
virtual void Run();
@@ -112,6 +110,14 @@ class ApiTestFuzzer: public v8::internal::Thread {
static void Fuzz();
private:
+ explicit ApiTestFuzzer(int num)
+ : Thread("ApiTestFuzzer"),
+ test_number_(num),
+ gate_(v8::internal::OS::CreateSemaphore(0)),
+ active_(true) {
+ }
+ ~ApiTestFuzzer() { delete gate_; }
+
static bool fuzzing_;
static int tests_being_run_;
static int current_;
@@ -173,11 +179,13 @@ class LocalContext {
v8::Handle<v8::Value> global_object = v8::Handle<v8::Value>())
: context_(v8::Context::New(extensions, global_template, global_object)) {
context_->Enter();
+ // Cache the isolate now; after a fatal error it may no longer
+ // be retrievable.
+ isolate_ = context_->GetIsolate();
}
virtual ~LocalContext() {
context_->Exit();
- context_.Dispose();
+ context_.Dispose(isolate_);
}
v8::Context* operator->() { return *context_; }
@@ -190,6 +198,7 @@ class LocalContext {
private:
v8::Persistent<v8::Context> context_;
+ v8::Isolate* isolate_;
};
@@ -233,4 +242,24 @@ static inline int FlagDependentPortOffset() {
}
+// Helper function that simulates a full new-space in the heap.
+static inline void SimulateFullSpace(v8::internal::NewSpace* space) {
+ int new_linear_size = static_cast<int>(
+ *space->allocation_limit_address() - *space->allocation_top_address());
+ v8::internal::MaybeObject* maybe = space->AllocateRaw(new_linear_size);
+ v8::internal::FreeListNode* node = v8::internal::FreeListNode::cast(maybe);
+ node->set_size(space->heap(), new_linear_size);
+}
+
+
+// Helper function that simulates a full old-space in the heap.
+static inline void SimulateFullSpace(v8::internal::PagedSpace* space) {
+ int old_linear_size = static_cast<int>(space->limit() - space->top());
+ space->Free(space->top(), old_linear_size);
+ space->SetTop(space->limit(), space->limit());
+ space->ResetFreeList();
+ space->ClearStats();
+}
+
+
#endif // ifndef CCTEST_H_
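
Hoisting SimulateFullSpace into cctest.h lets any cctest exhaust a space's linear allocation area before allocating, which is how the rewritten test-alloc.cc below forces retry-after-GC paths. A sketch of the intended call pattern, mirroring AllocateAfterFailures further down and assuming the internal headers the cctest files already pull in:

// Hedged sketch of the intended usage; the function name is hypothetical.
static void AllocateUnderMemoryPressure(v8::internal::Heap* heap) {
  using namespace v8::internal;
  // Exhaust the linear allocation area so the next allocation has to
  // take the slow path.
  SimulateFullSpace(heap->new_space());
  CHECK(!heap->AllocateByteArray(100)->IsFailure());
  SimulateFullSpace(heap->old_pointer_space());
  CHECK(!heap->AllocateFixedArray(100, TENURED)->IsFailure());
}
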
diff --git a/src/3rdparty/v8/test/cctest/cctest.status b/src/3rdparty/v8/test/cctest/cctest.status
index ab59e33..b457ef2 100644
--- a/src/3rdparty/v8/test/cctest/cctest.status
+++ b/src/3rdparty/v8/test/cctest/cctest.status
@@ -56,6 +56,9 @@ test-profile-generator/RecordStackTraceAtStartProfiling: PASS || FAIL
# We do not yet shrink weak maps after they have been emptied by the GC
test-weakmaps/Shrinking: FAIL
+# Deferred stack trace formatting is temporarily disabled.
+test-heap/ReleaseStackTraceData: PASS || FAIL
+
##############################################################################
[ $arch == arm ]
diff --git a/src/3rdparty/v8/test/cctest/test-accessors.cc b/src/3rdparty/v8/test/cctest/test-accessors.cc
index 0b342ff..0ff747c 100644
--- a/src/3rdparty/v8/test/cctest/test-accessors.cc
+++ b/src/3rdparty/v8/test/cctest/test-accessors.cc
@@ -225,7 +225,8 @@ THREADED_TEST(HandleScopePop) {
LocalContext context;
v8::Handle<v8::Object> inst = obj->NewInstance();
context->Global()->Set(v8::String::New("obj"), inst);
- int count_before = i::HandleScope::NumberOfHandles();
+ i::Isolate* isolate = i::Isolate::Current();
+ int count_before = i::HandleScope::NumberOfHandles(isolate);
{
v8::HandleScope scope;
CompileRun(
@@ -234,7 +235,7 @@ THREADED_TEST(HandleScopePop) {
" obj.many;"
"}");
}
- int count_after = i::HandleScope::NumberOfHandles();
+ int count_after = i::HandleScope::NumberOfHandles(isolate);
CHECK_EQ(count_before, count_after);
}
@@ -398,7 +399,7 @@ THREADED_TEST(Gc) {
static v8::Handle<Value> StackCheck(Local<String> name,
const AccessorInfo& info) {
- i::StackFrameIterator iter;
+ i::StackFrameIterator iter(reinterpret_cast<i::Isolate*>(info.GetIsolate()));
for (int i = 0; !iter.done(); i++) {
i::StackFrame* frame = iter.frame();
CHECK(i != 0 || (frame->type() == i::StackFrame::EXIT));
@@ -453,3 +454,29 @@ THREADED_TEST(HandleScopeSegment) {
"result;"))->Run();
CHECK_EQ(100, result->Int32Value());
}
+
+
+v8::Handle<v8::Array> JSONStringifyEnumerator(const AccessorInfo& info) {
+ v8::Handle<v8::Array> array = v8::Array::New(1);
+ array->Set(0, v8_str("regress"));
+ return array;
+}
+
+
+v8::Handle<v8::Value> JSONStringifyGetter(Local<String> name,
+ const AccessorInfo& info) {
+ return v8_str("crbug-161028");
+}
+
+
+THREADED_TEST(JSONStringifyNamedInterceptorObject) {
+ v8::HandleScope scope;
+ LocalContext env;
+
+ v8::Handle<v8::ObjectTemplate> obj = ObjectTemplate::New();
+ obj->SetNamedPropertyHandler(
+ JSONStringifyGetter, NULL, NULL, NULL, JSONStringifyEnumerator);
+ env->Global()->Set(v8_str("obj"), obj->NewInstance());
+ v8::Handle<v8::String> expected = v8_str("{\"regress\":\"crbug-161028\"}");
+ CHECK(CompileRun("JSON.stringify(obj)")->Equals(expected));
+}
diff --git a/src/3rdparty/v8/test/cctest/test-alloc.cc b/src/3rdparty/v8/test/cctest/test-alloc.cc
index 7ba2583..8cabd3d 100644
--- a/src/3rdparty/v8/test/cctest/test-alloc.cc
+++ b/src/3rdparty/v8/test/cctest/test-alloc.cc
@@ -34,34 +34,13 @@
using namespace v8::internal;
-// Also used in test-heap.cc test cases.
-void SimulateFullSpace(PagedSpace* space) {
- int old_linear_size = static_cast<int>(space->limit() - space->top());
- space->Free(space->top(), old_linear_size);
- space->SetTop(space->limit(), space->limit());
- space->ResetFreeList();
- space->ClearStats();
-}
-
-
static MaybeObject* AllocateAfterFailures() {
static int attempts = 0;
if (++attempts < 3) return Failure::RetryAfterGC();
Heap* heap = Isolate::Current()->heap();
// New space.
- NewSpace* new_space = heap->new_space();
- static const int kNewSpaceFillerSize = ByteArray::SizeFor(0);
- while (new_space->Available() > kNewSpaceFillerSize) {
- int available_before = static_cast<int>(new_space->Available());
- CHECK(!heap->AllocateByteArray(0)->IsFailure());
- if (available_before == new_space->Available()) {
- // It seems that we are avoiding new space allocations when
- // allocation is forced, so no need to fill up new space
- // in order to make the test harder.
- break;
- }
- }
+ SimulateFullSpace(heap->new_space());
CHECK(!heap->AllocateByteArray(100)->IsFailure());
CHECK(!heap->AllocateFixedArray(100, NOT_TENURED)->IsFailure());
@@ -76,7 +55,7 @@ static MaybeObject* AllocateAfterFailures() {
// Old data space.
SimulateFullSpace(heap->old_data_space());
- CHECK(!heap->AllocateRawAsciiString(100, TENURED)->IsFailure());
+ CHECK(!heap->AllocateRawOneByteString(100, TENURED)->IsFailure());
// Old pointer space.
SimulateFullSpace(heap->old_pointer_space());
@@ -100,6 +79,7 @@ static MaybeObject* AllocateAfterFailures() {
CHECK(!heap->AllocateMap(JS_OBJECT_TYPE, instance_size)->IsFailure());
// Test that we can allocate in old pointer space and code space.
+ SimulateFullSpace(heap->code_space());
CHECK(!heap->AllocateFixedArray(100, TENURED)->IsFailure());
CHECK(!heap->CopyCode(Isolate::Current()->builtins()->builtin(
Builtins::kIllegal))->IsFailure());
@@ -141,7 +121,7 @@ TEST(StressJS) {
v8::HandleScope scope;
env->Enter();
Handle<JSFunction> function =
- FACTORY->NewFunction(FACTORY->function_symbol(), FACTORY->null_value());
+ FACTORY->NewFunction(FACTORY->function_string(), FACTORY->null_value());
// Force the creation of an initial map and set the code to
// something empty.
FACTORY->NewJSObject(function);
@@ -224,7 +204,9 @@ TEST(CodeRange) {
(Page::kMaxNonCodeHeapObjectSize << (Pseudorandom() % 3)) +
Pseudorandom() % 5000 + 1;
size_t allocated = 0;
- Address base = code_range->AllocateRawMemory(requested, &allocated);
+ Address base = code_range->AllocateRawMemory(requested,
+ requested,
+ &allocated);
CHECK(base != NULL);
blocks.Add(Block(base, static_cast<int>(allocated)));
current_allocated += static_cast<int>(allocated);
diff --git a/src/3rdparty/v8/test/cctest/test-api.cc b/src/3rdparty/v8/test/cctest/test-api.cc
index f7325df..48d8436 100644
--- a/src/3rdparty/v8/test/cctest/test-api.cc
+++ b/src/3rdparty/v8/test/cctest/test-api.cc
@@ -168,6 +168,23 @@ THREADED_TEST(Handles) {
}
+THREADED_TEST(IsolateOfContext) {
+ v8::HandleScope scope;
+ v8::Persistent<Context> env = Context::New();
+
+ CHECK(!env->InContext());
+ CHECK(env->GetIsolate() == v8::Isolate::GetCurrent());
+ env->Enter();
+ CHECK(env->InContext());
+ CHECK(env->GetIsolate() == v8::Isolate::GetCurrent());
+ env->Exit();
+ CHECK(!env->InContext());
+ CHECK(env->GetIsolate() == v8::Isolate::GetCurrent());
+
+ env.Dispose(env->GetIsolate());
+}
+
+
THREADED_TEST(ReceiverSignature) {
v8::HandleScope scope;
LocalContext env;
@@ -596,8 +613,9 @@ THREADED_TEST(UsingExternalString) {
// Trigger GCs so that the newly allocated string moves to old gen.
HEAP->CollectGarbage(i::NEW_SPACE); // in survivor space now
HEAP->CollectGarbage(i::NEW_SPACE); // in old gen now
- i::Handle<i::String> isymbol = FACTORY->SymbolFromString(istring);
- CHECK(isymbol->IsSymbol());
+ i::Handle<i::String> isymbol =
+ FACTORY->InternalizedStringFromString(istring);
+ CHECK(isymbol->IsInternalizedString());
}
HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
@@ -614,8 +632,9 @@ THREADED_TEST(UsingExternalAsciiString) {
// Trigger GCs so that the newly allocated string moves to old gen.
HEAP->CollectGarbage(i::NEW_SPACE); // in survivor space now
HEAP->CollectGarbage(i::NEW_SPACE); // in old gen now
- i::Handle<i::String> isymbol = FACTORY->SymbolFromString(istring);
- CHECK(isymbol->IsSymbol());
+ i::Handle<i::String> isymbol =
+ FACTORY->InternalizedStringFromString(istring);
+ CHECK(isymbol->IsInternalizedString());
}
HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
@@ -623,6 +642,8 @@ THREADED_TEST(UsingExternalAsciiString) {
THREADED_TEST(ScavengeExternalString) {
+ i::FLAG_stress_compaction = false;
+ i::FLAG_gc_global = false;
int dispose_count = 0;
bool in_new_space = false;
{
@@ -643,6 +664,8 @@ THREADED_TEST(ScavengeExternalString) {
THREADED_TEST(ScavengeExternalAsciiString) {
+ i::FLAG_stress_compaction = false;
+ i::FLAG_gc_global = false;
int dispose_count = 0;
bool in_new_space = false;
{
@@ -846,9 +869,41 @@ THREADED_TEST(FunctionTemplate) {
}
+THREADED_TEST(FunctionTemplateSetLength) {
+ v8::HandleScope scope;
+ LocalContext env;
+ {
+ Local<v8::FunctionTemplate> fun_templ = v8::FunctionTemplate::New(
+ handle_call, Handle<v8::Value>(), Handle<v8::Signature>(), 23);
+ Local<Function> fun = fun_templ->GetFunction();
+ env->Global()->Set(v8_str("obj"), fun);
+ Local<Script> script = v8_compile("obj.length");
+ CHECK_EQ(23, script->Run()->Int32Value());
+ }
+ {
+ Local<v8::FunctionTemplate> fun_templ =
+ v8::FunctionTemplate::New(handle_call);
+ fun_templ->SetLength(22);
+ Local<Function> fun = fun_templ->GetFunction();
+ env->Global()->Set(v8_str("obj"), fun);
+ Local<Script> script = v8_compile("obj.length");
+ CHECK_EQ(22, script->Run()->Int32Value());
+ }
+ {
+ // Without setting length it defaults to 0.
+ Local<v8::FunctionTemplate> fun_templ =
+ v8::FunctionTemplate::New(handle_call);
+ Local<Function> fun = fun_templ->GetFunction();
+ env->Global()->Set(v8_str("obj"), fun);
+ Local<Script> script = v8_compile("obj.length");
+ CHECK_EQ(0, script->Run()->Int32Value());
+ }
+}
+
+
static void* expected_ptr;
static v8::Handle<v8::Value> callback(const v8::Arguments& args) {
- void* ptr = v8::External::Unwrap(args.Data());
+ void* ptr = v8::External::Cast(*args.Data())->Value();
CHECK_EQ(expected_ptr, ptr);
return v8::True();
}
@@ -858,7 +913,7 @@ static void TestExternalPointerWrapping() {
v8::HandleScope scope;
LocalContext env;
- v8::Handle<v8::Value> data = v8::External::Wrap(expected_ptr);
+ v8::Handle<v8::Value> data = v8::External::New(expected_ptr);
v8::Handle<v8::Object> obj = v8::Object::New();
obj->Set(v8_str("func"),
@@ -1796,8 +1851,8 @@ THREADED_TEST(PropertyHandlerInPrototype) {
Local<v8::Object> top = templ->GetFunction()->NewInstance();
Local<v8::Object> middle = templ->GetFunction()->NewInstance();
- bottom->Set(v8_str("__proto__"), middle);
- middle->Set(v8_str("__proto__"), top);
+ bottom->SetPrototype(middle);
+ middle->SetPrototype(top);
env->Global()->Set(v8_str("obj"), bottom);
// Indexed and named get.
@@ -1986,7 +2041,16 @@ THREADED_TEST(GlobalObjectInternalFields) {
}
-THREADED_TEST(InternalFieldsNativePointers) {
+static void CheckAlignedPointerInInternalField(Handle<v8::Object> obj,
+ void* value) {
+ CHECK_EQ(0, static_cast<int>(reinterpret_cast<uintptr_t>(value) & 0x1));
+ obj->SetAlignedPointerInInternalField(0, value);
+ HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+ CHECK_EQ(value, obj->GetAlignedPointerFromInternalField(0));
+}
+
+
+THREADED_TEST(InternalFieldsAlignedPointers) {
v8::HandleScope scope;
LocalContext env;
@@ -1995,64 +2059,78 @@ THREADED_TEST(InternalFieldsNativePointers) {
instance_templ->SetInternalFieldCount(1);
Local<v8::Object> obj = templ->GetFunction()->NewInstance();
CHECK_EQ(1, obj->InternalFieldCount());
- CHECK(obj->GetPointerFromInternalField(0) == NULL);
- char* data = new char[100];
+ CheckAlignedPointerInInternalField(obj, NULL);
- void* aligned = data;
- CHECK_EQ(0, static_cast<int>(reinterpret_cast<uintptr_t>(aligned) & 0x1));
- void* unaligned = data + 1;
- CHECK_EQ(1, static_cast<int>(reinterpret_cast<uintptr_t>(unaligned) & 0x1));
+ int* heap_allocated = new int[100];
+ CheckAlignedPointerInInternalField(obj, heap_allocated);
+ delete[] heap_allocated;
+
+ int stack_allocated[100];
+ CheckAlignedPointerInInternalField(obj, stack_allocated);
+
+ void* huge = reinterpret_cast<void*>(~static_cast<uintptr_t>(1));
+ CheckAlignedPointerInInternalField(obj, huge);
+}
- // Check reading and writing aligned pointers.
- obj->SetPointerInInternalField(0, aligned);
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
- CHECK_EQ(aligned, obj->GetPointerFromInternalField(0));
- // Check reading and writing unaligned pointers.
- obj->SetPointerInInternalField(0, unaligned);
+static void CheckAlignedPointerInEmbedderData(LocalContext* env,
+ int index,
+ void* value) {
+ CHECK_EQ(0, static_cast<int>(reinterpret_cast<uintptr_t>(value) & 0x1));
+ (*env)->SetAlignedPointerInEmbedderData(index, value);
HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
- CHECK_EQ(unaligned, obj->GetPointerFromInternalField(0));
+ CHECK_EQ(value, (*env)->GetAlignedPointerFromEmbedderData(index));
+}
- delete[] data;
+
+static void* AlignedTestPointer(int i) {
+ return reinterpret_cast<void*>(i * 1234);
}
-THREADED_TEST(InternalFieldsNativePointersAndExternal) {
+THREADED_TEST(EmbedderDataAlignedPointers) {
v8::HandleScope scope;
LocalContext env;
- Local<v8::FunctionTemplate> templ = v8::FunctionTemplate::New();
- Local<v8::ObjectTemplate> instance_templ = templ->InstanceTemplate();
- instance_templ->SetInternalFieldCount(1);
- Local<v8::Object> obj = templ->GetFunction()->NewInstance();
- CHECK_EQ(1, obj->InternalFieldCount());
- CHECK(obj->GetPointerFromInternalField(0) == NULL);
+ CheckAlignedPointerInEmbedderData(&env, 0, NULL);
- char* data = new char[100];
+ int* heap_allocated = new int[100];
+ CheckAlignedPointerInEmbedderData(&env, 1, heap_allocated);
+ delete[] heap_allocated;
- void* aligned = data;
- CHECK_EQ(0, static_cast<int>(reinterpret_cast<uintptr_t>(aligned) & 0x1));
- void* unaligned = data + 1;
- CHECK_EQ(1, static_cast<int>(reinterpret_cast<uintptr_t>(unaligned) & 0x1));
+ int stack_allocated[100];
+ CheckAlignedPointerInEmbedderData(&env, 2, stack_allocated);
- obj->SetPointerInInternalField(0, aligned);
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
- CHECK_EQ(aligned, v8::External::Unwrap(obj->GetInternalField(0)));
+ void* huge = reinterpret_cast<void*>(~static_cast<uintptr_t>(1));
+ CheckAlignedPointerInEmbedderData(&env, 3, huge);
- obj->SetPointerInInternalField(0, unaligned);
+ // Test growing of the embedder data's backing store.
+ for (int i = 0; i < 100; i++) {
+ env->SetAlignedPointerInEmbedderData(i, AlignedTestPointer(i));
+ }
HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
- CHECK_EQ(unaligned, v8::External::Unwrap(obj->GetInternalField(0)));
+ for (int i = 0; i < 100; i++) {
+ CHECK_EQ(AlignedTestPointer(i), env->GetAlignedPointerFromEmbedderData(i));
+ }
+}
- obj->SetInternalField(0, v8::External::Wrap(aligned));
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
- CHECK_EQ(aligned, obj->GetPointerFromInternalField(0));
- obj->SetInternalField(0, v8::External::Wrap(unaligned));
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
- CHECK_EQ(unaligned, obj->GetPointerFromInternalField(0));
+static void CheckEmbedderData(LocalContext* env,
+ int index,
+ v8::Handle<Value> data) {
+ (*env)->SetEmbedderData(index, data);
+ CHECK((*env)->GetEmbedderData(index)->StrictEquals(data));
+}
- delete[] data;
+THREADED_TEST(EmbedderData) {
+ v8::HandleScope scope;
+ LocalContext env;
+
+ CheckEmbedderData(&env, 3, v8::String::New("The quick brown fox jumps"));
+ CheckEmbedderData(&env, 2, v8::String::New("over the lazy dog."));
+ CheckEmbedderData(&env, 1, v8::Number::New(1.2345));
+ CheckEmbedderData(&env, 0, v8::Boolean::New(true));
}
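
Both helpers above assert that the low bit of the stored pointer is clear. The reason, as far as the API surface suggests, is that an aligned pointer's free low bit can serve as a tag distinguishing encoded pointers from ordinary values; the sketch below shows the general trick with an illustrative encoding, not V8's actual scheme:

#include <assert.h>
#include <stdint.h>

// Why the tests insist on (ptr & 0x1) == 0: a 2-byte-aligned pointer
// has a free low bit, usable as a tag. Illustrative encoding only.
static uintptr_t EncodeAligned(void* p) {
  uintptr_t bits = reinterpret_cast<uintptr_t>(p);
  assert((bits & 0x1) == 0);  // caller must pass an aligned pointer
  return bits;                // low bit 0 marks "encoded pointer"
}

static void* DecodeAligned(uintptr_t bits) {
  return reinterpret_cast<void*>(bits);
}

int main() {
  int probe = 0;
  assert(DecodeAligned(EncodeAligned(&probe)) == &probe);
  return 0;
}
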
@@ -2223,40 +2301,51 @@ THREADED_TEST(External) {
// Make sure unaligned pointers are wrapped properly.
char* data = i::StrDup("0123456789");
- Local<v8::Value> zero = v8::External::Wrap(&data[0]);
- Local<v8::Value> one = v8::External::Wrap(&data[1]);
- Local<v8::Value> two = v8::External::Wrap(&data[2]);
- Local<v8::Value> three = v8::External::Wrap(&data[3]);
+ Local<v8::Value> zero = v8::External::New(&data[0]);
+ Local<v8::Value> one = v8::External::New(&data[1]);
+ Local<v8::Value> two = v8::External::New(&data[2]);
+ Local<v8::Value> three = v8::External::New(&data[3]);
- char* char_ptr = reinterpret_cast<char*>(v8::External::Unwrap(zero));
+ char* char_ptr = reinterpret_cast<char*>(v8::External::Cast(*zero)->Value());
CHECK_EQ('0', *char_ptr);
- char_ptr = reinterpret_cast<char*>(v8::External::Unwrap(one));
+ char_ptr = reinterpret_cast<char*>(v8::External::Cast(*one)->Value());
CHECK_EQ('1', *char_ptr);
- char_ptr = reinterpret_cast<char*>(v8::External::Unwrap(two));
+ char_ptr = reinterpret_cast<char*>(v8::External::Cast(*two)->Value());
CHECK_EQ('2', *char_ptr);
- char_ptr = reinterpret_cast<char*>(v8::External::Unwrap(three));
+ char_ptr = reinterpret_cast<char*>(v8::External::Cast(*three)->Value());
CHECK_EQ('3', *char_ptr);
i::DeleteArray(data);
}
THREADED_TEST(GlobalHandle) {
+ v8::Isolate* isolate = v8::Isolate::GetCurrent();
v8::Persistent<String> global;
{
v8::HandleScope scope;
Local<String> str = v8_str("str");
- global = v8::Persistent<String>::New(str);
+ global = v8::Persistent<String>::New(isolate, str);
}
CHECK_EQ(global->Length(), 3);
- global.Dispose();
+ global.Dispose(isolate);
{
v8::HandleScope scope;
Local<String> str = v8_str("str");
- global = v8::Persistent<String>::New(str);
+ global = v8::Persistent<String>::New(isolate, str);
}
CHECK_EQ(global->Length(), 3);
- global.Dispose(v8::Isolate::GetCurrent());
+ global.Dispose(isolate);
+}
+
+
+THREADED_TEST(LocalHandle) {
+ v8::HandleScope scope;
+ v8::Local<String> local = v8::Local<String>::New(v8_str("str"));
+ CHECK_EQ(local->Length(), 3);
+
+ local = v8::Local<String>::New(v8::Isolate::GetCurrent(), v8_str("str"));
+ CHECK_EQ(local->Length(), 3);
}
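
Most of the remaining churn in this file follows one mechanical pattern: Persistent handle operations gain an explicit v8::Isolate* argument, and weak callbacks become v8::NearDeathCallback with the isolate as first parameter. A condensed sketch of the new shape, using only calls that appear elsewhere in this diff; the function names are hypothetical:

static void OnNearDeath(v8::Isolate* isolate,
                        v8::Persistent<v8::Value> handle,
                        void* data) {
  handle.Dispose(isolate);  // was: handle.Dispose();
  handle.Clear();
  *reinterpret_cast<bool*>(data) = true;
}

static void WeakHandlePattern(v8::Isolate* isolate, bool* collected) {
  v8::Persistent<v8::Object> handle;
  {
    v8::HandleScope scope;
    // was: Persistent<Object>::New(obj)
    handle = v8::Persistent<v8::Object>::New(isolate, v8::Object::New());
  }
  handle.MakeWeak(isolate, collected, &OnNearDeath);  // was: MakeWeak(data, cb)
  handle.MarkIndependent(isolate);
  // A subsequent scavenge may reclaim the object and run OnNearDeath.
}
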
@@ -2272,17 +2361,20 @@ class WeakCallCounter {
};
-static void WeakPointerCallback(Persistent<Value> handle, void* id) {
+static void WeakPointerCallback(v8::Isolate* isolate,
+ Persistent<Value> handle,
+ void* id) {
WeakCallCounter* counter = reinterpret_cast<WeakCallCounter*>(id);
CHECK_EQ(1234, counter->id());
counter->increment();
- handle.Dispose();
+ handle.Dispose(isolate);
}
THREADED_TEST(ApiObjectGroups) {
HandleScope scope;
LocalContext env;
+ v8::Isolate* iso = env->GetIsolate();
Persistent<Object> g1s1;
Persistent<Object> g1s2;
@@ -2295,22 +2387,22 @@ THREADED_TEST(ApiObjectGroups) {
{
HandleScope scope;
- g1s1 = Persistent<Object>::New(Object::New());
- g1s2 = Persistent<Object>::New(Object::New());
- g1c1 = Persistent<Object>::New(Object::New());
- g1s1.MakeWeak(reinterpret_cast<void*>(&counter), &WeakPointerCallback);
- g1s2.MakeWeak(reinterpret_cast<void*>(&counter), &WeakPointerCallback);
- g1c1.MakeWeak(reinterpret_cast<void*>(&counter), &WeakPointerCallback);
+ g1s1 = Persistent<Object>::New(iso, Object::New());
+ g1s2 = Persistent<Object>::New(iso, Object::New());
+ g1c1 = Persistent<Object>::New(iso, Object::New());
+ g1s1.MakeWeak(iso, reinterpret_cast<void*>(&counter), &WeakPointerCallback);
+ g1s2.MakeWeak(iso, reinterpret_cast<void*>(&counter), &WeakPointerCallback);
+ g1c1.MakeWeak(iso, reinterpret_cast<void*>(&counter), &WeakPointerCallback);
- g2s1 = Persistent<Object>::New(Object::New());
- g2s2 = Persistent<Object>::New(Object::New());
- g2c1 = Persistent<Object>::New(Object::New());
- g2s1.MakeWeak(reinterpret_cast<void*>(&counter), &WeakPointerCallback);
- g2s2.MakeWeak(reinterpret_cast<void*>(&counter), &WeakPointerCallback);
- g2c1.MakeWeak(reinterpret_cast<void*>(&counter), &WeakPointerCallback);
+ g2s1 = Persistent<Object>::New(iso, Object::New());
+ g2s2 = Persistent<Object>::New(iso, Object::New());
+ g2c1 = Persistent<Object>::New(iso, Object::New());
+ g2s1.MakeWeak(iso, reinterpret_cast<void*>(&counter), &WeakPointerCallback);
+ g2s2.MakeWeak(iso, reinterpret_cast<void*>(&counter), &WeakPointerCallback);
+ g2c1.MakeWeak(iso, reinterpret_cast<void*>(&counter), &WeakPointerCallback);
}
- Persistent<Object> root = Persistent<Object>::New(g1s1); // make a root.
+ Persistent<Object> root = Persistent<Object>::New(iso, g1s1); // make a root.
// Connect group 1 and 2, make a cycle.
CHECK(g1s2->Set(0, g2s2));
@@ -2333,11 +2425,11 @@ THREADED_TEST(ApiObjectGroups) {
CHECK_EQ(0, counter.NumberOfWeakCalls());
// Weaken the root.
- root.MakeWeak(reinterpret_cast<void*>(&counter), &WeakPointerCallback);
+ root.MakeWeak(iso, reinterpret_cast<void*>(&counter), &WeakPointerCallback);
// But make children strong roots---all the objects (except for children)
// should be collectable now.
- g1c1.ClearWeak();
- g2c1.ClearWeak();
+ g1c1.ClearWeak(iso);
+ g2c1.ClearWeak(iso);
// Groups are deleted, rebuild groups.
{
@@ -2357,8 +2449,8 @@ THREADED_TEST(ApiObjectGroups) {
CHECK_EQ(5, counter.NumberOfWeakCalls());
// And now make children weak again and collect them.
- g1c1.MakeWeak(reinterpret_cast<void*>(&counter), &WeakPointerCallback);
- g2c1.MakeWeak(reinterpret_cast<void*>(&counter), &WeakPointerCallback);
+ g1c1.MakeWeak(iso, reinterpret_cast<void*>(&counter), &WeakPointerCallback);
+ g2c1.MakeWeak(iso, reinterpret_cast<void*>(&counter), &WeakPointerCallback);
HEAP->CollectAllGarbage(i::Heap::kAbortIncrementalMarkingMask);
CHECK_EQ(7, counter.NumberOfWeakCalls());
@@ -2368,6 +2460,7 @@ THREADED_TEST(ApiObjectGroups) {
THREADED_TEST(ApiObjectGroupsCycle) {
HandleScope scope;
LocalContext env;
+ v8::Isolate* iso = env->GetIsolate();
WeakCallCounter counter(1234);
@@ -2377,26 +2470,41 @@ THREADED_TEST(ApiObjectGroupsCycle) {
Persistent<Object> g2s2;
Persistent<Object> g3s1;
Persistent<Object> g3s2;
+ Persistent<Object> g4s1;
+ Persistent<Object> g4s2;
{
HandleScope scope;
- g1s1 = Persistent<Object>::New(Object::New());
- g1s2 = Persistent<Object>::New(Object::New());
- g1s1.MakeWeak(reinterpret_cast<void*>(&counter), &WeakPointerCallback);
- g1s2.MakeWeak(reinterpret_cast<void*>(&counter), &WeakPointerCallback);
-
- g2s1 = Persistent<Object>::New(Object::New());
- g2s2 = Persistent<Object>::New(Object::New());
- g2s1.MakeWeak(reinterpret_cast<void*>(&counter), &WeakPointerCallback);
- g2s2.MakeWeak(reinterpret_cast<void*>(&counter), &WeakPointerCallback);
-
- g3s1 = Persistent<Object>::New(Object::New());
- g3s2 = Persistent<Object>::New(Object::New());
- g3s1.MakeWeak(reinterpret_cast<void*>(&counter), &WeakPointerCallback);
- g3s2.MakeWeak(reinterpret_cast<void*>(&counter), &WeakPointerCallback);
- }
-
- Persistent<Object> root = Persistent<Object>::New(g1s1); // make a root.
+ g1s1 = Persistent<Object>::New(iso, Object::New());
+ g1s2 = Persistent<Object>::New(iso, Object::New());
+ g1s1.MakeWeak(iso, reinterpret_cast<void*>(&counter), &WeakPointerCallback);
+ g1s2.MakeWeak(iso, reinterpret_cast<void*>(&counter), &WeakPointerCallback);
+ CHECK(g1s1.IsWeak(iso));
+ CHECK(g1s2.IsWeak(iso));
+
+ g2s1 = Persistent<Object>::New(iso, Object::New());
+ g2s2 = Persistent<Object>::New(iso, Object::New());
+ g2s1.MakeWeak(iso, reinterpret_cast<void*>(&counter), &WeakPointerCallback);
+ g2s2.MakeWeak(iso, reinterpret_cast<void*>(&counter), &WeakPointerCallback);
+ CHECK(g2s1.IsWeak(iso));
+ CHECK(g2s2.IsWeak(iso));
+
+ g3s1 = Persistent<Object>::New(iso, Object::New());
+ g3s2 = Persistent<Object>::New(iso, Object::New());
+ g3s1.MakeWeak(iso, reinterpret_cast<void*>(&counter), &WeakPointerCallback);
+ g3s2.MakeWeak(iso, reinterpret_cast<void*>(&counter), &WeakPointerCallback);
+ CHECK(g3s1.IsWeak(iso));
+ CHECK(g3s2.IsWeak(iso));
+
+ g4s1 = Persistent<Object>::New(iso, Object::New());
+ g4s2 = Persistent<Object>::New(iso, Object::New());
+ g4s1.MakeWeak(iso, reinterpret_cast<void*>(&counter), &WeakPointerCallback);
+ g4s2.MakeWeak(iso, reinterpret_cast<void*>(&counter), &WeakPointerCallback);
+ CHECK(g4s1.IsWeak(iso));
+ CHECK(g4s2.IsWeak(iso));
+ }
+
+ Persistent<Object> root = Persistent<Object>::New(iso, g1s1); // make a root.
// Connect groups. We're building the following cycle:
// G1: { g1s1, g2s1 }, g1s1 implicitly references g2s1, ditto for other
@@ -2407,13 +2515,17 @@ THREADED_TEST(ApiObjectGroupsCycle) {
Persistent<Value> g2_objects[] = { g2s1, g2s2 };
Persistent<Value> g2_children[] = { g3s1 };
Persistent<Value> g3_objects[] = { g3s1, g3s2 };
- Persistent<Value> g3_children[] = { g1s1 };
+ Persistent<Value> g3_children[] = { g4s1 };
+ Persistent<Value> g4_objects[] = { g4s1, g4s2 };
+ Persistent<Value> g4_children[] = { g1s1 };
V8::AddObjectGroup(g1_objects, 2);
V8::AddImplicitReferences(g1s1, g1_children, 1);
V8::AddObjectGroup(g2_objects, 2);
V8::AddImplicitReferences(g2s1, g2_children, 1);
V8::AddObjectGroup(g3_objects, 2);
V8::AddImplicitReferences(g3s1, g3_children, 1);
+ V8::AddObjectGroup(iso, g4_objects, 2);
+ V8::AddImplicitReferences(g4s1, g4_children, 1);
}
// Do a single full GC
HEAP->CollectAllGarbage(i::Heap::kAbortIncrementalMarkingMask);
@@ -2422,7 +2534,7 @@ THREADED_TEST(ApiObjectGroupsCycle) {
CHECK_EQ(0, counter.NumberOfWeakCalls());
// Weaken the root.
- root.MakeWeak(reinterpret_cast<void*>(&counter), &WeakPointerCallback);
+ root.MakeWeak(iso, reinterpret_cast<void*>(&counter), &WeakPointerCallback);
// Groups are deleted, rebuild groups.
{
@@ -2431,27 +2543,34 @@ THREADED_TEST(ApiObjectGroupsCycle) {
Persistent<Value> g2_objects[] = { g2s1, g2s2 };
Persistent<Value> g2_children[] = { g3s1 };
Persistent<Value> g3_objects[] = { g3s1, g3s2 };
- Persistent<Value> g3_children[] = { g1s1 };
+ Persistent<Value> g3_children[] = { g4s1 };
+ Persistent<Value> g4_objects[] = { g4s1, g4s2 };
+ Persistent<Value> g4_children[] = { g1s1 };
V8::AddObjectGroup(g1_objects, 2);
V8::AddImplicitReferences(g1s1, g1_children, 1);
V8::AddObjectGroup(g2_objects, 2);
V8::AddImplicitReferences(g2s1, g2_children, 1);
V8::AddObjectGroup(g3_objects, 2);
V8::AddImplicitReferences(g3s1, g3_children, 1);
+ V8::AddObjectGroup(g4_objects, 2);
+ V8::AddImplicitReferences(g4s1, g4_children, 1);
}
HEAP->CollectAllGarbage(i::Heap::kAbortIncrementalMarkingMask);
- // All objects should be gone. 7 global handles in total.
- CHECK_EQ(7, counter.NumberOfWeakCalls());
+ // All objects should be gone. 9 global handles in total.
+ CHECK_EQ(9, counter.NumberOfWeakCalls());
}
// TODO(mstarzinger): This should be a THREADED_TEST but causes failures
// on the buildbots, so was made non-threaded for the time being.
TEST(ApiObjectGroupsCycleForScavenger) {
+ i::FLAG_stress_compaction = false;
+ i::FLAG_gc_global = false;
HandleScope scope;
LocalContext env;
+ v8::Isolate* iso = env->GetIsolate();
WeakCallCounter counter(1234);
@@ -2464,36 +2583,36 @@ TEST(ApiObjectGroupsCycleForScavenger) {
{
HandleScope scope;
- g1s1 = Persistent<Object>::New(Object::New());
- g1s2 = Persistent<Object>::New(Object::New());
- g1s1.MakeWeak(reinterpret_cast<void*>(&counter), &WeakPointerCallback);
- g1s2.MakeWeak(reinterpret_cast<void*>(&counter), &WeakPointerCallback);
+ g1s1 = Persistent<Object>::New(iso, Object::New());
+ g1s2 = Persistent<Object>::New(iso, Object::New());
+ g1s1.MakeWeak(iso, reinterpret_cast<void*>(&counter), &WeakPointerCallback);
+ g1s2.MakeWeak(iso, reinterpret_cast<void*>(&counter), &WeakPointerCallback);
- g2s1 = Persistent<Object>::New(Object::New());
- g2s2 = Persistent<Object>::New(Object::New());
- g2s1.MakeWeak(reinterpret_cast<void*>(&counter), &WeakPointerCallback);
- g2s2.MakeWeak(reinterpret_cast<void*>(&counter), &WeakPointerCallback);
+ g2s1 = Persistent<Object>::New(iso, Object::New());
+ g2s2 = Persistent<Object>::New(iso, Object::New());
+ g2s1.MakeWeak(iso, reinterpret_cast<void*>(&counter), &WeakPointerCallback);
+ g2s2.MakeWeak(iso, reinterpret_cast<void*>(&counter), &WeakPointerCallback);
- g3s1 = Persistent<Object>::New(Object::New());
- g3s2 = Persistent<Object>::New(Object::New());
- g3s1.MakeWeak(reinterpret_cast<void*>(&counter), &WeakPointerCallback);
- g3s2.MakeWeak(reinterpret_cast<void*>(&counter), &WeakPointerCallback);
+ g3s1 = Persistent<Object>::New(iso, Object::New());
+ g3s2 = Persistent<Object>::New(iso, Object::New());
+ g3s1.MakeWeak(iso, reinterpret_cast<void*>(&counter), &WeakPointerCallback);
+ g3s2.MakeWeak(iso, reinterpret_cast<void*>(&counter), &WeakPointerCallback);
}
// Make a root.
- Persistent<Object> root = Persistent<Object>::New(g1s1);
- root.MarkPartiallyDependent();
+ Persistent<Object> root = Persistent<Object>::New(iso, g1s1);
+ root.MarkPartiallyDependent(iso);
// Connect groups. We're building the following cycle:
// G1: { g1s1, g2s1 }, g1s1 implicitly references g2s1, ditto for other
// groups.
{
- g1s1.MarkPartiallyDependent();
- g1s2.MarkPartiallyDependent();
- g2s1.MarkPartiallyDependent();
- g2s2.MarkPartiallyDependent();
- g3s1.MarkPartiallyDependent();
- g3s2.MarkPartiallyDependent();
+ g1s1.MarkPartiallyDependent(iso);
+ g1s2.MarkPartiallyDependent(iso);
+ g2s1.MarkPartiallyDependent(iso);
+ g2s2.MarkPartiallyDependent(iso);
+ g3s1.MarkPartiallyDependent(iso);
+ g3s2.MarkPartiallyDependent(iso);
Persistent<Value> g1_objects[] = { g1s1, g1s2 };
Persistent<Value> g2_objects[] = { g2s1, g2s2 };
Persistent<Value> g3_objects[] = { g3s1, g3s2 };
@@ -2511,8 +2630,8 @@ TEST(ApiObjectGroupsCycleForScavenger) {
CHECK_EQ(0, counter.NumberOfWeakCalls());
// Weaken the root.
- root.MakeWeak(reinterpret_cast<void*>(&counter), &WeakPointerCallback);
- root.MarkPartiallyDependent();
+ root.MakeWeak(iso, reinterpret_cast<void*>(&counter), &WeakPointerCallback);
+ root.MarkPartiallyDependent(iso);
v8::Isolate* isolate = v8::Isolate::GetCurrent();
// Groups are deleted, rebuild groups.
@@ -2554,11 +2673,24 @@ THREADED_TEST(ScriptException) {
}
+TEST(TryCatchCustomException) {
+ v8::HandleScope scope;
+ LocalContext env;
+ v8::TryCatch try_catch;
+ CompileRun("function CustomError() { this.a = 'b'; }"
+ "(function f() { throw new CustomError(); })();");
+ CHECK(try_catch.HasCaught());
+ CHECK(try_catch.Exception()->ToObject()->
+ Get(v8_str("a"))->Equals(v8_str("b")));
+}
+
+
bool message_received;
static void check_message_0(v8::Handle<v8::Message> message,
v8::Handle<Value> data) {
+ CHECK_EQ(5.76, data->NumberValue());
CHECK_EQ(6.75, message->GetScriptResourceName()->NumberValue());
CHECK_EQ(7.56, message->GetScriptData()->NumberValue());
message_received = true;
@@ -2569,7 +2701,7 @@ THREADED_TEST(MessageHandler0) {
message_received = false;
v8::HandleScope scope;
CHECK(!message_received);
- v8::V8::AddMessageListener(check_message_0);
+ v8::V8::AddMessageListener(check_message_0, v8_num(5.76));
LocalContext context;
v8::ScriptOrigin origin =
v8::ScriptOrigin(v8_str("6.75"));
@@ -3606,9 +3738,34 @@ THREADED_TEST(TryCatchAndFinally) {
}
+static void TryCatchNestedHelper(int depth) {
+ if (depth > 0) {
+ v8::TryCatch try_catch;
+ try_catch.SetVerbose(true);
+ TryCatchNestedHelper(depth - 1);
+ CHECK(try_catch.HasCaught());
+ try_catch.ReThrow();
+ } else {
+ v8::ThrowException(v8_str("back"));
+ }
+}
+
+
+TEST(TryCatchNested) {
+ v8::V8::Initialize();
+ v8::HandleScope scope;
+ LocalContext context;
+ v8::TryCatch try_catch;
+ TryCatchNestedHelper(5);
+ CHECK(try_catch.HasCaught());
+ CHECK_EQ(0, strcmp(*v8::String::Utf8Value(try_catch.Exception()), "back"));
+}
+
+
THREADED_TEST(Equality) {
v8::HandleScope scope;
LocalContext context;
+ v8::Isolate* isolate = context->GetIsolate();
// Check that equality works at all before relying on CHECK_EQ
CHECK(v8_str("a")->Equals(v8_str("a")));
CHECK(!v8_str("a")->Equals(v8_str("b")));
@@ -3619,7 +3776,7 @@ THREADED_TEST(Equality) {
CHECK_EQ(v8_num(1.00), v8_num(1));
CHECK_NE(v8_num(1), v8_num(2));
- // Assume String is not symbol.
+ // Assume String is not internalized.
CHECK(v8_str("a")->StrictEquals(v8_str("a")));
CHECK(!v8_str("a")->StrictEquals(v8_str("b")));
CHECK(!v8_str("5")->StrictEquals(v8_num(5)));
@@ -3632,9 +3789,10 @@ THREADED_TEST(Equality) {
CHECK(!v8::False()->StrictEquals(v8::Undefined()));
v8::Handle<v8::Object> obj = v8::Object::New();
- v8::Persistent<v8::Object> alias = v8::Persistent<v8::Object>::New(obj);
+ v8::Persistent<v8::Object> alias =
+ v8::Persistent<v8::Object>::New(isolate, obj);
CHECK(alias->StrictEquals(obj));
- alias.Dispose();
+ alias.Dispose(isolate);
}
@@ -3932,7 +4090,7 @@ static void SetXValue(Local<String> name,
CHECK_EQ(info.Data(), v8_str("donut"));
CHECK_EQ(name, v8_str("x"));
CHECK(xValue.IsEmpty());
- xValue = v8::Persistent<Value>::New(value);
+ xValue = v8::Persistent<Value>::New(info.GetIsolate(), value);
}
@@ -3947,7 +4105,7 @@ THREADED_TEST(SimplePropertyWrite) {
CHECK(xValue.IsEmpty());
script->Run();
CHECK_EQ(v8_num(4), xValue);
- xValue.Dispose();
+ xValue.Dispose(context->GetIsolate());
xValue = v8::Persistent<Value>();
}
}
@@ -3964,7 +4122,7 @@ THREADED_TEST(SetterOnly) {
CHECK(xValue.IsEmpty());
script->Run();
CHECK_EQ(v8_num(4), xValue);
- xValue.Dispose();
+ xValue.Dispose(context->GetIsolate());
xValue = v8::Persistent<Value>();
}
}
@@ -4074,7 +4232,7 @@ THREADED_TEST(NamedInterceptorDictionaryICMultipleContext) {
CompileRun("var obj = { x : 0 }; delete obj.x;");
context1->Exit();
- context1.Dispose();
+ context1.Dispose(context1->GetIsolate());
}
@@ -4827,17 +4985,17 @@ template <typename T> static void USE(T) { }
// This test is not intended to be run, just type checked.
-static inline void PersistentHandles() {
+static inline void PersistentHandles(v8::Isolate* isolate) {
USE(PersistentHandles);
Local<String> str = v8_str("foo");
- v8::Persistent<String> p_str = v8::Persistent<String>::New(str);
+ v8::Persistent<String> p_str = v8::Persistent<String>::New(isolate, str);
USE(p_str);
Local<Script> scr = Script::Compile(v8_str(""));
- v8::Persistent<Script> p_scr = v8::Persistent<Script>::New(scr);
+ v8::Persistent<Script> p_scr = v8::Persistent<Script>::New(isolate, scr);
USE(p_scr);
Local<ObjectTemplate> templ = ObjectTemplate::New();
v8::Persistent<ObjectTemplate> p_templ =
- v8::Persistent<ObjectTemplate>::New(templ);
+ v8::Persistent<ObjectTemplate>::New(isolate, templ);
USE(p_templ);
}
@@ -4856,7 +5014,7 @@ THREADED_TEST(GlobalObjectTemplate) {
v8::Persistent<Context> context = Context::New(0, global_template);
Context::Scope context_scope(context);
Script::Compile(v8_str("JSNI_Log('LOG')"))->Run();
- context.Dispose();
+ context.Dispose(context->GetIsolate());
}
@@ -5325,29 +5483,30 @@ class Snorkel {
class Whammy {
public:
- Whammy() {
- cursor_ = 0;
- }
- ~Whammy() {
- script_.Dispose();
- }
+ explicit Whammy(v8::Isolate* isolate) : cursor_(0), isolate_(isolate) { }
+ ~Whammy() { script_.Dispose(isolate_); }
v8::Handle<Script> getScript() {
- if (script_.IsEmpty())
- script_ = v8::Persistent<Script>::New(v8_compile("({}).blammo"));
+ if (script_.IsEmpty()) {
+ script_ = v8::Persistent<Script>::New(isolate_,
+ v8_compile("({}).blammo"));
+ }
return Local<Script>(*script_);
}
public:
static const int kObjectCount = 256;
int cursor_;
+ v8::Isolate* isolate_;
v8::Persistent<v8::Object> objects_[kObjectCount];
v8::Persistent<Script> script_;
};
-static void HandleWeakReference(v8::Persistent<v8::Value> obj, void* data) {
+static void HandleWeakReference(v8::Isolate* isolate,
+ v8::Persistent<v8::Value> obj,
+ void* data) {
Snorkel* snorkel = reinterpret_cast<Snorkel*>(data);
delete snorkel;
- obj.ClearWeak();
+ obj.ClearWeak(isolate);
}
v8::Handle<Value> WhammyPropertyGetter(Local<String> name,
@@ -5358,10 +5517,11 @@ v8::Handle<Value> WhammyPropertyGetter(Local<String> name,
v8::Persistent<v8::Object> prev = whammy->objects_[whammy->cursor_];
v8::Handle<v8::Object> obj = v8::Object::New();
- v8::Persistent<v8::Object> global = v8::Persistent<v8::Object>::New(obj);
+ v8::Persistent<v8::Object> global =
+ v8::Persistent<v8::Object>::New(info.GetIsolate(), obj);
if (!prev.IsEmpty()) {
prev->Set(v8_str("next"), obj);
- prev.MakeWeak(new Snorkel(), &HandleWeakReference);
+ prev.MakeWeak(info.GetIsolate(), new Snorkel(), &HandleWeakReference);
whammy->objects_[whammy->cursor_].Clear();
}
whammy->objects_[whammy->cursor_] = global;
@@ -5372,7 +5532,7 @@ v8::Handle<Value> WhammyPropertyGetter(Local<String> name,
THREADED_TEST(WeakReference) {
v8::HandleScope handle_scope;
v8::Handle<v8::ObjectTemplate> templ= v8::ObjectTemplate::New();
- Whammy* whammy = new Whammy();
+ Whammy* whammy = new Whammy(v8::Isolate::GetCurrent());
templ->SetNamedPropertyHandler(WhammyPropertyGetter,
0, 0, 0, 0,
v8::External::New(whammy));
@@ -5395,12 +5555,14 @@ THREADED_TEST(WeakReference) {
v8::Handle<Value> result = CompileRun(code);
CHECK_EQ(4.0, result->NumberValue());
delete whammy;
- context.Dispose();
+ context.Dispose(context->GetIsolate());
}
-static void DisposeAndSetFlag(v8::Persistent<v8::Value> obj, void* data) {
- obj.Dispose();
+static void DisposeAndSetFlag(v8::Isolate* isolate,
+ v8::Persistent<v8::Value> obj,
+ void* data) {
+ obj.Dispose(isolate);
obj.Clear();
*(reinterpret_cast<bool*>(data)) = true;
}
@@ -5408,27 +5570,25 @@ static void DisposeAndSetFlag(v8::Persistent<v8::Value> obj, void* data) {
THREADED_TEST(IndependentWeakHandle) {
v8::Persistent<Context> context = Context::New();
+ v8::Isolate* iso = context->GetIsolate();
Context::Scope context_scope(context);
v8::Persistent<v8::Object> object_a, object_b;
{
v8::HandleScope handle_scope;
- object_a = v8::Persistent<v8::Object>::New(v8::Object::New());
- object_b = v8::Persistent<v8::Object>::New(v8::Object::New());
+ object_a = v8::Persistent<v8::Object>::New(iso, v8::Object::New());
+ object_b = v8::Persistent<v8::Object>::New(iso, v8::Object::New());
}
- v8::Isolate* isolate = v8::Isolate::GetCurrent();
bool object_a_disposed = false;
bool object_b_disposed = false;
- object_a.MakeWeak(&object_a_disposed, &DisposeAndSetFlag);
- object_b.MakeWeak(&object_b_disposed, &DisposeAndSetFlag);
- CHECK(!object_a.IsIndependent());
- CHECK(!object_b.IsIndependent(isolate));
- object_a.MarkIndependent();
- object_b.MarkIndependent(isolate);
- CHECK(object_a.IsIndependent());
- CHECK(object_b.IsIndependent(isolate));
+ object_a.MakeWeak(iso, &object_a_disposed, &DisposeAndSetFlag);
+ object_b.MakeWeak(iso, &object_b_disposed, &DisposeAndSetFlag);
+ CHECK(!object_b.IsIndependent(iso));
+ object_a.MarkIndependent(iso);
+ object_b.MarkIndependent(iso);
+ CHECK(object_b.IsIndependent(iso));
HEAP->PerformScavenge();
CHECK(object_a_disposed);
CHECK(object_b_disposed);
@@ -5445,16 +5605,20 @@ static void InvokeMarkSweep() {
}
-static void ForceScavenge(v8::Persistent<v8::Value> obj, void* data) {
- obj.Dispose();
+static void ForceScavenge(v8::Isolate* isolate,
+ v8::Persistent<v8::Value> obj,
+ void* data) {
+ obj.Dispose(isolate);
obj.Clear();
*(reinterpret_cast<bool*>(data)) = true;
InvokeScavenge();
}
-static void ForceMarkSweep(v8::Persistent<v8::Value> obj, void* data) {
- obj.Dispose();
+static void ForceMarkSweep(v8::Isolate* isolate,
+ v8::Persistent<v8::Value> obj,
+ void* data) {
+ obj.Dispose(isolate);
obj.Clear();
*(reinterpret_cast<bool*>(data)) = true;
InvokeMarkSweep();
@@ -5463,10 +5627,11 @@ static void ForceMarkSweep(v8::Persistent<v8::Value> obj, void* data) {
THREADED_TEST(GCFromWeakCallbacks) {
v8::Persistent<Context> context = Context::New();
+ v8::Isolate* isolate = context->GetIsolate();
Context::Scope context_scope(context);
static const int kNumberOfGCTypes = 2;
- v8::WeakReferenceCallback gc_forcing_callback[kNumberOfGCTypes] =
+ v8::NearDeathCallback gc_forcing_callback[kNumberOfGCTypes] =
{&ForceScavenge, &ForceMarkSweep};
typedef void (*GCInvoker)();
@@ -5477,11 +5642,11 @@ THREADED_TEST(GCFromWeakCallbacks) {
v8::Persistent<v8::Object> object;
{
v8::HandleScope handle_scope;
- object = v8::Persistent<v8::Object>::New(v8::Object::New());
+ object = v8::Persistent<v8::Object>::New(isolate, v8::Object::New());
}
bool disposed = false;
- object.MakeWeak(&disposed, gc_forcing_callback[inner_gc]);
- object.MarkIndependent();
+ object.MakeWeak(isolate, &disposed, gc_forcing_callback[inner_gc]);
+ object.MarkIndependent(isolate);
invoke_gc[outer_gc]();
CHECK(disposed);
}
@@ -5489,8 +5654,10 @@ THREADED_TEST(GCFromWeakCallbacks) {
}
-static void RevivingCallback(v8::Persistent<v8::Value> obj, void* data) {
- obj.ClearWeak();
+static void RevivingCallback(v8::Isolate* isolate,
+ v8::Persistent<v8::Value> obj,
+ void* data) {
+ obj.ClearWeak(isolate);
*(reinterpret_cast<bool*>(data)) = true;
}
@@ -5498,18 +5665,19 @@ static void RevivingCallback(v8::Persistent<v8::Value> obj, void* data) {
THREADED_TEST(IndependentHandleRevival) {
v8::Persistent<Context> context = Context::New();
Context::Scope context_scope(context);
+ v8::Isolate* isolate = context->GetIsolate();
v8::Persistent<v8::Object> object;
{
v8::HandleScope handle_scope;
- object = v8::Persistent<v8::Object>::New(v8::Object::New());
+ object = v8::Persistent<v8::Object>::New(isolate, v8::Object::New());
object->Set(v8_str("x"), v8::Integer::New(1));
v8::Local<String> y_str = v8_str("y");
object->Set(y_str, y_str);
}
bool revived = false;
- object.MakeWeak(&revived, &RevivingCallback);
- object.MarkIndependent();
+ object.MakeWeak(isolate, &revived, &RevivingCallback);
+ object.MarkIndependent(isolate);
HEAP->PerformScavenge();
CHECK(revived);
HEAP->CollectAllGarbage(i::Heap::kAbortIncrementalMarkingMask);
@@ -6070,6 +6238,10 @@ THREADED_TEST(StringWrite) {
CHECK_EQ(0, strcmp("abc", buf));
CHECK_EQ(0, buf[3]);
CHECK_EQ(0, strcmp("def", buf + 4));
+
+ CHECK_EQ(0, str->WriteAscii(NULL, 0, 0, String::NO_NULL_TERMINATION));
+ CHECK_EQ(0, str->WriteUtf8(NULL, 0, 0, String::NO_NULL_TERMINATION));
+ CHECK_EQ(0, str->Write(NULL, 0, 0, String::NO_NULL_TERMINATION));
}
@@ -6087,8 +6259,10 @@ static void Utf16Helper(
Local<v8::String>::Cast(a->Get(i));
Local<v8::Number> expected_len =
Local<v8::Number>::Cast(alens->Get(i));
+#ifndef ENABLE_LATIN_1
CHECK_EQ(expected_len->Value() != string->Length(),
string->MayContainNonAscii());
+#endif
int length = GetUtf8Length(string);
CHECK_EQ(static_cast<int>(expected_len->Value()), length);
}
@@ -6599,10 +6773,10 @@ TEST(SecurityHandler) {
}
context1->Exit();
- context1.Dispose();
+ context1.Dispose(context1->GetIsolate());
context0->Exit();
- context0.Dispose();
+ context0.Dispose(context0->GetIsolate());
}
@@ -6646,7 +6820,7 @@ THREADED_TEST(SecurityChecks) {
CHECK(try_catch.HasCaught());
}
- env2.Dispose();
+ env2.Dispose(env2->GetIsolate());
}
@@ -6715,7 +6889,7 @@ THREADED_TEST(SecurityChecksForPrototypeChain) {
CHECK(!access_f3->Run()->Equals(v8_num(101)));
CHECK(access_f3->Run()->IsUndefined());
}
- other.Dispose();
+ other.Dispose(other->GetIsolate());
}
@@ -6748,7 +6922,7 @@ THREADED_TEST(CrossDomainDelete) {
CHECK(v->IsNumber());
CHECK_EQ(3, v->Int32Value());
- env2.Dispose();
+ env2.Dispose(env2->GetIsolate());
}
@@ -6783,7 +6957,7 @@ THREADED_TEST(CrossDomainIsPropertyEnumerable) {
CHECK(result->IsFalse());
}
- env2.Dispose();
+ env2.Dispose(env2->GetIsolate());
}
@@ -6816,7 +6990,7 @@ THREADED_TEST(CrossDomainForIn) {
"return true;})()");
CHECK(result->IsTrue());
}
- env2.Dispose();
+ env2.Dispose(env2->GetIsolate());
}
@@ -6879,8 +7053,8 @@ TEST(ContextDetachGlobal) {
CHECK(r->IsUndefined());
}
- env2.Dispose();
- env3.Dispose();
+ env2.Dispose(env2->GetIsolate());
+ env3.Dispose(env3->GetIsolate());
}
@@ -6958,8 +7132,8 @@ TEST(DetachAndReattachGlobal) {
CHECK(result->IsInt32());
CHECK_EQ(42, result->Int32Value());
- env2.Dispose();
- env3.Dispose();
+ env2.Dispose(env2->GetIsolate());
+ env3.Dispose(env3->GetIsolate());
}
@@ -7249,8 +7423,8 @@ TEST(AccessControl) {
context1->Exit();
context0->Exit();
- context1.Dispose();
- context0.Dispose();
+ context1.Dispose(context1->GetIsolate());
+ context0.Dispose(context0->GetIsolate());
}
@@ -7378,14 +7552,23 @@ THREADED_TEST(AccessControlGetOwnPropertyNames) {
context1->Exit();
context0->Exit();
- context1.Dispose();
- context0.Dispose();
+ context1.Dispose(context1->GetIsolate());
+ context0.Dispose(context0->GetIsolate());
+}
+
+
+static v8::Handle<v8::Array> IndexedPropertyEnumerator(const AccessorInfo&) {
+ v8::Handle<v8::Array> result = v8::Array::New(2);
+ result->Set(0, v8::Integer::New(7));
+ result->Set(1, v8::Object::New());
+ return result;
}
static v8::Handle<v8::Array> NamedPropertyEnumerator(const AccessorInfo& info) {
- v8::Handle<v8::Array> result = v8::Array::New(1);
+ v8::Handle<v8::Array> result = v8::Array::New(2);
result->Set(0, v8_str("x"));
+ result->Set(1, v8::Object::New());
return result;
}
@@ -7394,7 +7577,10 @@ THREADED_TEST(GetOwnPropertyNamesWithInterceptor) {
v8::HandleScope handle_scope;
v8::Handle<v8::ObjectTemplate> obj_template = v8::ObjectTemplate::New();
+ obj_template->Set(v8_str("7"), v8::Integer::New(7));
obj_template->Set(v8_str("x"), v8::Integer::New(42));
+ obj_template->SetIndexedPropertyHandler(NULL, NULL, NULL, NULL,
+ IndexedPropertyEnumerator);
obj_template->SetNamedPropertyHandler(NULL, NULL, NULL, NULL,
NamedPropertyEnumerator);
@@ -7402,9 +7588,17 @@ THREADED_TEST(GetOwnPropertyNamesWithInterceptor) {
v8::Handle<v8::Object> global = context->Global();
global->Set(v8_str("object"), obj_template->NewInstance());
- v8::Handle<Value> value =
- CompileRun("Object.getOwnPropertyNames(object).join(',')");
- CHECK_EQ(v8_str("x"), value);
+ v8::Handle<v8::Value> result =
+ CompileRun("Object.getOwnPropertyNames(object)");
+ CHECK(result->IsArray());
+ v8::Handle<v8::Array> result_array = v8::Handle<v8::Array>::Cast(result);
+ CHECK_EQ(3, result_array->Length());
+ CHECK(result_array->Get(0)->IsString());
+ CHECK(result_array->Get(1)->IsString());
+ CHECK(result_array->Get(2)->IsString());
+ CHECK_EQ(v8_str("7"), result_array->Get(0));
+ CHECK_EQ(v8_str("[object Object]"), result_array->Get(1));
+ CHECK_EQ(v8_str("x"), result_array->Get(2));
}
@@ -7462,8 +7656,8 @@ THREADED_TEST(CrossDomainAccessors) {
context1->Exit();
context0->Exit();
- context1.Dispose();
- context0.Dispose();
+ context1.Dispose(context1->GetIsolate());
+ context0.Dispose(context0->GetIsolate());
}
@@ -7597,8 +7791,8 @@ TEST(AccessControlIC) {
context1->Exit();
context0->Exit();
- context1.Dispose();
- context0.Dispose();
+ context1.Dispose(context1->GetIsolate());
+ context0.Dispose(context0->GetIsolate());
}
@@ -7672,8 +7866,8 @@ THREADED_TEST(AccessControlFlatten) {
context1->Exit();
context0->Exit();
- context1.Dispose();
- context0.Dispose();
+ context1.Dispose(context1->GetIsolate());
+ context0.Dispose(context0->GetIsolate());
}
@@ -7764,8 +7958,8 @@ THREADED_TEST(AccessControlInterceptorIC) {
context1->Exit();
context0->Exit();
- context1.Dispose();
- context0.Dispose();
+ context1.Dispose(context1->GetIsolate());
+ context0.Dispose(context0->GetIsolate());
}
@@ -7955,12 +8149,8 @@ THREADED_TEST(ShadowObject) {
Local<ObjectTemplate> proto = t->PrototypeTemplate();
Local<ObjectTemplate> instance = t->InstanceTemplate();
- // Only allow calls of f on instances of t.
- Local<v8::Signature> signature = v8::Signature::New(t);
proto->Set(v8_str("f"),
- v8::FunctionTemplate::New(ShadowFunctionCallback,
- Local<Value>(),
- signature));
+ v8::FunctionTemplate::New(ShadowFunctionCallback, Local<Value>()));
proto->Set(v8_str("x"), v8_num(12));
instance->SetAccessor(v8_str("y"), ShadowYGetter, ShadowYSetter);
@@ -8031,6 +8221,66 @@ THREADED_TEST(HiddenPrototype) {
}
+THREADED_TEST(HiddenPrototypeSet) {
+ v8::HandleScope handle_scope;
+ LocalContext context;
+
+ Local<v8::FunctionTemplate> ot = v8::FunctionTemplate::New();
+ Local<v8::FunctionTemplate> ht = v8::FunctionTemplate::New();
+ ht->SetHiddenPrototype(true);
+ Local<v8::FunctionTemplate> pt = v8::FunctionTemplate::New();
+ ht->InstanceTemplate()->Set(v8_str("x"), v8_num(0));
+
+ Local<v8::Object> o = ot->GetFunction()->NewInstance();
+ Local<v8::Object> h = ht->GetFunction()->NewInstance();
+ Local<v8::Object> p = pt->GetFunction()->NewInstance();
+ o->Set(v8_str("__proto__"), h);
+ h->Set(v8_str("__proto__"), p);
+
+ // Setting a property that exists on the hidden prototype goes there.
+ o->Set(v8_str("x"), v8_num(7));
+ CHECK_EQ(7, o->Get(v8_str("x"))->Int32Value());
+ CHECK_EQ(7, h->Get(v8_str("x"))->Int32Value());
+ CHECK(p->Get(v8_str("x"))->IsUndefined());
+
+ // Setting a new property should not be forwarded to the hidden prototype.
+ o->Set(v8_str("y"), v8_num(6));
+ CHECK_EQ(6, o->Get(v8_str("y"))->Int32Value());
+ CHECK(h->Get(v8_str("y"))->IsUndefined());
+ CHECK(p->Get(v8_str("y"))->IsUndefined());
+
+ // Setting a property that only exists on a prototype of the hidden prototype
+ // is treated normally again.
+ p->Set(v8_str("z"), v8_num(8));
+ CHECK_EQ(8, o->Get(v8_str("z"))->Int32Value());
+ CHECK_EQ(8, h->Get(v8_str("z"))->Int32Value());
+ CHECK_EQ(8, p->Get(v8_str("z"))->Int32Value());
+ o->Set(v8_str("z"), v8_num(9));
+ CHECK_EQ(9, o->Get(v8_str("z"))->Int32Value());
+ CHECK_EQ(8, h->Get(v8_str("z"))->Int32Value());
+ CHECK_EQ(8, p->Get(v8_str("z"))->Int32Value());
+}
+
+
+// Regression test for issue 2457.
+THREADED_TEST(HiddenPrototypeIdentityHash) {
+ v8::HandleScope handle_scope;
+ LocalContext context;
+
+ Handle<FunctionTemplate> t = FunctionTemplate::New();
+ t->SetHiddenPrototype(true);
+ t->InstanceTemplate()->Set(v8_str("foo"), v8_num(75));
+ Handle<Object> p = t->GetFunction()->NewInstance();
+ Handle<Object> o = Object::New();
+ o->SetPrototype(p);
+
+ int hash = o->GetIdentityHash();
+ USE(hash);
+ o->Set(v8_str("foo"), v8_num(42));
+ ASSERT_EQ(hash, o->GetIdentityHash());
+}
+
+
THREADED_TEST(SetPrototype) {
v8::HandleScope handle_scope;
LocalContext context;
@@ -8599,8 +8849,8 @@ THREADED_TEST(EvalInDetachedGlobal) {
CHECK(catcher.HasCaught());
context1->Exit();
- context1.Dispose();
- context0.Dispose();
+ context1.Dispose(context1->GetIsolate());
+ context0.Dispose(context0->GetIsolate());
}
@@ -9423,7 +9673,7 @@ THREADED_TEST(InterceptorCallICCacheableNotNeeded) {
call_ic_function4 =
v8_compile("function f(x) { return x - 1; }; f")->Run();
v8::Handle<Value> value = CompileRun(
- "o.__proto__.x = function(x) { return x + 1; };"
+ "Object.getPrototypeOf(o).x = function(x) { return x + 1; };"
"var result = 0;"
"for (var i = 0; i < 1000; i++) {"
" result = o.x(42);"
@@ -9642,7 +9892,8 @@ THREADED_TEST(InterceptorCallICCachedFromGlobal) {
static v8::Handle<Value> InterceptorCallICFastApi(Local<String> name,
const AccessorInfo& info) {
ApiTestFuzzer::Fuzz();
- int* call_count = reinterpret_cast<int*>(v8::External::Unwrap(info.Data()));
+ int* call_count =
+ reinterpret_cast<int*>(v8::External::Cast(*info.Data())->Value());
++(*call_count);
if ((*call_count) % 20 == 0) {
HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
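
The static External::Wrap()/Unwrap() pointer-boxing helpers are likewise gone
from this v8.h; an External is now created with External::New() and unwrapped
through the ordinary value API. As a sketch:

    int counter = 0;
    v8::Handle<v8::Value> data = v8::External::New(&counter);
    // Unwrapping replaces v8::External::Unwrap(data):
    int* p = reinterpret_cast<int*>(v8::External::Cast(*data)->Value());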
@@ -9801,7 +10052,7 @@ THREADED_TEST(InterceptorCallICFastApi_TrivialSignature) {
v8::Handle<v8::ObjectTemplate> templ = fun_templ->InstanceTemplate();
templ->SetNamedPropertyHandler(InterceptorCallICFastApi,
NULL, NULL, NULL, NULL,
- v8::External::Wrap(&interceptor_call_count));
+ v8::External::New(&interceptor_call_count));
LocalContext context;
v8::Handle<v8::Function> fun = fun_templ->GetFunction();
GenerateSomeGarbage();
@@ -9825,10 +10076,11 @@ THREADED_TEST(InterceptorCallICFastApi_SimpleSignature) {
v8::Signature::New(fun_templ));
v8::Handle<v8::ObjectTemplate> proto_templ = fun_templ->PrototypeTemplate();
proto_templ->Set(v8_str("method"), method_templ);
+ fun_templ->SetHiddenPrototype(true);
v8::Handle<v8::ObjectTemplate> templ = fun_templ->InstanceTemplate();
templ->SetNamedPropertyHandler(InterceptorCallICFastApi,
NULL, NULL, NULL, NULL,
- v8::External::Wrap(&interceptor_call_count));
+ v8::External::New(&interceptor_call_count));
LocalContext context;
v8::Handle<v8::Function> fun = fun_templ->GetFunction();
GenerateSomeGarbage();
@@ -9855,10 +10107,11 @@ THREADED_TEST(InterceptorCallICFastApi_SimpleSignature_Miss1) {
v8::Signature::New(fun_templ));
v8::Handle<v8::ObjectTemplate> proto_templ = fun_templ->PrototypeTemplate();
proto_templ->Set(v8_str("method"), method_templ);
+ fun_templ->SetHiddenPrototype(true);
v8::Handle<v8::ObjectTemplate> templ = fun_templ->InstanceTemplate();
templ->SetNamedPropertyHandler(InterceptorCallICFastApi,
NULL, NULL, NULL, NULL,
- v8::External::Wrap(&interceptor_call_count));
+ v8::External::New(&interceptor_call_count));
LocalContext context;
v8::Handle<v8::Function> fun = fun_templ->GetFunction();
GenerateSomeGarbage();
@@ -9891,10 +10144,11 @@ THREADED_TEST(InterceptorCallICFastApi_SimpleSignature_Miss2) {
v8::Signature::New(fun_templ));
v8::Handle<v8::ObjectTemplate> proto_templ = fun_templ->PrototypeTemplate();
proto_templ->Set(v8_str("method"), method_templ);
+ fun_templ->SetHiddenPrototype(true);
v8::Handle<v8::ObjectTemplate> templ = fun_templ->InstanceTemplate();
templ->SetNamedPropertyHandler(InterceptorCallICFastApi,
NULL, NULL, NULL, NULL,
- v8::External::Wrap(&interceptor_call_count));
+ v8::External::New(&interceptor_call_count));
LocalContext context;
v8::Handle<v8::Function> fun = fun_templ->GetFunction();
GenerateSomeGarbage();
@@ -9927,10 +10181,11 @@ THREADED_TEST(InterceptorCallICFastApi_SimpleSignature_Miss3) {
v8::Signature::New(fun_templ));
v8::Handle<v8::ObjectTemplate> proto_templ = fun_templ->PrototypeTemplate();
proto_templ->Set(v8_str("method"), method_templ);
+ fun_templ->SetHiddenPrototype(true);
v8::Handle<v8::ObjectTemplate> templ = fun_templ->InstanceTemplate();
templ->SetNamedPropertyHandler(InterceptorCallICFastApi,
NULL, NULL, NULL, NULL,
- v8::External::Wrap(&interceptor_call_count));
+ v8::External::New(&interceptor_call_count));
LocalContext context;
v8::Handle<v8::Function> fun = fun_templ->GetFunction();
GenerateSomeGarbage();
@@ -9966,10 +10221,11 @@ THREADED_TEST(InterceptorCallICFastApi_SimpleSignature_TypeError) {
v8::Signature::New(fun_templ));
v8::Handle<v8::ObjectTemplate> proto_templ = fun_templ->PrototypeTemplate();
proto_templ->Set(v8_str("method"), method_templ);
+ fun_templ->SetHiddenPrototype(true);
v8::Handle<v8::ObjectTemplate> templ = fun_templ->InstanceTemplate();
templ->SetNamedPropertyHandler(InterceptorCallICFastApi,
NULL, NULL, NULL, NULL,
- v8::External::Wrap(&interceptor_call_count));
+ v8::External::New(&interceptor_call_count));
LocalContext context;
v8::Handle<v8::Function> fun = fun_templ->GetFunction();
GenerateSomeGarbage();
@@ -10028,6 +10284,7 @@ THREADED_TEST(CallICFastApi_SimpleSignature) {
v8::Signature::New(fun_templ));
v8::Handle<v8::ObjectTemplate> proto_templ = fun_templ->PrototypeTemplate();
proto_templ->Set(v8_str("method"), method_templ);
+ fun_templ->SetHiddenPrototype(true);
v8::Handle<v8::ObjectTemplate> templ(fun_templ->InstanceTemplate());
CHECK(!templ.IsEmpty());
LocalContext context;
@@ -10055,6 +10312,7 @@ THREADED_TEST(CallICFastApi_SimpleSignature_Miss1) {
v8::Signature::New(fun_templ));
v8::Handle<v8::ObjectTemplate> proto_templ = fun_templ->PrototypeTemplate();
proto_templ->Set(v8_str("method"), method_templ);
+ fun_templ->SetHiddenPrototype(true);
v8::Handle<v8::ObjectTemplate> templ(fun_templ->InstanceTemplate());
CHECK(!templ.IsEmpty());
LocalContext context;
@@ -10087,6 +10345,7 @@ THREADED_TEST(CallICFastApi_SimpleSignature_Miss2) {
v8::Signature::New(fun_templ));
v8::Handle<v8::ObjectTemplate> proto_templ = fun_templ->PrototypeTemplate();
proto_templ->Set(v8_str("method"), method_templ);
+ fun_templ->SetHiddenPrototype(true);
v8::Handle<v8::ObjectTemplate> templ(fun_templ->InstanceTemplate());
CHECK(!templ.IsEmpty());
LocalContext context;
@@ -10113,6 +10372,42 @@ THREADED_TEST(CallICFastApi_SimpleSignature_Miss2) {
CHECK_EQ(42, context->Global()->Get(v8_str("saved_result"))->Int32Value());
}
+THREADED_TEST(CallICFastApi_SimpleSignature_TypeError) {
+ v8::HandleScope scope;
+ v8::Handle<v8::FunctionTemplate> fun_templ = v8::FunctionTemplate::New();
+ v8::Handle<v8::FunctionTemplate> method_templ =
+ v8::FunctionTemplate::New(FastApiCallback_SimpleSignature,
+ v8_str("method_data"),
+ v8::Signature::New(fun_templ));
+ v8::Handle<v8::ObjectTemplate> proto_templ = fun_templ->PrototypeTemplate();
+ proto_templ->Set(v8_str("method"), method_templ);
+ fun_templ->SetHiddenPrototype(true);
+ v8::Handle<v8::ObjectTemplate> templ(fun_templ->InstanceTemplate());
+ CHECK(!templ.IsEmpty());
+ LocalContext context;
+ v8::Handle<v8::Function> fun = fun_templ->GetFunction();
+ GenerateSomeGarbage();
+ context->Global()->Set(v8_str("o"), fun->NewInstance());
+ v8::TryCatch try_catch;
+ CompileRun(
+ "o.foo = 17;"
+ "var receiver = {};"
+ "receiver.__proto__ = o;"
+ "var result = 0;"
+ "var saved_result = 0;"
+ "for (var i = 0; i < 100; i++) {"
+ " result = receiver.method(41);"
+ " if (i == 50) {"
+ " saved_result = result;"
+ " receiver = Object.create(receiver);"
+ " }"
+ "}");
+ CHECK(try_catch.HasCaught());
+ CHECK_EQ(v8_str("TypeError: Illegal invocation"),
+ try_catch.Exception()->ToString());
+ CHECK_EQ(42, context->Global()->Get(v8_str("saved_result"))->Int32Value());
+}
+
v8::Handle<Value> keyed_call_ic_function;
@@ -10760,7 +11055,7 @@ void ApiTestFuzzer::Run() {
gate_->Wait();
{
// ... get the V8 lock and start running the test.
- v8::Locker locker;
+ v8::Locker locker(CcTest::default_isolate());
CallTest();
}
// This test finished.
@@ -10824,7 +11119,7 @@ void ApiTestFuzzer::ContextSwitch() {
// If the new thread is the same as the current thread there is nothing to do.
if (NextThread()) {
// Now it can start.
- v8::Unlocker unlocker;
+ v8::Unlocker unlocker(CcTest::default_isolate());
// Wait till someone starts us again.
gate_->Wait();
// And we're off.
@@ -10842,7 +11137,10 @@ void ApiTestFuzzer::TearDown() {
// Lets not be needlessly self-referential.
-TEST(Threading) {
+TEST(Threading1) {
+  // TODO(mstarzinger): Disabled in GC stress mode for now; we should find
+  // the correct timeout for this and re-enable the test again.
+ if (i::FLAG_stress_compaction) return;
ApiTestFuzzer::SetUp(ApiTestFuzzer::FIRST_PART);
ApiTestFuzzer::RunAllTests();
ApiTestFuzzer::TearDown();
@@ -10876,12 +11174,12 @@ void ApiTestFuzzer::CallTest() {
static v8::Handle<Value> ThrowInJS(const v8::Arguments& args) {
- CHECK(v8::Locker::IsLocked());
+ CHECK(v8::Locker::IsLocked(CcTest::default_isolate()));
ApiTestFuzzer::Fuzz();
- v8::Unlocker unlocker;
+ v8::Unlocker unlocker(CcTest::default_isolate());
const char* code = "throw 7;";
{
- v8::Locker nested_locker;
+ v8::Locker nested_locker(CcTest::default_isolate());
v8::HandleScope scope;
v8::Handle<Value> exception;
{ v8::TryCatch try_catch;
@@ -10899,12 +11197,12 @@ static v8::Handle<Value> ThrowInJS(const v8::Arguments& args) {
static v8::Handle<Value> ThrowInJSNoCatch(const v8::Arguments& args) {
- CHECK(v8::Locker::IsLocked());
+ CHECK(v8::Locker::IsLocked(CcTest::default_isolate()));
ApiTestFuzzer::Fuzz();
- v8::Unlocker unlocker;
+ v8::Unlocker unlocker(CcTest::default_isolate());
const char* code = "throw 7;";
{
- v8::Locker nested_locker;
+ v8::Locker nested_locker(CcTest::default_isolate());
v8::HandleScope scope;
v8::Handle<Value> value = CompileRun(code);
CHECK(value.IsEmpty());
@@ -10916,8 +11214,8 @@ static v8::Handle<Value> ThrowInJSNoCatch(const v8::Arguments& args) {
// These are locking tests that don't need to be run again
// as part of the locking aggregation tests.
TEST(NestedLockers) {
- v8::Locker locker;
- CHECK(v8::Locker::IsLocked());
+ v8::Locker locker(CcTest::default_isolate());
+ CHECK(v8::Locker::IsLocked(CcTest::default_isolate()));
v8::HandleScope scope;
LocalContext env;
Local<v8::FunctionTemplate> fun_templ = v8::FunctionTemplate::New(ThrowInJS);
@@ -10938,7 +11236,7 @@ TEST(NestedLockers) {
// These are locking tests that don't need to be run again
// as part of the locking aggregation tests.
TEST(NestedLockersNoTryCatch) {
- v8::Locker locker;
+ v8::Locker locker(CcTest::default_isolate());
v8::HandleScope scope;
LocalContext env;
Local<v8::FunctionTemplate> fun_templ =
@@ -10958,24 +11256,24 @@ TEST(NestedLockersNoTryCatch) {
THREADED_TEST(RecursiveLocking) {
- v8::Locker locker;
+ v8::Locker locker(CcTest::default_isolate());
{
- v8::Locker locker2;
- CHECK(v8::Locker::IsLocked());
+ v8::Locker locker2(CcTest::default_isolate());
+ CHECK(v8::Locker::IsLocked(CcTest::default_isolate()));
}
}
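
The same isolate-explicit change applies to v8::Locker and v8::Unlocker
throughout this file: the default constructors are gone, so every lock names
the isolate it guards. A sketch using the test suite's helper:

    v8::Isolate* isolate = CcTest::default_isolate();
    v8::Locker locker(isolate);            // was: v8::Locker locker;
    CHECK(v8::Locker::IsLocked(isolate));  // was: v8::Locker::IsLocked()
    {
      // Temporarily release the lock for this isolate.
      v8::Unlocker unlocker(isolate);
    }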
static v8::Handle<Value> UnlockForAMoment(const v8::Arguments& args) {
ApiTestFuzzer::Fuzz();
- v8::Unlocker unlocker;
+ v8::Unlocker unlocker(CcTest::default_isolate());
return v8::Undefined();
}
THREADED_TEST(LockUnlockLock) {
{
- v8::Locker locker;
+ v8::Locker locker(CcTest::default_isolate());
v8::HandleScope scope;
LocalContext env;
Local<v8::FunctionTemplate> fun_templ =
@@ -10989,7 +11287,7 @@ THREADED_TEST(LockUnlockLock) {
CHECK_EQ(42, script->Run()->Int32Value());
}
{
- v8::Locker locker;
+ v8::Locker locker(CcTest::default_isolate());
v8::HandleScope scope;
LocalContext env;
Local<v8::FunctionTemplate> fun_templ =
@@ -11008,7 +11306,7 @@ THREADED_TEST(LockUnlockLock) {
static int GetGlobalObjectsCount() {
i::Isolate::Current()->heap()->EnsureHeapIsIterable();
int count = 0;
- i::HeapIterator it;
+ i::HeapIterator it(HEAP);
for (i::HeapObject* object = it.next(); object != NULL; object = it.next())
if (object->IsJSGlobalObject()) count++;
return count;
@@ -11072,79 +11370,90 @@ TEST(DontLeakGlobalObjects) {
v8::Persistent<v8::Object> some_object;
v8::Persistent<v8::Object> bad_handle;
-void NewPersistentHandleCallback(v8::Persistent<v8::Value> handle, void*) {
+void NewPersistentHandleCallback(v8::Isolate* isolate,
+ v8::Persistent<v8::Value> handle,
+ void*) {
v8::HandleScope scope;
- bad_handle = v8::Persistent<v8::Object>::New(some_object);
- handle.Dispose();
+ bad_handle = v8::Persistent<v8::Object>::New(isolate, some_object);
+ handle.Dispose(isolate);
}
THREADED_TEST(NewPersistentHandleFromWeakCallback) {
LocalContext context;
+ v8::Isolate* isolate = context->GetIsolate();
v8::Persistent<v8::Object> handle1, handle2;
{
v8::HandleScope scope;
- some_object = v8::Persistent<v8::Object>::New(v8::Object::New());
- handle1 = v8::Persistent<v8::Object>::New(v8::Object::New());
- handle2 = v8::Persistent<v8::Object>::New(v8::Object::New());
+ some_object = v8::Persistent<v8::Object>::New(isolate, v8::Object::New());
+ handle1 = v8::Persistent<v8::Object>::New(isolate, v8::Object::New());
+ handle2 = v8::Persistent<v8::Object>::New(isolate, v8::Object::New());
}
// Note: order is implementation dependent alas: currently
// global handle nodes are processed by PostGarbageCollectionProcessing
// in reverse allocation order, so if second allocated handle is deleted,
// weak callback of the first handle would be able to 'reallocate' it.
- handle1.MakeWeak(NULL, NewPersistentHandleCallback);
- handle2.Dispose();
+ handle1.MakeWeak(isolate, NULL, NewPersistentHandleCallback);
+ handle2.Dispose(isolate);
HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
}
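
The weak-handle callback type also gains a leading isolate parameter, and
MakeWeak and Dispose take the isolate as well. The new shape, as a sketch:

    static void MyWeakCallback(v8::Isolate* isolate,
                               v8::Persistent<v8::Value> handle,
                               void* parameter) {
      handle.Dispose(isolate);  // disposal is isolate-explicit too
    }
    // Registered as: handle.MakeWeak(isolate, NULL, MyWeakCallback);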
v8::Persistent<v8::Object> to_be_disposed;
-void DisposeAndForceGcCallback(v8::Persistent<v8::Value> handle, void*) {
- to_be_disposed.Dispose();
+void DisposeAndForceGcCallback(v8::Isolate* isolate,
+ v8::Persistent<v8::Value> handle,
+ void*) {
+ to_be_disposed.Dispose(isolate);
HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
- handle.Dispose();
+ handle.Dispose(isolate);
}
THREADED_TEST(DoNotUseDeletedNodesInSecondLevelGc) {
LocalContext context;
+ v8::Isolate* isolate = context->GetIsolate();
v8::Persistent<v8::Object> handle1, handle2;
{
v8::HandleScope scope;
- handle1 = v8::Persistent<v8::Object>::New(v8::Object::New());
- handle2 = v8::Persistent<v8::Object>::New(v8::Object::New());
+ handle1 = v8::Persistent<v8::Object>::New(isolate, v8::Object::New());
+ handle2 = v8::Persistent<v8::Object>::New(isolate, v8::Object::New());
}
- handle1.MakeWeak(NULL, DisposeAndForceGcCallback);
+ handle1.MakeWeak(isolate, NULL, DisposeAndForceGcCallback);
to_be_disposed = handle2;
HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
}
-void DisposingCallback(v8::Persistent<v8::Value> handle, void*) {
- handle.Dispose();
+void DisposingCallback(v8::Isolate* isolate,
+ v8::Persistent<v8::Value> handle,
+ void*) {
+ handle.Dispose(isolate);
}
-void HandleCreatingCallback(v8::Persistent<v8::Value> handle, void*) {
+void HandleCreatingCallback(v8::Isolate* isolate,
+ v8::Persistent<v8::Value> handle,
+ void*) {
v8::HandleScope scope;
- v8::Persistent<v8::Object>::New(v8::Object::New());
- handle.Dispose();
+ v8::Persistent<v8::Object>::New(isolate, v8::Object::New());
+ handle.Dispose(isolate);
}
THREADED_TEST(NoGlobalHandlesOrphaningDueToWeakCallback) {
LocalContext context;
+ v8::Isolate* isolate = context->GetIsolate();
v8::Persistent<v8::Object> handle1, handle2, handle3;
{
v8::HandleScope scope;
- handle3 = v8::Persistent<v8::Object>::New(v8::Object::New());
- handle2 = v8::Persistent<v8::Object>::New(v8::Object::New());
- handle1 = v8::Persistent<v8::Object>::New(v8::Object::New());
+ handle3 = v8::Persistent<v8::Object>::New(isolate, v8::Object::New());
+ handle2 = v8::Persistent<v8::Object>::New(isolate, v8::Object::New());
+ handle1 = v8::Persistent<v8::Object>::New(isolate, v8::Object::New());
}
- handle2.MakeWeak(NULL, DisposingCallback);
- handle3.MakeWeak(NULL, HandleCreatingCallback);
+ handle2.MakeWeak(isolate, NULL, DisposingCallback);
+ handle3.MakeWeak(isolate, NULL, HandleCreatingCallback);
HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
}
@@ -11190,7 +11499,7 @@ THREADED_TEST(NestedHandleScopeAndContexts) {
v8::Handle<String> str(value->ToString());
CHECK(!str.IsEmpty());
env->Exit();
- env.Dispose();
+ env.Dispose(env->GetIsolate());
}
@@ -11264,6 +11573,7 @@ static void RunLoopInNewEnv() {
TEST(SetFunctionEntryHook) {
i::FLAG_allow_natives_syntax = true;
+ i::FLAG_use_inlining = false;
// Test setting and resetting the entry hook.
// Nulling it should always succeed.
@@ -11301,6 +11611,7 @@ TEST(SetFunctionEntryHook) {
static i::HashMap* code_map = NULL;
+static i::HashMap* jitcode_line_info = NULL;
static int saw_bar = 0;
static int move_events = 0;
@@ -11340,6 +11651,10 @@ static bool FunctionNameIs(const char* expected,
static void event_handler(const v8::JitCodeEvent* event) {
CHECK(event != NULL);
CHECK(code_map != NULL);
+ CHECK(jitcode_line_info != NULL);
+
+ class DummyJitCodeLineInfo {
+ };
switch (event->type) {
case v8::JitCodeEvent::CODE_ADDED: {
@@ -11388,6 +11703,43 @@ static void event_handler(const v8::JitCodeEvent* event) {
// Object/code removal events are currently not dispatched from the GC.
CHECK(false);
break;
+
+  // For the CODE_START_LINE_INFO_RECORDING event, we create one
+  // DummyJitCodeLineInfo data structure, point event->user_data at it,
+  // and record it in jitcode_line_info.
+ case v8::JitCodeEvent::CODE_START_LINE_INFO_RECORDING: {
+ DummyJitCodeLineInfo* line_info = new DummyJitCodeLineInfo();
+ v8::JitCodeEvent* temp_event = const_cast<v8::JitCodeEvent*>(event);
+ temp_event->user_data = line_info;
+ i::HashMap::Entry* entry =
+ jitcode_line_info->Lookup(line_info,
+ i::ComputePointerHash(line_info),
+ true);
+ entry->value = reinterpret_cast<void*>(line_info);
+ }
+ break;
+  // For the next two events, we check that the event->user_data structure
+  // was created earlier, during the CODE_START_LINE_INFO_RECORDING event,
+  // and delete it when handling CODE_END_LINE_INFO_RECORDING.
+ case v8::JitCodeEvent::CODE_END_LINE_INFO_RECORDING: {
+ CHECK(event->user_data != NULL);
+ uint32_t hash = i::ComputePointerHash(event->user_data);
+ i::HashMap::Entry* entry =
+ jitcode_line_info->Lookup(event->user_data, hash, false);
+ CHECK(entry != NULL);
+ delete reinterpret_cast<DummyJitCodeLineInfo*>(event->user_data);
+ }
+ break;
+
+ case v8::JitCodeEvent::CODE_ADD_LINE_POS_INFO: {
+ CHECK(event->user_data != NULL);
+ uint32_t hash = i::ComputePointerHash(event->user_data);
+ i::HashMap::Entry* entry =
+ jitcode_line_info->Lookup(event->user_data, hash, false);
+ CHECK(entry != NULL);
+ }
+ break;
+
default:
// Impossible event.
CHECK(false);
@@ -11396,16 +11748,13 @@ static void event_handler(const v8::JitCodeEvent* event) {
}
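
The three line-info events form a small lifecycle around event->user_data:
V8 asks the embedder to allocate a record when recording starts, echoes the
pointer back for every position it logs, and signals when it can be freed.
Condensed into a sketch (LineInfo is a hypothetical embedder record):

    struct LineInfo {
      LineInfo() : positions(0) {}
      int positions;
    };

    static void MinimalHandler(const v8::JitCodeEvent* event) {
      switch (event->type) {
        case v8::JitCodeEvent::CODE_START_LINE_INFO_RECORDING:
          // The user_data set here is echoed back by the two events below.
          const_cast<v8::JitCodeEvent*>(event)->user_data = new LineInfo();
          break;
        case v8::JitCodeEvent::CODE_ADD_LINE_POS_INFO:
          static_cast<LineInfo*>(event->user_data)->positions++;
          break;
        case v8::JitCodeEvent::CODE_END_LINE_INFO_RECORDING:
          delete static_cast<LineInfo*>(event->user_data);
          break;
        default:
          break;
      }
    }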
-// Implemented in the test-alloc.cc test suite.
-void SimulateFullSpace(i::PagedSpace* space);
-
-
static bool MatchPointers(void* key1, void* key2) {
return key1 == key2;
}
TEST(SetJitCodeEventHandler) {
+ i::FLAG_stress_compaction = true;
const char* script =
"function bar() {"
" var sum = 0;"
@@ -11422,29 +11771,32 @@ TEST(SetJitCodeEventHandler) {
isolate->Enter();
{
+ v8::HandleScope scope;
i::HashMap code(MatchPointers);
code_map = &code;
+ i::HashMap lineinfo(MatchPointers);
+ jitcode_line_info = &lineinfo;
+
saw_bar = 0;
move_events = 0;
- i::FLAG_stress_compaction = true;
V8::SetJitCodeEventHandler(v8::kJitCodeEventDefault, event_handler);
- v8::HandleScope scope;
// Generate new code objects sparsely distributed across several
// different fragmented code-space pages.
const int kIterations = 10;
for (int i = 0; i < kIterations; ++i) {
LocalContext env;
+ i::AlwaysAllocateScope always_allocate;
+ SimulateFullSpace(HEAP->code_space());
+ CompileRun(script);
- v8::Handle<v8::Script> compiled_script;
- {
- i::AlwaysAllocateScope always_allocate;
- SimulateFullSpace(HEAP->code_space());
- compiled_script = v8_compile(script);
- }
- compiled_script->Run();
+ // Keep a strong reference to the code object in the handle scope.
+ i::Handle<i::Code> bar_code(i::Handle<i::JSFunction>::cast(
+ v8::Utils::OpenHandle(*env->Global()->Get(v8_str("bar"))))->code());
+ i::Handle<i::Code> foo_code(i::Handle<i::JSFunction>::cast(
+ v8::Utils::OpenHandle(*env->Global()->Get(v8_str("foo"))))->code());
// Clear the compilation cache to get more wastage.
ISOLATE->compilation_cache()->Clear();
@@ -11453,11 +11805,13 @@ TEST(SetJitCodeEventHandler) {
// Force code movement.
HEAP->CollectAllAvailableGarbage("TestSetJitCodeEventHandler");
+ V8::SetJitCodeEventHandler(v8::kJitCodeEventDefault, NULL);
+
CHECK_LE(kIterations, saw_bar);
- CHECK_NE(0, move_events);
+ CHECK_LT(0, move_events);
code_map = NULL;
- V8::SetJitCodeEventHandler(v8::kJitCodeEventDefault, NULL);
+ jitcode_line_info = NULL;
}
isolate->Exit();
@@ -11478,16 +11832,20 @@ TEST(SetJitCodeEventHandler) {
i::HashMap code(MatchPointers);
code_map = &code;
+ i::HashMap lineinfo(MatchPointers);
+ jitcode_line_info = &lineinfo;
+
V8::SetJitCodeEventHandler(v8::kJitCodeEventEnumExisting, event_handler);
V8::SetJitCodeEventHandler(v8::kJitCodeEventDefault, NULL);
- code_map = NULL;
-
+ jitcode_line_info = NULL;
// We expect that we got some events. Note that if we could get code removal
// notifications, we could compare two collections, one created by listening
// from the time of creation of an isolate, and the other by subscribing
// with EnumExisting.
- CHECK_NE(0, code.occupancy());
+ CHECK_LT(0, code.occupancy());
+
+ code_map = NULL;
}
isolate->Exit();
@@ -11515,7 +11873,7 @@ THREADED_TEST(DisposeEnteredContext) {
LocalContext outer;
{ v8::Persistent<v8::Context> inner = v8::Context::New();
inner->Enter();
- inner.Dispose();
+ inner.Dispose(inner->GetIsolate());
inner.Clear();
inner->Exit();
}
@@ -11528,12 +11886,14 @@ THREADED_TEST(DisposeEnteredContext) {
THREADED_TEST(Regress54) {
v8::HandleScope outer;
LocalContext context;
+ v8::Isolate* isolate = context->GetIsolate();
static v8::Persistent<v8::ObjectTemplate> templ;
if (templ.IsEmpty()) {
v8::HandleScope inner;
v8::Handle<v8::ObjectTemplate> local = v8::ObjectTemplate::New();
local->SetInternalFieldCount(1);
- templ = v8::Persistent<v8::ObjectTemplate>::New(inner.Close(local));
+ templ =
+ v8::Persistent<v8::ObjectTemplate>::New(isolate, inner.Close(local));
}
v8::Handle<v8::Object> result = templ->NewInstance();
CHECK_EQ(1, result->InternalFieldCount());
@@ -11922,8 +12282,8 @@ static bool NamedGetAccessBlockAandH(Local<v8::Object> obj,
if (!name->IsString()) return false;
i::Handle<i::String> name_handle =
v8::Utils::OpenHandle(String::Cast(*name));
- return !name_handle->IsEqualTo(i::CStrVector(kPropertyA))
- && !name_handle->IsEqualTo(i::CStrVector(kPropertyH));
+ return !name_handle->IsUtf8EqualTo(i::CStrVector(kPropertyA))
+ && !name_handle->IsUtf8EqualTo(i::CStrVector(kPropertyH));
}
@@ -12222,8 +12582,8 @@ THREADED_TEST(CrossContextNew) {
context1->Exit();
// Dispose the contexts to allow them to be garbage collected.
- context0.Dispose();
- context1.Dispose();
+ context0.Dispose(context0->GetIsolate());
+ context1.Dispose(context1->GetIsolate());
}
@@ -12243,7 +12603,7 @@ class RegExpInterruptTest {
LongRunningRegExp();
{
- v8::Unlocker unlock;
+ v8::Unlocker unlock(CcTest::default_isolate());
gc_thread.Join();
}
v8::Locker::StopPreemption();
@@ -12270,7 +12630,7 @@ class RegExpInterruptTest {
block_->Wait();
while (gc_during_regexp_ < kRequiredGCs) {
{
- v8::Locker lock;
+ v8::Locker lock(CcTest::default_isolate());
// TODO(lrn): Perhaps create some garbage before collecting.
HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
gc_count_++;
@@ -12330,7 +12690,7 @@ class RegExpInterruptTest {
// Test that a regular expression execution can be interrupted and
// survive a garbage collection.
TEST(RegExpInterruption) {
- v8::Locker lock;
+ v8::Locker lock(CcTest::default_isolate());
v8::V8::Initialize();
v8::HandleScope scope;
Local<Context> local_env;
@@ -12366,7 +12726,7 @@ class ApplyInterruptTest {
LongRunningApply();
{
- v8::Unlocker unlock;
+ v8::Unlocker unlock(CcTest::default_isolate());
gc_thread.Join();
}
v8::Locker::StopPreemption();
@@ -12393,7 +12753,7 @@ class ApplyInterruptTest {
block_->Wait();
while (gc_during_apply_ < kRequiredGCs) {
{
- v8::Locker lock;
+ v8::Locker lock(CcTest::default_isolate());
HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
gc_count_++;
}
@@ -12439,7 +12799,7 @@ class ApplyInterruptTest {
// Test that nothing bad happens if we get a preemption just when we were
// about to do an apply().
TEST(ApplyInterruption) {
- v8::Locker lock;
+ v8::Locker lock(CcTest::default_isolate());
v8::V8::Initialize();
v8::HandleScope scope;
Local<Context> local_env;
@@ -12521,8 +12881,8 @@ static void MorphAString(i::String* string,
AsciiVectorResource* ascii_resource,
UC16VectorResource* uc16_resource) {
CHECK(i::StringShape(string).IsExternal());
- if (string->IsAsciiRepresentation()) {
- // Check old map is not symbol or long.
+ if (string->IsOneByteRepresentation()) {
+ // Check old map is not internalized or long.
CHECK(string->map() == HEAP->external_ascii_string_map());
// Morph external string to be TwoByte string.
string->set_map(HEAP->external_string_map());
@@ -12530,7 +12890,7 @@ static void MorphAString(i::String* string,
i::ExternalTwoByteString::cast(string);
morphed->set_resource(uc16_resource);
} else {
- // Check old map is not symbol or long.
+ // Check old map is not internalized or long.
CHECK(string->map() == HEAP->external_string_map());
// Morph external string to be ASCII string.
string->set_map(HEAP->external_ascii_string_map());
@@ -12570,8 +12930,10 @@ THREADED_TEST(MorphCompositeStringTest) {
"var slice = lhs.substring(1, lhs.length - 1);"
"var slice_on_cons = (lhs + rhs).substring(1, lhs.length *2 - 1);");
+#ifndef ENABLE_LATIN_1
CHECK(!lhs->MayContainNonAscii());
CHECK(!rhs->MayContainNonAscii());
+#endif
MorphAString(*v8::Utils::OpenHandle(*lhs), &ascii_resource, &uc16_resource);
MorphAString(*v8::Utils::OpenHandle(*rhs), &ascii_resource, &uc16_resource);
@@ -12675,7 +13037,7 @@ class RegExpStringModificationTest {
v8::Locker::StartPreemption(1);
LongRunningRegExp();
{
- v8::Unlocker unlock;
+ v8::Unlocker unlock(CcTest::default_isolate());
morph_thread.Join();
}
v8::Locker::StopPreemption();
@@ -12704,7 +13066,7 @@ class RegExpStringModificationTest {
while (morphs_during_regexp_ < kRequiredModifications &&
morphs_ < kMaxModifications) {
{
- v8::Locker lock;
+ v8::Locker lock(CcTest::default_isolate());
// Swap string between ascii and two-byte representation.
i::String* string = *input_;
MorphAString(string, &ascii_resource_, &uc16_resource_);
@@ -12752,7 +13114,7 @@ class RegExpStringModificationTest {
// Test that a regular expression execution can be interrupted and
// the string changed without failing.
TEST(RegExpStringModification) {
- v8::Locker lock;
+ v8::Locker lock(CcTest::default_isolate());
v8::V8::Initialize();
v8::HandleScope scope;
Local<Context> local_env;
@@ -13036,9 +13398,9 @@ TEST(InlinedFunctionAcrossContexts) {
"ReferenceError: G is not defined");
ctx2->Exit();
ctx1->Exit();
- ctx1.Dispose();
+ ctx1.Dispose(ctx1->GetIsolate());
}
- ctx2.Dispose();
+ ctx2.Dispose(ctx2->GetIsolate());
}
@@ -13097,9 +13459,9 @@ THREADED_TEST(GetCallingContext) {
calling_context2->Exit();
// Dispose the contexts to allow them to be garbage collected.
- calling_context0.Dispose();
- calling_context1.Dispose();
- calling_context2.Dispose();
+ calling_context0.Dispose(calling_context0->GetIsolate());
+ calling_context1.Dispose(calling_context1->GetIsolate());
+ calling_context2.Dispose(calling_context2->GetIsolate());
calling_context0.Clear();
calling_context1.Clear();
calling_context2.Clear();
@@ -13233,7 +13595,8 @@ THREADED_TEST(PixelArray) {
"sum;");
CHECK_EQ(28, result->Int32Value());
- i::Handle<i::Smi> value(i::Smi::FromInt(2));
+ i::Handle<i::Smi> value(i::Smi::FromInt(2),
+ reinterpret_cast<i::Isolate*>(context->GetIsolate()));
i::Handle<i::Object> no_failure;
no_failure =
i::JSObject::SetElement(jsobj, 1, value, NONE, i::kNonStrictMode);
@@ -14821,7 +15184,7 @@ TEST(Regress2107) {
ctx->Enter();
CreateGarbageInOldSpace();
ctx->Exit();
- ctx.Dispose();
+ ctx.Dispose(ctx->GetIsolate());
v8::V8::ContextDisposedNotification();
v8::V8::IdleNotification(kLongIdlePauseInMs);
}
@@ -14885,7 +15248,7 @@ TEST(SetResourceConstraints) {
TEST(SetResourceConstraintsInThread) {
uint32_t* set_limit;
{
- v8::Locker locker;
+ v8::Locker locker(CcTest::default_isolate());
static const int K = 1024;
set_limit = ComputeStackLimit(128 * K);
@@ -14906,7 +15269,7 @@ TEST(SetResourceConstraintsInThread) {
CHECK(stack_limit == set_limit);
}
{
- v8::Locker locker;
+ v8::Locker locker(CcTest::default_isolate());
CHECK(stack_limit == set_limit);
}
}
@@ -14918,7 +15281,7 @@ THREADED_TEST(GetHeapStatistics) {
v8::HeapStatistics heap_statistics;
CHECK_EQ(static_cast<int>(heap_statistics.total_heap_size()), 0);
CHECK_EQ(static_cast<int>(heap_statistics.used_heap_size()), 0);
- v8::V8::GetHeapStatistics(&heap_statistics);
+ c1->GetIsolate()->GetHeapStatistics(&heap_statistics);
CHECK_NE(static_cast<int>(heap_statistics.total_heap_size()), 0);
CHECK_NE(static_cast<int>(heap_statistics.used_heap_size()), 0);
}
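
Heap statistics likewise move from the process-global V8 namespace onto the
isolate, matching the rest of this patch. In sketch form:

    v8::HeapStatistics stats;
    // Old: v8::V8::GetHeapStatistics(&stats);
    v8::Isolate::GetCurrent()->GetHeapStatistics(&stats);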
@@ -14981,8 +15344,8 @@ TEST(VisitExternalStrings) {
HEAP->CollectAllAvailableGarbage(); // Tenure string.
// Turn into a symbol.
i::Handle<i::String> string3_i = v8::Utils::OpenHandle(*string3);
- CHECK(!HEAP->LookupSymbol(*string3_i)->IsFailure());
- CHECK(string3_i->IsSymbol());
+ CHECK(!HEAP->InternalizeString(*string3_i)->IsFailure());
+ CHECK(string3_i->IsInternalizedString());
// We need to add usages for string* to avoid warnings in GCC 4.7
CHECK(string0->IsExternal());
@@ -15170,11 +15533,11 @@ TEST(Regress528) {
context->Enter();
Local<v8::String> obj = v8::String::New("");
- context->SetData(obj);
+ context->SetEmbedderData(0, obj);
CompileRun(source_simple);
context->Exit();
}
- context.Dispose();
+ context.Dispose(context->GetIsolate());
v8::V8::ContextDisposedNotification();
for (gc_count = 1; gc_count < 10; gc_count++) {
other_context->Enter();
@@ -15197,7 +15560,7 @@ TEST(Regress528) {
CompileRun(source_eval);
context->Exit();
}
- context.Dispose();
+ context.Dispose(context->GetIsolate());
v8::V8::ContextDisposedNotification();
for (gc_count = 1; gc_count < 10; gc_count++) {
other_context->Enter();
@@ -15225,7 +15588,7 @@ TEST(Regress528) {
CHECK_EQ(1, message->GetLineNumber());
context->Exit();
}
- context.Dispose();
+ context.Dispose(context->GetIsolate());
v8::V8::ContextDisposedNotification();
for (gc_count = 1; gc_count < 10; gc_count++) {
other_context->Enter();
@@ -15237,7 +15600,7 @@ TEST(Regress528) {
CHECK_GE(2, gc_count);
CHECK_EQ((snapshot_enabled ? 2 : 1), GetGlobalObjectsCount());
- other_context.Dispose();
+ other_context.Dispose(other_context->GetIsolate());
v8::V8::ContextDisposedNotification();
}
@@ -15681,13 +16044,13 @@ THREADED_TEST(TwoByteStringInAsciiCons) {
CHECK(result->IsString());
i::Handle<i::String> string = v8::Utils::OpenHandle(String::Cast(*result));
int length = string->length();
- CHECK(string->IsAsciiRepresentation());
+ CHECK(string->IsOneByteRepresentation());
FlattenString(string);
i::Handle<i::String> flat_string = FlattenGetString(string);
- CHECK(string->IsAsciiRepresentation());
- CHECK(flat_string->IsAsciiRepresentation());
+ CHECK(string->IsOneByteRepresentation());
+ CHECK(flat_string->IsOneByteRepresentation());
// Create external resource.
uint16_t* uc16_buffer = new uint16_t[length + 1];
@@ -15706,7 +16069,7 @@ THREADED_TEST(TwoByteStringInAsciiCons) {
// ASCII characters). This is a valid sequence of steps, and it can happen
// in real pages.
- CHECK(string->IsAsciiRepresentation());
+ CHECK(string->IsOneByteRepresentation());
i::ConsString* cons = i::ConsString::cast(*string);
CHECK_EQ(0, cons->second()->length());
CHECK(cons->first()->IsTwoByteRepresentation());
@@ -16027,10 +16390,10 @@ TEST(RunTwoIsolatesOnSingleThread) {
{
v8::Isolate::Scope iscope(isolate2);
- context2.Dispose();
+ context2.Dispose(context2->GetIsolate());
}
- context1.Dispose();
+ context1.Dispose(context1->GetIsolate());
isolate1->Exit();
v8::V8::SetFatalErrorHandler(StoringErrorCallback);
@@ -16136,6 +16499,7 @@ TEST(IsolateDifferentContexts) {
CHECK(v->IsNumber());
CHECK_EQ(22, static_cast<int>(v->NumberValue()));
}
+ isolate->Dispose();
}
class InitDefaultIsolateThread : public v8::internal::Thread {
@@ -16404,7 +16768,7 @@ class Visitor42 : public v8::PersistentHandleVisitor {
CHECK(value->IsObject());
v8::Persistent<v8::Object> visited =
v8::Persistent<v8::Object>::Cast(value);
- CHECK_EQ(42, visited.WrapperClassId());
+ CHECK_EQ(42, visited.WrapperClassId(v8::Isolate::GetCurrent()));
CHECK_EQ(object_, visited);
++counter_;
}
@@ -16418,17 +16782,58 @@ class Visitor42 : public v8::PersistentHandleVisitor {
TEST(PersistentHandleVisitor) {
v8::HandleScope scope;
LocalContext context;
+ v8::Isolate* isolate = context->GetIsolate();
v8::Persistent<v8::Object> object =
- v8::Persistent<v8::Object>::New(v8::Object::New());
- CHECK_EQ(0, object.WrapperClassId());
- object.SetWrapperClassId(42);
- CHECK_EQ(42, object.WrapperClassId());
+ v8::Persistent<v8::Object>::New(isolate, v8::Object::New());
+ CHECK_EQ(0, object.WrapperClassId(isolate));
+ object.SetWrapperClassId(isolate, 42);
+ CHECK_EQ(42, object.WrapperClassId(isolate));
Visitor42 visitor(object);
v8::V8::VisitHandlesWithClassIds(&visitor);
CHECK_EQ(1, visitor.counter_);
- object.Dispose();
+ object.Dispose(isolate);
+}
+
+
+TEST(WrapperClassId) {
+ v8::HandleScope scope;
+ LocalContext context;
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::Persistent<v8::Object> object =
+ v8::Persistent<v8::Object>::New(isolate, v8::Object::New());
+ CHECK_EQ(0, object.WrapperClassId(isolate));
+ object.SetWrapperClassId(isolate, 65535);
+ CHECK_EQ(65535, object.WrapperClassId(isolate));
+ object.Dispose(isolate);
+}
+
+
+TEST(PersistentHandleInNewSpaceVisitor) {
+ v8::HandleScope scope;
+ LocalContext context;
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::Persistent<v8::Object> object1 =
+ v8::Persistent<v8::Object>::New(isolate, v8::Object::New());
+ CHECK_EQ(0, object1.WrapperClassId(isolate));
+ object1.SetWrapperClassId(isolate, 42);
+ CHECK_EQ(42, object1.WrapperClassId(isolate));
+
+ HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+
+ v8::Persistent<v8::Object> object2 =
+ v8::Persistent<v8::Object>::New(isolate, v8::Object::New());
+ CHECK_EQ(0, object2.WrapperClassId(isolate));
+ object2.SetWrapperClassId(isolate, 42);
+ CHECK_EQ(42, object2.WrapperClassId(isolate));
+
+ Visitor42 visitor(object2);
+ v8::V8::VisitHandlesForPartialDependence(isolate, &visitor);
+ CHECK_EQ(1, visitor.counter_);
+
+ object1.Dispose(isolate);
+ object2.Dispose(isolate);
}
@@ -16658,9 +17063,9 @@ THREADED_TEST(CreationContext) {
CheckContextId(instance2, 2);
}
- context1.Dispose();
- context2.Dispose();
- context3.Dispose();
+ context1.Dispose(context1->GetIsolate());
+ context2.Dispose(context2->GetIsolate());
+ context3.Dispose(context3->GetIsolate());
}
@@ -16678,7 +17083,7 @@ THREADED_TEST(CreationContextOfJsFunction) {
CHECK(function->CreationContext() == context);
CheckContextId(function, 1);
- context.Dispose();
+ context.Dispose(context->GetIsolate());
}
@@ -16796,6 +17201,27 @@ TEST(HasOwnProperty) {
}
+TEST(IndexedInterceptorWithStringProto) {
+ v8::HandleScope scope;
+ Handle<ObjectTemplate> templ = ObjectTemplate::New();
+ templ->SetIndexedPropertyHandler(NULL,
+ NULL,
+ HasOwnPropertyIndexedPropertyQuery);
+ LocalContext context;
+ context->Global()->Set(v8_str("obj"), templ->NewInstance());
+ CompileRun("var s = new String('foobar'); obj.__proto__ = s;");
+ // These should be intercepted.
+ CHECK(CompileRun("42 in obj")->BooleanValue());
+ CHECK(CompileRun("'42' in obj")->BooleanValue());
+ // These should fall through to the String prototype.
+ CHECK(CompileRun("0 in obj")->BooleanValue());
+ CHECK(CompileRun("'0' in obj")->BooleanValue());
+ // And these should both fail.
+ CHECK(!CompileRun("32 in obj")->BooleanValue());
+ CHECK(!CompileRun("'32' in obj")->BooleanValue());
+}
+
+
void CheckCodeGenerationAllowed() {
Handle<Value> result = CompileRun("eval('42')");
CHECK_EQ(42, result->Int32Value());
@@ -17054,7 +17480,7 @@ THREADED_TEST(Regress93759) {
Local<Value> result6 = CompileRun("Object.getPrototypeOf(phidden)");
CHECK(result6->Equals(Undefined()));
- context.Dispose();
+ context.Dispose(context->GetIsolate());
}
@@ -17182,7 +17608,7 @@ THREADED_TEST(ForeignFunctionReceiver) {
// Calling with no base.
TestReceiver(o, context->Global(), "(1,func)()");
- foreign_context.Dispose();
+ foreign_context.Dispose(foreign_context->GetIsolate());
}
@@ -17422,7 +17848,7 @@ TEST(StringEmpty) {
v8::HandleScope scope;
LocalContext context;
v8::Isolate* isolate = v8::Isolate::GetCurrent();
- i::Handle<i::Object> empty_string = FACTORY->empty_symbol();
+ i::Handle<i::Object> empty_string = FACTORY->empty_string();
CHECK(*v8::Utils::OpenHandle(*v8::String::Empty()) == *empty_string);
CHECK(*v8::Utils::OpenHandle(*v8::String::Empty(isolate)) == *empty_string);
@@ -17883,7 +18309,6 @@ class ThreadInterruptTest {
private:
ThreadInterruptTest* test_;
- struct sigaction sa_;
};
i::Semaphore* sem_;
@@ -17894,4 +18319,5 @@ class ThreadInterruptTest {
THREADED_TEST(SemaphoreInterruption) {
ThreadInterruptTest().RunTest();
}
+
#endif // WIN32
diff --git a/src/3rdparty/v8/test/cctest/test-assembler-arm.cc b/src/3rdparty/v8/test/cctest/test-assembler-arm.cc
index cdab1b9..14bcb1a 100644
--- a/src/3rdparty/v8/test/cctest/test-assembler-arm.cc
+++ b/src/3rdparty/v8/test/cctest/test-assembler-arm.cc
@@ -59,17 +59,18 @@ TEST(0) {
InitializeVM();
v8::HandleScope scope;
- Assembler assm(Isolate::Current(), NULL, 0);
+ Isolate* isolate = Isolate::Current();
+ Assembler assm(isolate, NULL, 0);
__ add(r0, r0, Operand(r1));
__ mov(pc, Operand(lr));
CodeDesc desc;
assm.GetCode(&desc);
- Object* code = HEAP->CreateCode(
+ Object* code = isolate->heap()->CreateCode(
desc,
Code::ComputeFlags(Code::STUB),
- Handle<Object>(HEAP->undefined_value()))->ToObjectChecked();
+ Handle<Code>())->ToObjectChecked();
CHECK(code->IsCode());
#ifdef DEBUG
Code::cast(code)->Print();
@@ -85,11 +86,12 @@ TEST(1) {
InitializeVM();
v8::HandleScope scope;
- Assembler assm(Isolate::Current(), NULL, 0);
+ Isolate* isolate = Isolate::Current();
+ Assembler assm(isolate, NULL, 0);
Label L, C;
__ mov(r1, Operand(r0));
- __ mov(r0, Operand(0, RelocInfo::NONE));
+ __ mov(r0, Operand::Zero());
__ b(&C);
__ bind(&L);
@@ -97,16 +99,16 @@ TEST(1) {
__ sub(r1, r1, Operand(1));
__ bind(&C);
- __ teq(r1, Operand(0, RelocInfo::NONE));
+ __ teq(r1, Operand::Zero());
__ b(ne, &L);
__ mov(pc, Operand(lr));
CodeDesc desc;
assm.GetCode(&desc);
- Object* code = HEAP->CreateCode(
+ Object* code = isolate->heap()->CreateCode(
desc,
Code::ComputeFlags(Code::STUB),
- Handle<Object>(HEAP->undefined_value()))->ToObjectChecked();
+ Handle<Code>())->ToObjectChecked();
CHECK(code->IsCode());
#ifdef DEBUG
Code::cast(code)->Print();
@@ -122,7 +124,8 @@ TEST(2) {
InitializeVM();
v8::HandleScope scope;
- Assembler assm(Isolate::Current(), NULL, 0);
+ Isolate* isolate = Isolate::Current();
+ Assembler assm(isolate, NULL, 0);
Label L, C;
__ mov(r1, Operand(r0));
@@ -134,7 +137,7 @@ TEST(2) {
__ sub(r1, r1, Operand(1));
__ bind(&C);
- __ teq(r1, Operand(0, RelocInfo::NONE));
+ __ teq(r1, Operand::Zero());
__ b(ne, &L);
__ mov(pc, Operand(lr));
@@ -149,10 +152,10 @@ TEST(2) {
CodeDesc desc;
assm.GetCode(&desc);
- Object* code = HEAP->CreateCode(
+ Object* code = isolate->heap()->CreateCode(
desc,
Code::ComputeFlags(Code::STUB),
- Handle<Object>(HEAP->undefined_value()))->ToObjectChecked();
+ Handle<Code>())->ToObjectChecked();
CHECK(code->IsCode());
#ifdef DEBUG
Code::cast(code)->Print();
@@ -175,7 +178,8 @@ TEST(3) {
} T;
T t;
- Assembler assm(Isolate::Current(), NULL, 0);
+ Isolate* isolate = Isolate::Current();
+ Assembler assm(isolate, NULL, 0);
Label L, C;
__ mov(ip, Operand(sp));
@@ -197,10 +201,10 @@ TEST(3) {
CodeDesc desc;
assm.GetCode(&desc);
- Object* code = HEAP->CreateCode(
+ Object* code = isolate->heap()->CreateCode(
desc,
Code::ComputeFlags(Code::STUB),
- Handle<Object>(HEAP->undefined_value()))->ToObjectChecked();
+ Handle<Code>())->ToObjectChecked();
CHECK(code->IsCode());
#ifdef DEBUG
Code::cast(code)->Print();
@@ -242,7 +246,8 @@ TEST(4) {
// Create a function that accepts &t, and loads, manipulates, and stores
// the doubles and floats.
- Assembler assm(Isolate::Current(), NULL, 0);
+ Isolate* isolate = Isolate::Current();
+ Assembler assm(isolate, NULL, 0);
Label L, C;
@@ -259,6 +264,9 @@ TEST(4) {
__ vadd(d5, d6, d7);
__ vstr(d5, r4, OFFSET_OF(T, c));
+ __ vmla(d5, d6, d7);
+ __ vmls(d5, d5, d6);
+
__ vmov(r2, r3, d5);
__ vmov(d4, r2, r3);
__ vstr(d4, r4, OFFSET_OF(T, b));
@@ -312,10 +320,10 @@ TEST(4) {
CodeDesc desc;
assm.GetCode(&desc);
- Object* code = HEAP->CreateCode(
+ Object* code = isolate->heap()->CreateCode(
desc,
Code::ComputeFlags(Code::STUB),
- Handle<Object>(HEAP->undefined_value()))->ToObjectChecked();
+ Handle<Code>())->ToObjectChecked();
CHECK(code->IsCode());
#ifdef DEBUG
Code::cast(code)->Print();
@@ -347,7 +355,7 @@ TEST(4) {
CHECK_EQ(1.0, t.e);
CHECK_EQ(1.000000059604644775390625, t.d);
CHECK_EQ(4.25, t.c);
- CHECK_EQ(4.25, t.b);
+ CHECK_EQ(-4.1875, t.b);
CHECK_EQ(1.5, t.a);
}
}
@@ -358,7 +366,8 @@ TEST(5) {
InitializeVM();
v8::HandleScope scope;
- Assembler assm(Isolate::Current(), NULL, 0);
+ Isolate* isolate = Isolate::Current();
+ Assembler assm(isolate, NULL, 0);
if (CpuFeatures::IsSupported(ARMv7)) {
CpuFeatures::Scope scope(ARMv7);
@@ -372,10 +381,10 @@ TEST(5) {
CodeDesc desc;
assm.GetCode(&desc);
- Object* code = HEAP->CreateCode(
+ Object* code = isolate->heap()->CreateCode(
desc,
Code::ComputeFlags(Code::STUB),
- Handle<Object>(HEAP->undefined_value()))->ToObjectChecked();
+ Handle<Code>())->ToObjectChecked();
CHECK(code->IsCode());
#ifdef DEBUG
Code::cast(code)->Print();
@@ -394,7 +403,8 @@ TEST(6) {
InitializeVM();
v8::HandleScope scope;
- Assembler assm(Isolate::Current(), NULL, 0);
+ Isolate* isolate = Isolate::Current();
+ Assembler assm(isolate, NULL, 0);
if (CpuFeatures::IsSupported(ARMv7)) {
CpuFeatures::Scope scope(ARMv7);
@@ -407,10 +417,10 @@ TEST(6) {
CodeDesc desc;
assm.GetCode(&desc);
- Object* code = HEAP->CreateCode(
+ Object* code = isolate->heap()->CreateCode(
desc,
Code::ComputeFlags(Code::STUB),
- Handle<Object>(HEAP->undefined_value()))->ToObjectChecked();
+ Handle<Code>())->ToObjectChecked();
CHECK(code->IsCode());
#ifdef DEBUG
Code::cast(code)->Print();
@@ -437,7 +447,8 @@ static void TestRoundingMode(VCVTTypes types,
InitializeVM();
v8::HandleScope scope;
- Assembler assm(Isolate::Current(), NULL, 0);
+ Isolate* isolate = Isolate::Current();
+ Assembler assm(isolate, NULL, 0);
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
@@ -483,10 +494,10 @@ static void TestRoundingMode(VCVTTypes types,
CodeDesc desc;
assm.GetCode(&desc);
- Object* code = HEAP->CreateCode(
+ Object* code = isolate->heap()->CreateCode(
desc,
Code::ComputeFlags(Code::STUB),
- Handle<Object>(HEAP->undefined_value()))->ToObjectChecked();
+ Handle<Code>())->ToObjectChecked();
CHECK(code->IsCode());
#ifdef DEBUG
Code::cast(code)->Print();
@@ -640,7 +651,8 @@ TEST(8) {
// Create a function that uses vldm/vstm to move some double and
// single precision values around in memory.
- Assembler assm(Isolate::Current(), NULL, 0);
+ Isolate* isolate = Isolate::Current();
+ Assembler assm(isolate, NULL, 0);
if (CpuFeatures::IsSupported(VFP2)) {
CpuFeatures::Scope scope(VFP2);
@@ -669,10 +681,10 @@ TEST(8) {
CodeDesc desc;
assm.GetCode(&desc);
- Object* code = HEAP->CreateCode(
+ Object* code = isolate->heap()->CreateCode(
desc,
Code::ComputeFlags(Code::STUB),
- Handle<Object>(HEAP->undefined_value()))->ToObjectChecked();
+ Handle<Code>())->ToObjectChecked();
CHECK(code->IsCode());
#ifdef DEBUG
Code::cast(code)->Print();
@@ -751,7 +763,8 @@ TEST(9) {
// Create a function that uses vldm/vstm to move some double and
// single precision values around in memory.
- Assembler assm(Isolate::Current(), NULL, 0);
+ Isolate* isolate = Isolate::Current();
+ Assembler assm(isolate, NULL, 0);
if (CpuFeatures::IsSupported(VFP2)) {
CpuFeatures::Scope scope(VFP2);
@@ -784,10 +797,10 @@ TEST(9) {
CodeDesc desc;
assm.GetCode(&desc);
- Object* code = HEAP->CreateCode(
+ Object* code = isolate->heap()->CreateCode(
desc,
Code::ComputeFlags(Code::STUB),
- Handle<Object>(HEAP->undefined_value()))->ToObjectChecked();
+ Handle<Code>())->ToObjectChecked();
CHECK(code->IsCode());
#ifdef DEBUG
Code::cast(code)->Print();
@@ -866,7 +879,8 @@ TEST(10) {
// Create a function that uses vldm/vstm to move some double and
// single precision values around in memory.
- Assembler assm(Isolate::Current(), NULL, 0);
+ Isolate* isolate = Isolate::Current();
+ Assembler assm(isolate, NULL, 0);
if (CpuFeatures::IsSupported(VFP2)) {
CpuFeatures::Scope scope(VFP2);
@@ -895,10 +909,10 @@ TEST(10) {
CodeDesc desc;
assm.GetCode(&desc);
- Object* code = HEAP->CreateCode(
+ Object* code = isolate->heap()->CreateCode(
desc,
Code::ComputeFlags(Code::STUB),
- Handle<Object>(HEAP->undefined_value()))->ToObjectChecked();
+ Handle<Code>())->ToObjectChecked();
CHECK(code->IsCode());
#ifdef DEBUG
Code::cast(code)->Print();
@@ -962,7 +976,8 @@ TEST(11) {
i.a = 0xabcd0001;
i.b = 0xabcd0000;
- Assembler assm(Isolate::Current(), NULL, 0);
+ Isolate* isolate = Isolate::Current();
+ Assembler assm(isolate, NULL, 0);
// Test HeapObject untagging.
__ ldr(r1, MemOperand(r0, OFFSET_OF(I, a)));
@@ -977,13 +992,13 @@ TEST(11) {
// Test corner cases.
__ mov(r1, Operand(0xffffffff));
- __ mov(r2, Operand(0));
+ __ mov(r2, Operand::Zero());
__ mov(r3, Operand(r1, ASR, 1), SetCC); // Set the carry.
__ adc(r3, r1, Operand(r2));
__ str(r3, MemOperand(r0, OFFSET_OF(I, c)));
__ mov(r1, Operand(0xffffffff));
- __ mov(r2, Operand(0));
+ __ mov(r2, Operand::Zero());
__ mov(r3, Operand(r2, ASR, 1), SetCC); // Unset the carry.
__ adc(r3, r1, Operand(r2));
__ str(r3, MemOperand(r0, OFFSET_OF(I, d)));
@@ -992,10 +1007,10 @@ TEST(11) {
CodeDesc desc;
assm.GetCode(&desc);
- Object* code = HEAP->CreateCode(
+ Object* code = isolate->heap()->CreateCode(
desc,
Code::ComputeFlags(Code::STUB),
- Handle<Object>(HEAP->undefined_value()))->ToObjectChecked();
+ Handle<Code>())->ToObjectChecked();
CHECK(code->IsCode());
#ifdef DEBUG
Code::cast(code)->Print();
@@ -1024,4 +1039,123 @@ TEST(12) {
__ nop();
}
+
+TEST(13) {
+ // Test VFP instructions using registers d16-d31.
+ InitializeVM();
+ v8::HandleScope scope;
+
+ if (!CpuFeatures::IsSupported(VFP32DREGS)) {
+ return;
+ }
+
+ typedef struct {
+ double a;
+ double b;
+ double c;
+ double x;
+ double y;
+ double z;
+ double i;
+ double j;
+ double k;
+ } T;
+ T t;
+
+ // Create a function that accepts &t, and loads, manipulates, and stores
+ // the doubles and floats.
+ Isolate* isolate = Isolate::Current();
+ Assembler assm(isolate, NULL, 0);
+ Label L, C;
+
+
+ if (CpuFeatures::IsSupported(VFP3)) {
+ CpuFeatures::Scope scope(VFP3);
+
+ __ stm(db_w, sp, r4.bit() | lr.bit());
+
+ // Load a, b, c into d16, d17, d18.
+ __ mov(r4, Operand(r0));
+ __ vldr(d16, r4, OFFSET_OF(T, a));
+ __ vldr(d17, r4, OFFSET_OF(T, b));
+ __ vldr(d18, r4, OFFSET_OF(T, c));
+
+ __ vneg(d25, d16);
+ __ vadd(d25, d25, d17);
+ __ vsub(d25, d25, d18);
+ __ vmul(d25, d25, d25);
+ __ vdiv(d25, d25, d18);
+
+ __ vmov(d16, d25);
+ __ vsqrt(d17, d25);
+ __ vneg(d17, d17);
+ __ vabs(d17, d17);
+ __ vmla(d18, d16, d17);
+
+ // Store d16, d17, d18 into a, b, c.
+ __ mov(r4, Operand(r0));
+ __ vstr(d16, r4, OFFSET_OF(T, a));
+ __ vstr(d17, r4, OFFSET_OF(T, b));
+ __ vstr(d18, r4, OFFSET_OF(T, c));
+
+ // Load x, y, z into d29-d31.
+ __ add(r4, r0, Operand(OFFSET_OF(T, x)));
+ __ vldm(ia_w, r4, d29, d31);
+
+ // Swap d29 and d30 via r registers.
+ __ vmov(r1, r2, d29);
+ __ vmov(d29, d30);
+ __ vmov(d30, r1, r2);
+
+ // Convert to and from integer.
+ __ vcvt_s32_f64(s1, d31);
+ __ vcvt_f64_u32(d31, s1);
+
+ // Store d29-d31 into x, y, z.
+ __ add(r4, r0, Operand(OFFSET_OF(T, x)));
+ __ vstm(ia_w, r4, d29, d31);
+
+ // Move constants into d20, d21, d22 and store into i, j, k.
+ __ vmov(d20, 14.7610017472335499);
+ __ vmov(d21, 16.0);
+ __ mov(r1, Operand(372106121));
+ __ mov(r2, Operand(1079146608));
+ __ vmov(d22, VmovIndexLo, r1);
+ __ vmov(d22, VmovIndexHi, r2);
+ __ add(r4, r0, Operand(OFFSET_OF(T, i)));
+ __ vstm(ia_w, r4, d20, d22);
+
+ __ ldm(ia_w, sp, r4.bit() | pc.bit());
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Object* code = isolate->heap()->CreateCode(
+ desc,
+ Code::ComputeFlags(Code::STUB),
+ Handle<Code>())->ToObjectChecked();
+ CHECK(code->IsCode());
+#ifdef DEBUG
+ Code::cast(code)->Print();
+#endif
+ F3 f = FUNCTION_CAST<F3>(Code::cast(code)->entry());
+ t.a = 1.5;
+ t.b = 2.75;
+ t.c = 17.17;
+ t.x = 1.5;
+ t.y = 2.75;
+ t.z = 17.17;
+ Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
+ USE(dummy);
+ CHECK_EQ(14.7610017472335499, t.a);
+ CHECK_EQ(3.84200491244266251, t.b);
+ CHECK_EQ(73.8818412254460241, t.c);
+ CHECK_EQ(2.75, t.x);
+ CHECK_EQ(1.5, t.y);
+ CHECK_EQ(17.0, t.z);
+ CHECK_EQ(14.7610017472335499, t.i);
+ CHECK_EQ(16.0, t.j);
+ CHECK_EQ(73.8818412254460241, t.k);
+ }
+}
+
#undef __
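
Every assembler test in this file (and the ia32 file below) now shares the
same code-creation boilerplate: the heap is reached through an explicit
Isolate* and the self-reference argument is an empty Handle<Code>() instead
of the old undefined-value sentinel. The repeated pattern, condensed:

    Isolate* isolate = Isolate::Current();
    Assembler assm(isolate, NULL, 0);
    // ... emit instructions ...
    CodeDesc desc;
    assm.GetCode(&desc);
    Object* code = isolate->heap()->CreateCode(
        desc,
        Code::ComputeFlags(Code::STUB),
        Handle<Code>())->ToObjectChecked();  // empty handle: no self-reference
    F0 f = FUNCTION_CAST<F0>(Code::cast(code)->entry());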
diff --git a/src/3rdparty/v8/test/cctest/test-assembler-ia32.cc b/src/3rdparty/v8/test/cctest/test-assembler-ia32.cc
index 815e618..22d9fb2 100644
--- a/src/3rdparty/v8/test/cctest/test-assembler-ia32.cc
+++ b/src/3rdparty/v8/test/cctest/test-assembler-ia32.cc
@@ -61,7 +61,8 @@ TEST(AssemblerIa320) {
v8::HandleScope scope;
v8::internal::byte buffer[256];
- Assembler assm(Isolate::Current(), buffer, sizeof buffer);
+ Isolate* isolate = Isolate::Current();
+ Assembler assm(isolate, buffer, sizeof buffer);
__ mov(eax, Operand(esp, 4));
__ add(eax, Operand(esp, 8));
@@ -69,10 +70,10 @@ TEST(AssemblerIa320) {
CodeDesc desc;
assm.GetCode(&desc);
- Object* code = HEAP->CreateCode(
+ Object* code = isolate->heap()->CreateCode(
desc,
Code::ComputeFlags(Code::STUB),
- Handle<Object>(HEAP->undefined_value()))->ToObjectChecked();
+ Handle<Code>())->ToObjectChecked();
CHECK(code->IsCode());
#ifdef OBJECT_PRINT
Code::cast(code)->Print();
@@ -89,7 +90,8 @@ TEST(AssemblerIa321) {
v8::HandleScope scope;
v8::internal::byte buffer[256];
- Assembler assm(Isolate::Current(), buffer, sizeof buffer);
+ Isolate* isolate = Isolate::Current();
+ Assembler assm(isolate, buffer, sizeof buffer);
Label L, C;
__ mov(edx, Operand(esp, 4));
@@ -107,10 +109,10 @@ TEST(AssemblerIa321) {
CodeDesc desc;
assm.GetCode(&desc);
- Object* code = HEAP->CreateCode(
+ Object* code = isolate->heap()->CreateCode(
desc,
Code::ComputeFlags(Code::STUB),
- Handle<Object>(HEAP->undefined_value()))->ToObjectChecked();
+ Handle<Code>())->ToObjectChecked();
CHECK(code->IsCode());
#ifdef OBJECT_PRINT
Code::cast(code)->Print();
@@ -127,7 +129,8 @@ TEST(AssemblerIa322) {
v8::HandleScope scope;
v8::internal::byte buffer[256];
- Assembler assm(Isolate::Current(), buffer, sizeof buffer);
+ Isolate* isolate = Isolate::Current();
+ Assembler assm(isolate, buffer, sizeof buffer);
Label L, C;
__ mov(edx, Operand(esp, 4));
@@ -149,10 +152,10 @@ TEST(AssemblerIa322) {
CodeDesc desc;
assm.GetCode(&desc);
- Object* code = HEAP->CreateCode(
+ Object* code = isolate->heap()->CreateCode(
desc,
Code::ComputeFlags(Code::STUB),
- Handle<Object>(HEAP->undefined_value()))->ToObjectChecked();
+ Handle<Code>())->ToObjectChecked();
CHECK(code->IsCode());
#ifdef OBJECT_PRINT
Code::cast(code)->Print();
@@ -173,7 +176,8 @@ TEST(AssemblerIa323) {
v8::HandleScope scope;
v8::internal::byte buffer[256];
- Assembler assm(Isolate::Current(), buffer, sizeof buffer);
+ Isolate* isolate = Isolate::Current();
+ Assembler assm(isolate, buffer, sizeof buffer);
CHECK(CpuFeatures::IsSupported(SSE2));
{ CpuFeatures::Scope fscope(SSE2);
@@ -183,10 +187,10 @@ TEST(AssemblerIa323) {
CodeDesc desc;
assm.GetCode(&desc);
- Code* code = Code::cast(HEAP->CreateCode(
+ Code* code = Code::cast(isolate->heap()->CreateCode(
desc,
Code::ComputeFlags(Code::STUB),
- Handle<Object>(HEAP->undefined_value()))->ToObjectChecked());
+ Handle<Code>())->ToObjectChecked());
// don't print the code - our disassembler can't handle cvttss2si
// instead print bytes
Disassembler::Dump(stdout,
@@ -208,7 +212,8 @@ TEST(AssemblerIa324) {
v8::HandleScope scope;
v8::internal::byte buffer[256];
- Assembler assm(Isolate::Current(), buffer, sizeof buffer);
+ Isolate* isolate = Isolate::Current();
+ Assembler assm(isolate, buffer, sizeof buffer);
CHECK(CpuFeatures::IsSupported(SSE2));
CpuFeatures::Scope fscope(SSE2);
@@ -217,10 +222,10 @@ TEST(AssemblerIa324) {
CodeDesc desc;
assm.GetCode(&desc);
- Code* code = Code::cast(HEAP->CreateCode(
+ Code* code = Code::cast(isolate->heap()->CreateCode(
desc,
Code::ComputeFlags(Code::STUB),
- Handle<Object>(HEAP->undefined_value()))->ToObjectChecked());
+ Handle<Code>())->ToObjectChecked());
// don't print the code - our disassembler can't handle cvttsd2si
// instead print bytes
Disassembler::Dump(stdout,
@@ -239,17 +244,18 @@ TEST(AssemblerIa325) {
v8::HandleScope scope;
v8::internal::byte buffer[256];
- Assembler assm(Isolate::Current(), buffer, sizeof buffer);
+ Isolate* isolate = Isolate::Current();
+ Assembler assm(isolate, buffer, sizeof buffer);
- __ mov(eax, Operand(reinterpret_cast<intptr_t>(&baz), RelocInfo::NONE));
+ __ mov(eax, Operand(reinterpret_cast<intptr_t>(&baz), RelocInfo::NONE32));
__ ret(0);
CodeDesc desc;
assm.GetCode(&desc);
- Code* code = Code::cast(HEAP->CreateCode(
+ Code* code = Code::cast(isolate->heap()->CreateCode(
desc,
Code::ComputeFlags(Code::STUB),
- Handle<Object>(HEAP->undefined_value()))->ToObjectChecked());
+ Handle<Code>())->ToObjectChecked());
F0 f = FUNCTION_CAST<F0>(code->entry());
int res = f();
CHECK_EQ(42, res);
@@ -266,7 +272,8 @@ TEST(AssemblerIa326) {
CHECK(CpuFeatures::IsSupported(SSE2));
CpuFeatures::Scope fscope(SSE2);
v8::internal::byte buffer[256];
- Assembler assm(Isolate::Current(), buffer, sizeof buffer);
+ Isolate* isolate = Isolate::Current();
+ Assembler assm(isolate, buffer, sizeof buffer);
__ movdbl(xmm0, Operand(esp, 1 * kPointerSize));
__ movdbl(xmm1, Operand(esp, 3 * kPointerSize));
@@ -283,10 +290,10 @@ TEST(AssemblerIa326) {
CodeDesc desc;
assm.GetCode(&desc);
- Code* code = Code::cast(HEAP->CreateCode(
+ Code* code = Code::cast(isolate->heap()->CreateCode(
desc,
Code::ComputeFlags(Code::STUB),
- Handle<Object>(HEAP->undefined_value()))->ToObjectChecked());
+ Handle<Code>())->ToObjectChecked());
#ifdef DEBUG
::printf("\n---\n");
// don't print the code - our disassembler can't handle SSE instructions
@@ -312,7 +319,8 @@ TEST(AssemblerIa328) {
CHECK(CpuFeatures::IsSupported(SSE2));
CpuFeatures::Scope fscope(SSE2);
v8::internal::byte buffer[256];
- Assembler assm(Isolate::Current(), buffer, sizeof buffer);
+ Isolate* isolate = Isolate::Current();
+ Assembler assm(isolate, buffer, sizeof buffer);
__ mov(eax, Operand(esp, 4));
__ cvtsi2sd(xmm0, eax);
// Copy xmm0 to st(0) using eight bytes of stack.
@@ -323,10 +331,10 @@ TEST(AssemblerIa328) {
__ ret(0);
CodeDesc desc;
assm.GetCode(&desc);
- Code* code = Code::cast(HEAP->CreateCode(
+ Code* code = Code::cast(isolate->heap()->CreateCode(
desc,
Code::ComputeFlags(Code::STUB),
- Handle<Object>(HEAP->undefined_value()))->ToObjectChecked());
+ Handle<Code>())->ToObjectChecked());
CHECK(code->IsCode());
#ifdef OBJECT_PRINT
Code::cast(code)->Print();
@@ -345,7 +353,8 @@ TEST(AssemblerIa329) {
InitializeVM();
v8::HandleScope scope;
v8::internal::byte buffer[256];
- MacroAssembler assm(Isolate::Current(), buffer, sizeof buffer);
+ Isolate* isolate = Isolate::Current();
+ MacroAssembler assm(isolate, buffer, sizeof buffer);
enum { kEqual = 0, kGreater = 1, kLess = 2, kNaN = 3, kUndefined = 4 };
Label equal_l, less_l, greater_l, nan_l;
__ fld_d(Operand(esp, 3 * kPointerSize));
@@ -378,10 +387,10 @@ TEST(AssemblerIa329) {
CodeDesc desc;
assm.GetCode(&desc);
- Code* code = Code::cast(HEAP->CreateCode(
+ Code* code = Code::cast(isolate->heap()->CreateCode(
desc,
Code::ComputeFlags(Code::STUB),
- Handle<Object>(HEAP->undefined_value()))->ToObjectChecked());
+ Handle<Code>())->ToObjectChecked());
CHECK(code->IsCode());
#ifdef OBJECT_PRINT
Code::cast(code)->Print();
@@ -399,7 +408,8 @@ TEST(AssemblerIa3210) {
// Test chaining of label usages within instructions (issue 1644).
InitializeVM();
v8::HandleScope scope;
- Assembler assm(Isolate::Current(), NULL, 0);
+ Isolate* isolate = Isolate::Current();
+ Assembler assm(isolate, NULL, 0);
Label target;
__ j(equal, &target);
@@ -413,7 +423,8 @@ TEST(AssemblerMultiByteNop) {
InitializeVM();
v8::HandleScope scope;
v8::internal::byte buffer[1024];
- Assembler assm(Isolate::Current(), buffer, sizeof(buffer));
+ Isolate* isolate = Isolate::Current();
+ Assembler assm(isolate, buffer, sizeof(buffer));
__ push(ebx);
__ push(ecx);
__ push(edx);
@@ -462,10 +473,10 @@ TEST(AssemblerMultiByteNop) {
CodeDesc desc;
assm.GetCode(&desc);
- Code* code = Code::cast(HEAP->CreateCode(
+ Code* code = Code::cast(isolate->heap()->CreateCode(
desc,
Code::ComputeFlags(Code::STUB),
- Handle<Object>(HEAP->undefined_value()))->ToObjectChecked());
+ Handle<Code>())->ToObjectChecked());
CHECK(code->IsCode());
F0 f = FUNCTION_CAST<F0>(code->entry());
@@ -474,6 +485,94 @@ TEST(AssemblerMultiByteNop) {
}
+#ifdef __GNUC__
+#define ELEMENT_COUNT 4
+
+void DoSSE2(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
+ HandleScope scope(isolate);
+
+ CHECK(args[0]->IsArray());
+ v8::Local<v8::Array> vec = v8::Local<v8::Array>::Cast(args[0]);
+ CHECK_EQ(ELEMENT_COUNT, vec->Length());
+
+ v8::internal::byte buffer[256];
+ Assembler assm(isolate, buffer, sizeof buffer);
+
+ ASSERT(CpuFeatures::IsSupported(SSE2));
+ CpuFeatureScope fscope(&assm, SSE2);
+
+ // Remove the return address from the stack to fix the stack frame alignment.
+ __ pop(ecx);
+
+ // Store input vector on the stack.
+ for (int i = 0; i < ELEMENT_COUNT; ++i) {
+ __ push(Immediate(vec->Get(i)->Int32Value()));
+ }
+
+ // Read the vector into an xmm register.
+ __ pxor(xmm0, xmm0);
+ __ movdqa(xmm0, Operand(esp, 0));
+ // Create mask and store it in the return register.
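+ // movmskps copies the sign bit of each packed float into bits 0-3 of eax.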
+ __ movmskps(eax, xmm0);
+
+ // Remove unused data from the stack.
+ __ add(esp, Immediate(ELEMENT_COUNT * sizeof(int32_t)));
+ // Restore return address.
+ __ push(ecx);
+
+ __ ret(0);
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+
+ Object* code = isolate->heap()->CreateCode(
+ desc,
+ Code::ComputeFlags(Code::STUB),
+ Handle<Code>())->ToObjectChecked();
+ CHECK(code->IsCode());
+
+ F0 f = FUNCTION_CAST<F0>(Code::cast(code)->entry());
+ int res = f();
+ args.GetReturnValue().Set(v8::Integer::New(res));
+}
+
+TEST(StackAlignmentForSSE2) {
+ CcTest::InitializeVM();
+ if (!CpuFeatures::IsSupported(SSE2)) return;
+
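+ // movdqa in DoSSE2 requires 16-byte-aligned operands, so this test only
+ // works if the ABI keeps activation frames 16-byte aligned.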
+ CHECK_EQ(0, OS::ActivationFrameAlignment() % 16);
+
+ v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::HandleScope handle_scope(isolate);
+ v8::Handle<v8::ObjectTemplate> global_template = v8::ObjectTemplate::New();
+ global_template->Set(v8_str("do_sse2"), v8::FunctionTemplate::New(DoSSE2));
+
+ LocalContext env(NULL, global_template);
+ CompileRun(
+ "function foo(vec) {"
+ " return do_sse2(vec);"
+ "}");
+
+ v8::Local<v8::Object> global_object = env->Global();
+ v8::Local<v8::Function> foo =
+ v8::Local<v8::Function>::Cast(global_object->Get(v8_str("foo")));
+
+ int32_t vec[ELEMENT_COUNT] = { -1, 1, 1, 1 };
+ v8::Local<v8::Array> v8_vec = v8::Array::New(ELEMENT_COUNT);
+ for (int i = 0; i < ELEMENT_COUNT; i++) {
+ v8_vec->Set(i, v8_num(vec[i]));
+ }
+
+ v8::Local<v8::Value> args[] = { v8_vec };
+ v8::Local<v8::Value> result = foo->Call(global_object, 1, args);
+
+ // The mask should be 0b1000.
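+ // Only vec[0] == -1 has its sign bit set, and the pushes in DoSSE2 leave
+ // it in the highest lane, hence bit 3.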
+ CHECK_EQ(8, result->Int32Value());
+}
+
+#undef ELEMENT_COUNT
+#endif // __GNUC__
#undef __
diff --git a/src/3rdparty/v8/test/cctest/test-assembler-mips.cc b/src/3rdparty/v8/test/cctest/test-assembler-mips.cc
index 6985433..2abe6db 100644
--- a/src/3rdparty/v8/test/cctest/test-assembler-mips.cc
+++ b/src/3rdparty/v8/test/cctest/test-assembler-mips.cc
@@ -76,7 +76,7 @@ TEST(MIPS0) {
Object* code = HEAP->CreateCode(
desc,
Code::ComputeFlags(Code::STUB),
- Handle<Object>(HEAP->undefined_value()))->ToObjectChecked();
+ Handle<Code>())->ToObjectChecked();
CHECK(code->IsCode());
F2 f = FUNCTION_CAST<F2>(Code::cast(code)->entry());
int res = reinterpret_cast<int>(CALL_GENERATED_CODE(f, 0xab0, 0xc, 0, 0, 0));
@@ -114,7 +114,7 @@ TEST(MIPS1) {
Object* code = HEAP->CreateCode(
desc,
Code::ComputeFlags(Code::STUB),
- Handle<Object>(HEAP->undefined_value()))->ToObjectChecked();
+ Handle<Code>())->ToObjectChecked();
CHECK(code->IsCode());
F1 f = FUNCTION_CAST<F1>(Code::cast(code)->entry());
int res = reinterpret_cast<int>(CALL_GENERATED_CODE(f, 50, 0, 0, 0, 0));
@@ -254,7 +254,7 @@ TEST(MIPS2) {
Object* code = HEAP->CreateCode(
desc,
Code::ComputeFlags(Code::STUB),
- Handle<Object>(HEAP->undefined_value()))->ToObjectChecked();
+ Handle<Code>())->ToObjectChecked();
CHECK(code->IsCode());
F2 f = FUNCTION_CAST<F2>(Code::cast(code)->entry());
int res = reinterpret_cast<int>(CALL_GENERATED_CODE(f, 0xab0, 0xc, 0, 0, 0));
@@ -276,6 +276,8 @@ TEST(MIPS3) {
double e;
double f;
double g;
+ double h;
+ double i;
} T;
T t;
@@ -312,6 +314,13 @@ TEST(MIPS3) {
__ sdc1(f14, MemOperand(a0, OFFSET_OF(T, g)) );
// g = sqrt(f) = 10.97451593465515908537
+ if (kArchVariant == kMips32r2) {
+ __ ldc1(f4, MemOperand(a0, OFFSET_OF(T, h)) );
+ __ ldc1(f6, MemOperand(a0, OFFSET_OF(T, i)) );
+ __ madd_d(f14, f6, f4, f6);
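+    // f14 = h * i + i = 1.5 * 2.75 + 2.75 = 6.875; checked below as t.h.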
+ __ sdc1(f14, MemOperand(a0, OFFSET_OF(T, h)) );
+ }
+
__ jr(ra);
__ nop();
@@ -320,7 +329,7 @@ TEST(MIPS3) {
Object* code = HEAP->CreateCode(
desc,
Code::ComputeFlags(Code::STUB),
- Handle<Object>(HEAP->undefined_value()))->ToObjectChecked();
+ Handle<Code>())->ToObjectChecked();
CHECK(code->IsCode());
F3 f = FUNCTION_CAST<F3>(Code::cast(code)->entry());
t.a = 1.5e14;
@@ -329,6 +338,8 @@ TEST(MIPS3) {
t.d = 0.0;
t.e = 0.0;
t.f = 0.0;
+ t.h = 1.5;
+ t.i = 2.75;
Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
USE(dummy);
CHECK_EQ(1.5e14, t.a);
@@ -338,6 +349,7 @@ TEST(MIPS3) {
CHECK_EQ(1.8066e16, t.e);
CHECK_EQ(120.44, t.f);
CHECK_EQ(10.97451593465515908537, t.g);
+ CHECK_EQ(6.875, t.h);
}
}
@@ -386,7 +398,7 @@ TEST(MIPS4) {
Object* code = HEAP->CreateCode(
desc,
Code::ComputeFlags(Code::STUB),
- Handle<Object>(HEAP->undefined_value()))->ToObjectChecked();
+ Handle<Code>())->ToObjectChecked();
CHECK(code->IsCode());
F3 f = FUNCTION_CAST<F3>(Code::cast(code)->entry());
t.a = 1.5e22;
@@ -455,7 +467,7 @@ TEST(MIPS5) {
Object* code = HEAP->CreateCode(
desc,
Code::ComputeFlags(Code::STUB),
- Handle<Object>(HEAP->undefined_value()))->ToObjectChecked();
+ Handle<Code>())->ToObjectChecked();
CHECK(code->IsCode());
F3 f = FUNCTION_CAST<F3>(Code::cast(code)->entry());
t.a = 1.5e4;
@@ -528,7 +540,7 @@ TEST(MIPS6) {
Object* code = HEAP->CreateCode(
desc,
Code::ComputeFlags(Code::STUB),
- Handle<Object>(HEAP->undefined_value()))->ToObjectChecked();
+ Handle<Code>())->ToObjectChecked();
CHECK(code->IsCode());
F3 f = FUNCTION_CAST<F3>(Code::cast(code)->entry());
t.ui = 0x11223344;
@@ -607,7 +619,7 @@ TEST(MIPS7) {
Object* code = HEAP->CreateCode(
desc,
Code::ComputeFlags(Code::STUB),
- Handle<Object>(HEAP->undefined_value()))->ToObjectChecked();
+ Handle<Code>())->ToObjectChecked();
CHECK(code->IsCode());
F3 f = FUNCTION_CAST<F3>(Code::cast(code)->entry());
t.a = 1.5e14;
@@ -706,7 +718,7 @@ TEST(MIPS8) {
Object* code = HEAP->CreateCode(
desc,
Code::ComputeFlags(Code::STUB),
- Handle<Object>(HEAP->undefined_value()))->ToObjectChecked();
+ Handle<Code>())->ToObjectChecked();
CHECK(code->IsCode());
F3 f = FUNCTION_CAST<F3>(Code::cast(code)->entry());
t.input = 0x12345678;
@@ -753,7 +765,7 @@ TEST(MIPS9) {
Object* code = HEAP->CreateCode(
desc,
Code::ComputeFlags(Code::STUB),
- Handle<Object>(HEAP->undefined_value()))->ToObjectChecked();
+ Handle<Code>())->ToObjectChecked();
CHECK(code->IsCode());
}
@@ -814,7 +826,7 @@ TEST(MIPS10) {
Object* code = HEAP->CreateCode(
desc,
Code::ComputeFlags(Code::STUB),
- Handle<Object>(HEAP->undefined_value()))->ToObjectChecked();
+ Handle<Code>())->ToObjectChecked();
CHECK(code->IsCode());
F3 f = FUNCTION_CAST<F3>(Code::cast(code)->entry());
t.a = 2.147483647e9; // 0x7fffffff -> 0x41DFFFFFFFC00000 as double.
@@ -946,7 +958,7 @@ TEST(MIPS11) {
Object* code = HEAP->CreateCode(
desc,
Code::ComputeFlags(Code::STUB),
- Handle<Object>(HEAP->undefined_value()))->ToObjectChecked();
+ Handle<Code>())->ToObjectChecked();
CHECK(code->IsCode());
F3 f = FUNCTION_CAST<F3>(Code::cast(code)->entry());
t.reg_init = 0xaabbccdd;
@@ -1050,7 +1062,7 @@ TEST(MIPS12) {
Object* code = HEAP->CreateCode(
desc,
Code::ComputeFlags(Code::STUB),
- Handle<Object>(HEAP->undefined_value()))->ToObjectChecked();
+ Handle<Code>())->ToObjectChecked();
CHECK(code->IsCode());
F3 f = FUNCTION_CAST<F3>(Code::cast(code)->entry());
t.x = 1;
@@ -1109,7 +1121,7 @@ TEST(MIPS13) {
Object* code = HEAP->CreateCode(
desc,
Code::ComputeFlags(Code::STUB),
- Handle<Object>(HEAP->undefined_value()))->ToObjectChecked();
+ Handle<Code>())->ToObjectChecked();
CHECK(code->IsCode());
F3 f = FUNCTION_CAST<F3>(Code::cast(code)->entry());
@@ -1233,7 +1245,7 @@ TEST(MIPS14) {
Object* code = HEAP->CreateCode(
desc,
Code::ComputeFlags(Code::STUB),
- Handle<Object>(HEAP->undefined_value()))->ToObjectChecked();
+ Handle<Code>())->ToObjectChecked();
CHECK(code->IsCode());
F3 f = FUNCTION_CAST<F3>(Code::cast(code)->entry());
diff --git a/src/3rdparty/v8/test/cctest/test-assembler-x64.cc b/src/3rdparty/v8/test/cctest/test-assembler-x64.cc
index d81923f..f134f6c 100644
--- a/src/3rdparty/v8/test/cctest/test-assembler-x64.cc
+++ b/src/3rdparty/v8/test/cctest/test-assembler-x64.cc
@@ -62,6 +62,7 @@ using v8::internal::rdx;
using v8::internal::rsi;
using v8::internal::rsp;
using v8::internal::times_1;
+using v8::internal::xmm0;
// Test the x64 assembler by compiling some simple functions into
// a buffer and executing them. These tests do not initialize the
@@ -376,7 +377,8 @@ TEST(AssemblerMultiByteNop) {
InitializeVM();
v8::HandleScope scope;
v8::internal::byte buffer[1024];
- Assembler assm(Isolate::Current(), buffer, sizeof(buffer));
+ Isolate* isolate = Isolate::Current();
+ Assembler assm(isolate, buffer, sizeof(buffer));
__ push(rbx);
__ push(rcx);
__ push(rdx);
@@ -425,11 +427,10 @@ TEST(AssemblerMultiByteNop) {
CodeDesc desc;
assm.GetCode(&desc);
- Code* code = Code::cast(HEAP->CreateCode(
+ Code* code = Code::cast(isolate->heap()->CreateCode(
desc,
Code::ComputeFlags(Code::STUB),
- v8::internal::Handle<v8::internal::Object>(
- HEAP->undefined_value()))->ToObjectChecked());
+ v8::internal::Handle<Code>())->ToObjectChecked());
CHECK(code->IsCode());
F0 f = FUNCTION_CAST<F0>(code->entry());
@@ -438,6 +439,91 @@ TEST(AssemblerMultiByteNop) {
}
+#ifdef __GNUC__
+#define ELEMENT_COUNT 4
+
+void DoSSE2(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+ v8::internal::byte buffer[1024];
+
+ CHECK(args[0]->IsArray());
+ v8::Local<v8::Array> vec = v8::Local<v8::Array>::Cast(args[0]);
+ CHECK_EQ(ELEMENT_COUNT, vec->Length());
+
+ Isolate* isolate = Isolate::Current();
+ Assembler assm(isolate, buffer, sizeof(buffer));
+
+ // Remove the return address from the stack to fix the stack frame alignment.
+ __ pop(rcx);
+
+ // Store input vector on the stack.
+ for (int i = 0; i < ELEMENT_COUNT; i++) {
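+    // Pack two consecutive 32-bit elements into each 64-bit push.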
+ __ movl(rax, Immediate(vec->Get(i)->Int32Value()));
+ __ shl(rax, Immediate(0x20));
+ __ or_(rax, Immediate(vec->Get(++i)->Int32Value()));
+ __ push(rax);
+ }
+
+ // Read the vector into an xmm register.
+ __ xorps(xmm0, xmm0);
+ __ movdqa(xmm0, Operand(rsp, 0));
+ // Create mask and store it in the return register.
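+ // movmskps copies the sign bit of each packed float into bits 0-3 of rax.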
+ __ movmskps(rax, xmm0);
+
+ // Remove unused data from the stack.
+ __ addq(rsp, Immediate(ELEMENT_COUNT * sizeof(int32_t)));
+ // Restore return address.
+ __ push(rcx);
+
+ __ ret(0);
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Code* code = Code::cast(isolate->heap()->CreateCode(
+ desc,
+ Code::ComputeFlags(Code::STUB),
+ v8::internal::Handle<Code>())->ToObjectChecked());
+ CHECK(code->IsCode());
+
+ F0 f = FUNCTION_CAST<F0>(code->entry());
+ int res = f();
+ args.GetReturnValue().Set(v8::Integer::New(res));
+}
+
+TEST(StackAlignmentForSSE2) {
+ CHECK_EQ(0, OS::ActivationFrameAlignment() % 16);
+
+ v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::HandleScope handle_scope(isolate);
+ v8::Handle<v8::ObjectTemplate> global_template = v8::ObjectTemplate::New();
+ global_template->Set(v8_str("do_sse2"), v8::FunctionTemplate::New(DoSSE2));
+
+ LocalContext env(NULL, global_template);
+ CompileRun(
+ "function foo(vec) {"
+ " return do_sse2(vec);"
+ "}");
+
+ v8::Local<v8::Object> global_object = env->Global();
+ v8::Local<v8::Function> foo =
+ v8::Local<v8::Function>::Cast(global_object->Get(v8_str("foo")));
+
+ int32_t vec[ELEMENT_COUNT] = { -1, 1, 1, 1 };
+ v8::Local<v8::Array> v8_vec = v8::Array::New(ELEMENT_COUNT);
+ for (int i = 0; i < ELEMENT_COUNT; i++) {
+ v8_vec->Set(i, v8_num(vec[i]));
+ }
+
+ v8::Local<v8::Value> args[] = { v8_vec };
+ v8::Local<v8::Value> result = foo->Call(global_object, 1, args);
+
+ // The mask should be 0b1000.
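+ // Only vec[0] == -1 has its sign bit set, and the packed pushes in DoSSE2
+ // leave it in the highest lane, hence bit 3.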
+ CHECK_EQ(8, result->Int32Value());
+}
+
+#undef ELEMENT_COUNT
+#endif // __GNUC__
#undef __
diff --git a/src/3rdparty/v8/test/cctest/test-compiler.cc b/src/3rdparty/v8/test/cctest/test-compiler.cc
index 7700a98..f6632d4 100644
--- a/src/3rdparty/v8/test/cctest/test-compiler.cc
+++ b/src/3rdparty/v8/test/cctest/test-compiler.cc
@@ -94,16 +94,19 @@ static void InitializeVM() {
static MaybeObject* GetGlobalProperty(const char* name) {
- Handle<String> symbol = FACTORY->LookupAsciiSymbol(name);
- return Isolate::Current()->context()->global_object()->GetProperty(*symbol);
+ Handle<String> internalized_name = FACTORY->InternalizeUtf8String(name);
+ return Isolate::Current()->context()->global_object()->GetProperty(
+ *internalized_name);
}
static void SetGlobalProperty(const char* name, Object* value) {
- Handle<Object> object(value);
- Handle<String> symbol = FACTORY->LookupAsciiSymbol(name);
- Handle<JSObject> global(Isolate::Current()->context()->global_object());
- SetProperty(global, symbol, object, NONE, kNonStrictMode);
+ Isolate* isolate = Isolate::Current();
+ Handle<Object> object(value, isolate);
+ Handle<String> internalized_name =
+ isolate->factory()->InternalizeUtf8String(name);
+ Handle<JSObject> global(isolate->context()->global_object());
+ SetProperty(isolate, global, internalized_name, object, NONE, kNonStrictMode);
}
@@ -264,11 +267,11 @@ TEST(UncaughtThrow) {
Handle<JSFunction> fun = Compile(source);
CHECK(!fun.is_null());
bool has_pending_exception;
- Handle<JSObject> global(Isolate::Current()->context()->global_object());
+ Isolate* isolate = fun->GetIsolate();
+ Handle<JSObject> global(isolate->context()->global_object());
Execution::Call(fun, global, 0, NULL, &has_pending_exception);
CHECK(has_pending_exception);
- CHECK_EQ(42.0, Isolate::Current()->pending_exception()->
- ToObjectChecked()->Number());
+ CHECK_EQ(42.0, isolate->pending_exception()->ToObjectChecked()->Number());
}
@@ -286,6 +289,7 @@ TEST(C2JSFrames) {
Handle<JSFunction> fun0 = Compile(source);
CHECK(!fun0.is_null());
+ Isolate* isolate = fun0->GetIsolate();
// Run the generated code to populate the global object with 'foo'.
bool has_pending_exception;
@@ -293,13 +297,16 @@ TEST(C2JSFrames) {
Execution::Call(fun0, global, 0, NULL, &has_pending_exception);
CHECK(!has_pending_exception);
- Object* foo_symbol = FACTORY->LookupAsciiSymbol("foo")->ToObjectChecked();
- MaybeObject* fun1_object = Isolate::Current()->context()->global_object()->
- GetProperty(String::cast(foo_symbol));
- Handle<Object> fun1(fun1_object->ToObjectChecked());
+ Object* foo_string =
+ FACTORY->InternalizeOneByteString(STATIC_ASCII_VECTOR("foo"))->
+ ToObjectChecked();
+ MaybeObject* fun1_object = isolate->context()->global_object()->
+ GetProperty(String::cast(foo_string));
+ Handle<Object> fun1(fun1_object->ToObjectChecked(), isolate);
CHECK(fun1->IsJSFunction());
- Handle<Object> argv[] = { FACTORY->LookupAsciiSymbol("hello") };
+ Handle<Object> argv[] =
+ { FACTORY->InternalizeOneByteString(STATIC_ASCII_VECTOR("hello")) };
Execution::Call(Handle<JSFunction>::cast(fun1),
global,
ARRAY_SIZE(argv),
diff --git a/src/3rdparty/v8/test/cctest/test-cpu-profiler.cc b/src/3rdparty/v8/test/cctest/test-cpu-profiler.cc
index b10e688..d294f19 100644
--- a/src/3rdparty/v8/test/cctest/test-cpu-profiler.cc
+++ b/src/3rdparty/v8/test/cctest/test-cpu-profiler.cc
@@ -20,7 +20,7 @@ using i::TokenEnumerator;
TEST(StartStop) {
CpuProfilesCollection profiles;
ProfileGenerator generator(&profiles);
- ProfilerEventsProcessor processor(&generator);
+ ProfilerEventsProcessor processor(&generator, NULL, 100);
processor.Start();
processor.Stop();
processor.Join();
@@ -77,21 +77,24 @@ class TestSetup {
TEST(CodeEvents) {
InitializeVM();
+ i::Isolate* isolate = i::Isolate::Current();
+ i::Heap* heap = isolate->heap();
+ i::Factory* factory = isolate->factory();
TestSetup test_setup;
CpuProfilesCollection profiles;
profiles.StartProfiling("", 1);
ProfileGenerator generator(&profiles);
- ProfilerEventsProcessor processor(&generator);
+ ProfilerEventsProcessor processor(&generator, NULL, 100);
processor.Start();
// Enqueue code creation events.
- i::HandleScope scope;
+ i::HandleScope scope(isolate);
const char* aaa_str = "aaa";
- i::Handle<i::String> aaa_name = FACTORY->NewStringFromAscii(
+ i::Handle<i::String> aaa_name = factory->NewStringFromAscii(
i::Vector<const char>(aaa_str, i::StrLength(aaa_str)));
processor.CodeCreateEvent(i::Logger::FUNCTION_TAG,
*aaa_name,
- HEAP->empty_string(),
+ heap->empty_string(),
0,
ToAddress(0x1000),
0x100,
@@ -142,7 +145,7 @@ TEST(TickEvents) {
CpuProfilesCollection profiles;
profiles.StartProfiling("", 1);
ProfileGenerator generator(&profiles);
- ProfilerEventsProcessor processor(&generator);
+ ProfilerEventsProcessor processor(&generator, NULL, 100);
processor.Start();
processor.CodeCreateEvent(i::Logger::BUILTIN_TAG,
@@ -232,7 +235,7 @@ TEST(Issue1398) {
CpuProfilesCollection profiles;
profiles.StartProfiling("", 1);
ProfileGenerator generator(&profiles);
- ProfilerEventsProcessor processor(&generator);
+ ProfilerEventsProcessor processor(&generator, NULL, 100);
processor.Start();
processor.CodeCreateEvent(i::Logger::BUILTIN_TAG,
diff --git a/src/3rdparty/v8/test/cctest/test-debug.cc b/src/3rdparty/v8/test/cctest/test-debug.cc
index 3caeb1b..51eb203 100644
--- a/src/3rdparty/v8/test/cctest/test-debug.cc
+++ b/src/3rdparty/v8/test/cctest/test-debug.cc
@@ -137,13 +137,15 @@ class DebugLocalContext {
}
inline ~DebugLocalContext() {
context_->Exit();
- context_.Dispose();
+ context_.Dispose(context_->GetIsolate());
}
inline v8::Context* operator->() { return *context_; }
inline v8::Context* operator*() { return *context_; }
inline bool IsReady() { return !context_.IsEmpty(); }
void ExposeDebug() {
- v8::internal::Debug* debug = v8::internal::Isolate::Current()->debug();
+ v8::internal::Isolate* isolate =
+ reinterpret_cast<v8::internal::Isolate*>(context_->GetIsolate());
+ v8::internal::Debug* debug = isolate->debug();
// Expose the debug context global object in the global object for testing.
debug->Load();
debug->debug_context()->set_security_token(
@@ -152,10 +154,11 @@ class DebugLocalContext {
Handle<JSGlobalProxy> global(Handle<JSGlobalProxy>::cast(
v8::Utils::OpenHandle(*context_->Global())));
Handle<v8::internal::String> debug_string =
- FACTORY->LookupAsciiSymbol("debug");
- SetProperty(global, debug_string,
- Handle<Object>(debug->debug_context()->global_proxy()), DONT_ENUM,
- ::v8::internal::kNonStrictMode);
+ FACTORY->InternalizeOneByteString(STATIC_ASCII_VECTOR("debug"));
+ SetProperty(isolate, global, debug_string,
+ Handle<Object>(debug->debug_context()->global_proxy(), isolate),
+ DONT_ENUM,
+ ::v8::internal::kNonStrictMode);
}
private:
@@ -197,10 +200,11 @@ static bool HasDebugInfo(v8::Handle<v8::Function> fun) {
// number.
static int SetBreakPoint(Handle<v8::internal::JSFunction> fun, int position) {
static int break_point = 0;
- v8::internal::Debug* debug = v8::internal::Isolate::Current()->debug();
+ v8::internal::Isolate* isolate = fun->GetIsolate();
+ v8::internal::Debug* debug = isolate->debug();
debug->SetBreakPoint(
fun,
- Handle<Object>(v8::internal::Smi::FromInt(++break_point)),
+ Handle<Object>(v8::internal::Smi::FromInt(++break_point), isolate),
&position);
return break_point;
}
@@ -281,9 +285,10 @@ static int SetScriptBreakPointByNameFromJS(const char* script_name,
// Clear a break point.
static void ClearBreakPoint(int break_point) {
- v8::internal::Debug* debug = v8::internal::Isolate::Current()->debug();
+ v8::internal::Isolate* isolate = v8::internal::Isolate::Current();
+ v8::internal::Debug* debug = isolate->debug();
debug->ClearBreakPoint(
- Handle<Object>(v8::internal::Smi::FromInt(break_point)));
+ Handle<Object>(v8::internal::Smi::FromInt(break_point), isolate));
}
@@ -425,7 +430,7 @@ void CheckDebuggerUnloaded(bool check_functions) {
HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask);
// Iterate the heap and check that there are no debugger-related objects left.
- HeapIterator iterator;
+ HeapIterator iterator(HEAP);
for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
CHECK(!obj->IsDebugInfo());
CHECK(!obj->IsBreakPointInfo());
@@ -2330,8 +2335,8 @@ TEST(ScriptBreakPointNoRelocation) {
// Compile the script and call the function.
v8::ScriptOrigin origin(v8::String::New("test.html"), v8::Integer::New(0));
v8::Script::Compile(script1, &origin)->Run();
- v8::Local<v8::Function> f
- = v8::Local<v8::Function>::Cast(env->Global()->Get(v8::String::New("f")));
+ v8::Local<v8::Function> f =
+ v8::Local<v8::Function>::Cast(env->Global()->Get(v8::String::New("f")));
f->Call(env->Global(), 0, NULL);
// Check that a break point was not hit
@@ -2348,8 +2353,8 @@ TEST(ScriptBreakPointNoRelocation) {
// Compile the script and call the new function
v8::Script::Compile(script2, &origin)->Run();
- v8::Local<v8::Function> g
- = v8::Local<v8::Function>::Cast(env->Global()->Get(v8::String::New("g")));
+ v8::Local<v8::Function> g =
+ v8::Local<v8::Function>::Cast(env->Global()->Get(v8::String::New("g")));
g->Call(env->Global(), 0, NULL);
// Check that a break point was not hit
@@ -4289,7 +4294,7 @@ TEST(NoBreakWhenBootstrapping) {
const char* extension_names[] = { "simpletest" };
v8::ExtensionConfiguration extensions(1, extension_names);
v8::Persistent<v8::Context> context = v8::Context::New(&extensions);
- context.Dispose();
+ context.Dispose(context->GetIsolate());
}
// Check that no DebugBreak events occurred during the context creation.
CHECK_EQ(0, break_point_hit_count);
@@ -6277,7 +6282,7 @@ static v8::Handle<v8::Value> expected_context_data;
// Check that the expected context is the one generating the debug event.
static void ContextCheckMessageHandler(const v8::Debug::Message& message) {
CHECK(message.GetEventContext() == expected_context);
- CHECK(message.GetEventContext()->GetData()->StrictEquals(
+ CHECK(message.GetEventContext()->GetEmbedderData(0)->StrictEquals(
expected_context_data));
message_handler_hit_count++;
@@ -6310,16 +6315,16 @@ TEST(ContextData) {
context_2 = v8::Context::New(NULL, global_template, global_object);
// Default data value is undefined.
- CHECK(context_1->GetData()->IsUndefined());
- CHECK(context_2->GetData()->IsUndefined());
+ CHECK(context_1->GetEmbedderData(0)->IsUndefined());
+ CHECK(context_2->GetEmbedderData(0)->IsUndefined());
// Set and check different data values.
v8::Handle<v8::String> data_1 = v8::String::New("1");
v8::Handle<v8::String> data_2 = v8::String::New("2");
- context_1->SetData(data_1);
- context_2->SetData(data_2);
- CHECK(context_1->GetData()->StrictEquals(data_1));
- CHECK(context_2->GetData()->StrictEquals(data_2));
+ context_1->SetEmbedderData(0, data_1);
+ context_2->SetEmbedderData(0, data_2);
+ CHECK(context_1->GetEmbedderData(0)->StrictEquals(data_1));
+ CHECK(context_2->GetEmbedderData(0)->StrictEquals(data_2));
// Simple test function which causes a break.
const char* source = "function f() { debugger; }";
@@ -6474,12 +6479,12 @@ static void ExecuteScriptForContextCheck() {
context_1 = v8::Context::New(NULL, global_template);
// Default data value is undefined.
- CHECK(context_1->GetData()->IsUndefined());
+ CHECK(context_1->GetEmbedderData(0)->IsUndefined());
// Set and check a data value.
v8::Handle<v8::String> data_1 = v8::String::New("1");
- context_1->SetData(data_1);
- CHECK(context_1->GetData()->StrictEquals(data_1));
+ context_1->SetEmbedderData(0, data_1);
+ CHECK(context_1->GetEmbedderData(0)->StrictEquals(data_1));
// Simple test function with eval that causes a break.
const char* source = "function f() { eval('debugger;'); }";
@@ -6520,7 +6525,7 @@ static int continue_command_send_count = 0;
static void DebugEvalContextCheckMessageHandler(
const v8::Debug::Message& message) {
CHECK(message.GetEventContext() == expected_context);
- CHECK(message.GetEventContext()->GetData()->StrictEquals(
+ CHECK(message.GetEventContext()->GetEmbedderData(0)->StrictEquals(
expected_context_data));
message_handler_hit_count++;
@@ -7127,7 +7132,7 @@ TEST(DebugEventContext) {
expected_context = v8::Context::New();
v8::Context::Scope context_scope(expected_context);
v8::Script::Compile(v8::String::New("(function(){debugger;})();"))->Run();
- expected_context.Dispose();
+ expected_context.Dispose(expected_context->GetIsolate());
expected_context.Clear();
v8::Debug::SetDebugEventListener(NULL);
expected_context_data = v8::Handle<v8::Value>();
diff --git a/src/3rdparty/v8/test/cctest/test-decls.cc b/src/3rdparty/v8/test/cctest/test-decls.cc
index 6fc6012..efdc394 100644
--- a/src/3rdparty/v8/test/cctest/test-decls.cc
+++ b/src/3rdparty/v8/test/cctest/test-decls.cc
@@ -53,7 +53,7 @@ class DeclarationContext {
virtual ~DeclarationContext() {
if (is_initialized_) {
context_->Exit();
- context_.Dispose();
+ context_.Dispose(context_->GetIsolate());
}
}
@@ -161,6 +161,7 @@ void DeclarationContext::Check(const char* source,
CHECK_EQ(value, catcher.Exception());
}
}
+ HEAP->CollectAllAvailableGarbage(); // Clean slate for the next test.
}
@@ -190,7 +191,8 @@ v8::Handle<Integer> DeclarationContext::HandleQuery(Local<String> key,
DeclarationContext* DeclarationContext::GetInstance(const AccessorInfo& info) {
- return static_cast<DeclarationContext*>(External::Unwrap(info.Data()));
+ void* value = External::Cast(*info.Data())->Value();
+ return static_cast<DeclarationContext*>(value);
}
@@ -699,7 +701,7 @@ class SimpleContext {
virtual ~SimpleContext() {
context_->Exit();
- context_.Dispose();
+ context_.Dispose(context_->GetIsolate());
}
void Check(const char* source,
@@ -734,7 +736,7 @@ class SimpleContext {
};
-TEST(MultiScriptConflicts) {
+TEST(CrossScriptReferences) {
HandleScope scope;
{ SimpleContext context;
@@ -772,135 +774,70 @@ TEST(MultiScriptConflicts) {
context.Check("function x() { return 7 }; x",
EXPECT_EXCEPTION);
}
+}
+
+TEST(CrossScriptReferencesHarmony) {
i::FLAG_use_strict = true;
i::FLAG_harmony_scoping = true;
+ i::FLAG_harmony_modules = true;
- { SimpleContext context;
- context.Check("var x = 1; x",
- EXPECT_RESULT, Number::New(1));
- context.Check("x",
- EXPECT_RESULT, Number::New(1));
- context.Check("this.x",
- EXPECT_RESULT, Number::New(1));
- }
-
- { SimpleContext context;
- context.Check("function x() { return 4 }; x()",
- EXPECT_RESULT, Number::New(4));
- context.Check("x()",
- EXPECT_RESULT, Number::New(4));
- context.Check("this.x()",
- EXPECT_RESULT, Number::New(4));
- }
+ HandleScope scope;
- { SimpleContext context;
- context.Check("let x = 2; x",
- EXPECT_RESULT, Number::New(2));
- context.Check("x",
- EXPECT_RESULT, Number::New(2));
- // TODO(rossberg): The current ES6 draft spec does not reflect lexical
- // bindings on the global object. However, this will probably change, in
- // which case we reactivate the following test.
- // context.Check("this.x",
- // EXPECT_RESULT, Number::New(2));
- }
+ const char* decs[] = {
+ "var x = 1; x", "x", "this.x",
+ "function x() { return 1 }; x()", "x()", "this.x()",
+ "let x = 1; x", "x", "this.x",
+ "const x = 1; x", "x", "this.x",
+ "module x { export let a = 1 }; x.a", "x.a", "this.x.a",
+ NULL
+ };
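+  // Each triple is a declaration, a plain read, and a read through 'this';
+  // the loop below consumes decs three entries at a time.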
- { SimpleContext context;
- context.Check("const x = 3; x",
- EXPECT_RESULT, Number::New(3));
- context.Check("x",
- EXPECT_RESULT, Number::New(3));
+ for (int i = 0; decs[i] != NULL; i += 3) {
+ SimpleContext context;
+ context.Check(decs[i], EXPECT_RESULT, Number::New(1));
+ context.Check(decs[i+1], EXPECT_RESULT, Number::New(1));
// TODO(rossberg): The current ES6 draft spec does not reflect lexical
// bindings on the global object. However, this will probably change, in
// which case we reactivate the following test.
- // context.Check("this.x",
- // EXPECT_RESULT, Number::New(3));
- }
-
- // TODO(rossberg): All of the below should actually be errors in Harmony.
-
- { SimpleContext context;
- context.Check("var x = 1; x",
- EXPECT_RESULT, Number::New(1));
- context.Check("let x = 2; x",
- EXPECT_RESULT, Number::New(2));
- }
-
- { SimpleContext context;
- context.Check("var x = 1; x",
- EXPECT_RESULT, Number::New(1));
- context.Check("const x = 2; x",
- EXPECT_RESULT, Number::New(2));
- }
-
- { SimpleContext context;
- context.Check("function x() { return 1 }; x()",
- EXPECT_RESULT, Number::New(1));
- context.Check("let x = 2; x",
- EXPECT_RESULT, Number::New(2));
- }
-
- { SimpleContext context;
- context.Check("function x() { return 1 }; x()",
- EXPECT_RESULT, Number::New(1));
- context.Check("const x = 2; x",
- EXPECT_RESULT, Number::New(2));
- }
-
- { SimpleContext context;
- context.Check("let x = 1; x",
- EXPECT_RESULT, Number::New(1));
- context.Check("var x = 2; x",
- EXPECT_ERROR);
- }
-
- { SimpleContext context;
- context.Check("let x = 1; x",
- EXPECT_RESULT, Number::New(1));
- context.Check("let x = 2; x",
- EXPECT_ERROR);
- }
-
- { SimpleContext context;
- context.Check("let x = 1; x",
- EXPECT_RESULT, Number::New(1));
- context.Check("const x = 2; x",
- EXPECT_ERROR);
+ if (i/3 < 2) context.Check(decs[i+2], EXPECT_RESULT, Number::New(1));
}
+}
- { SimpleContext context;
- context.Check("let x = 1; x",
- EXPECT_RESULT, Number::New(1));
- context.Check("function x() { return 2 }; x()",
- EXPECT_ERROR);
- }
- { SimpleContext context;
- context.Check("const x = 1; x",
- EXPECT_RESULT, Number::New(1));
- context.Check("var x = 2; x",
- EXPECT_ERROR);
- }
+TEST(CrossScriptConflicts) {
+ i::FLAG_use_strict = true;
+ i::FLAG_harmony_scoping = true;
+ i::FLAG_harmony_modules = true;
- { SimpleContext context;
- context.Check("const x = 1; x",
- EXPECT_RESULT, Number::New(1));
- context.Check("let x = 2; x",
- EXPECT_ERROR);
- }
+ HandleScope scope;
- { SimpleContext context;
- context.Check("const x = 1; x",
- EXPECT_RESULT, Number::New(1));
- context.Check("const x = 2; x",
- EXPECT_ERROR);
- }
+ const char* firsts[] = {
+ "var x = 1; x",
+ "function x() { return 1 }; x()",
+ "let x = 1; x",
+ "const x = 1; x",
+ "module x { export let a = 1 }; x.a",
+ NULL
+ };
+ const char* seconds[] = {
+ "var x = 2; x",
+ "function x() { return 2 }; x()",
+ "let x = 2; x",
+ "const x = 2; x",
+ "module x { export let a = 2 }; x.a",
+ NULL
+ };
- { SimpleContext context;
- context.Check("const x = 1; x",
- EXPECT_RESULT, Number::New(1));
- context.Check("function x() { return 2 }; x()",
- EXPECT_ERROR);
+ for (int i = 0; firsts[i] != NULL; ++i) {
+ for (int j = 0; seconds[j] != NULL; ++j) {
+ SimpleContext context;
+ context.Check(firsts[i], EXPECT_RESULT, Number::New(1));
+ // TODO(rossberg): All tests should actually be errors in Harmony,
+ // but we currently do not detect the cases where the first declaration
+ // is not lexical.
+ context.Check(seconds[j],
+ i < 2 ? EXPECT_RESULT : EXPECT_ERROR, Number::New(2));
+ }
}
}
diff --git a/src/3rdparty/v8/test/cctest/test-dictionary.cc b/src/3rdparty/v8/test/cctest/test-dictionary.cc
index 00e3833..2acd4e6 100644
--- a/src/3rdparty/v8/test/cctest/test-dictionary.cc
+++ b/src/3rdparty/v8/test/cctest/test-dictionary.cc
@@ -114,7 +114,8 @@ TEST(ObjectHashSetCausesGC) {
// Simulate a full heap so that generating an identity hash code
// in subsequent calls will request GC.
- FLAG_gc_interval = 0;
+ SimulateFullSpace(HEAP->new_space());
+ SimulateFullSpace(HEAP->old_pointer_space());
// Calling Contains() should not cause GC ever.
CHECK(!table->Contains(*key));
@@ -143,7 +144,8 @@ TEST(ObjectHashTableCausesGC) {
// Simulate a full heap so that generating an identity hash code
// in subsequent calls will request GC.
- FLAG_gc_interval = 0;
+ SimulateFullSpace(HEAP->new_space());
+ SimulateFullSpace(HEAP->old_pointer_space());
// Calling Lookup() should not cause GC ever.
CHECK(table->Lookup(*key)->IsTheHole());
diff --git a/src/3rdparty/v8/test/cctest/test-disasm-arm.cc b/src/3rdparty/v8/test/cctest/test-disasm-arm.cc
index 3a2d9e8..8d0a18e 100644
--- a/src/3rdparty/v8/test/cctest/test-disasm-arm.cc
+++ b/src/3rdparty/v8/test/cctest/test-disasm-arm.cc
@@ -425,10 +425,14 @@ TEST(Vfp) {
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
+ COMPARE(vmov(d0, r2, r3),
+ "ec432b10 vmov d0, r2, r3");
+ COMPARE(vmov(r2, r3, d0),
+ "ec532b10 vmov r2, r3, d0");
COMPARE(vmov(d0, d1),
"eeb00b41 vmov.f64 d0, d1");
COMPARE(vmov(d3, d3, eq),
- "0eb03b43 vmov.f64eq d3, d3");
+ "0eb03b43 vmoveq.f64 d3, d3");
COMPARE(vmov(s0, s31),
"eeb00a6f vmov.f32 s0, s31");
@@ -446,43 +450,53 @@ TEST(Vfp) {
COMPARE(vabs(d0, d1),
"eeb00bc1 vabs.f64 d0, d1");
COMPARE(vabs(d3, d4, mi),
- "4eb03bc4 vabs.f64mi d3, d4");
+ "4eb03bc4 vabsmi.f64 d3, d4");
COMPARE(vneg(d0, d1),
"eeb10b41 vneg.f64 d0, d1");
COMPARE(vneg(d3, d4, mi),
- "4eb13b44 vneg.f64mi d3, d4");
+ "4eb13b44 vnegmi.f64 d3, d4");
COMPARE(vadd(d0, d1, d2),
"ee310b02 vadd.f64 d0, d1, d2");
COMPARE(vadd(d3, d4, d5, mi),
- "4e343b05 vadd.f64mi d3, d4, d5");
+ "4e343b05 vaddmi.f64 d3, d4, d5");
COMPARE(vsub(d0, d1, d2),
"ee310b42 vsub.f64 d0, d1, d2");
COMPARE(vsub(d3, d4, d5, ne),
- "1e343b45 vsub.f64ne d3, d4, d5");
+ "1e343b45 vsubne.f64 d3, d4, d5");
COMPARE(vmul(d2, d1, d0),
"ee212b00 vmul.f64 d2, d1, d0");
COMPARE(vmul(d6, d4, d5, cc),
- "3e246b05 vmul.f64cc d6, d4, d5");
+ "3e246b05 vmulcc.f64 d6, d4, d5");
COMPARE(vdiv(d2, d2, d2),
"ee822b02 vdiv.f64 d2, d2, d2");
COMPARE(vdiv(d6, d7, d7, hi),
- "8e876b07 vdiv.f64hi d6, d7, d7");
+ "8e876b07 vdivhi.f64 d6, d7, d7");
+
+ COMPARE(vcmp(d0, d1),
+ "eeb40b41 vcmp.f64 d0, d1");
+ COMPARE(vcmp(d0, 0.0),
+ "eeb50b40 vcmp.f64 d0, #0.0");
COMPARE(vsqrt(d0, d0),
"eeb10bc0 vsqrt.f64 d0, d0");
COMPARE(vsqrt(d2, d3, ne),
- "1eb12bc3 vsqrt.f64ne d2, d3");
+ "1eb12bc3 vsqrtne.f64 d2, d3");
COMPARE(vmov(d0, 1.0),
"eeb70b00 vmov.f64 d0, #1");
COMPARE(vmov(d2, -13.0),
"eeba2b0a vmov.f64 d2, #-13");
+ COMPARE(vmov(d0, VmovIndexLo, r0),
+ "ee000b10 vmov.32 d0[0], r0");
+ COMPARE(vmov(d0, VmovIndexHi, r0),
+ "ee200b10 vmov.32 d0[1], r0");
+
COMPARE(vldr(s0, r0, 0),
"ed900a00 vldr s0, [r0 + 4*0]");
COMPARE(vldr(s1, r1, 4),
@@ -547,6 +561,107 @@ TEST(Vfp) {
"ec860a20 vstmia r6, {s0-s31}");
COMPARE(vldm(ia, r7, s0, s31),
"ec970a20 vldmia r7, {s0-s31}");
+
+ COMPARE(vmla(d2, d1, d0),
+ "ee012b00 vmla.f64 d2, d1, d0");
+ COMPARE(vmla(d6, d4, d5, cc),
+ "3e046b05 vmlacc.f64 d6, d4, d5");
+
+ COMPARE(vmls(d2, d1, d0),
+ "ee012b40 vmls.f64 d2, d1, d0");
+ COMPARE(vmls(d6, d4, d5, cc),
+ "3e046b45 vmlscc.f64 d6, d4, d5");
+
+ COMPARE(vcvt_u32_f64(s0, d0),
+ "eebc0bc0 vcvt.u32.f64 s0, d0");
+ COMPARE(vcvt_s32_f64(s0, d0),
+ "eebd0bc0 vcvt.s32.f64 s0, d0");
+ COMPARE(vcvt_f64_u32(d0, s1),
+ "eeb80b60 vcvt.f64.u32 d0, s1");
+ COMPARE(vcvt_f64_s32(d0, s1),
+ "eeb80be0 vcvt.f64.s32 d0, s1");
+ COMPARE(vcvt_f32_s32(s0, s2),
+ "eeb80ac1 vcvt.f32.s32 s0, s2");
+
+ if (CpuFeatures::IsSupported(VFP32DREGS)) {
+ COMPARE(vmov(d3, d27),
+ "eeb03b6b vmov.f64 d3, d27");
+ COMPARE(vmov(d18, d7),
+ "eef02b47 vmov.f64 d18, d7");
+ COMPARE(vmov(d18, r2, r3),
+ "ec432b32 vmov d18, r2, r3");
+ COMPARE(vmov(r2, r3, d18),
+ "ec532b32 vmov r2, r3, d18");
+ COMPARE(vmov(d20, d31),
+ "eef04b6f vmov.f64 d20, d31");
+
+ COMPARE(vabs(d16, d31),
+ "eef00bef vabs.f64 d16, d31");
+
+ COMPARE(vneg(d16, d31),
+ "eef10b6f vneg.f64 d16, d31");
+
+ COMPARE(vadd(d16, d17, d18),
+ "ee710ba2 vadd.f64 d16, d17, d18");
+
+ COMPARE(vsub(d16, d17, d18),
+ "ee710be2 vsub.f64 d16, d17, d18");
+
+ COMPARE(vmul(d16, d17, d18),
+ "ee610ba2 vmul.f64 d16, d17, d18");
+
+ COMPARE(vdiv(d16, d17, d18),
+ "eec10ba2 vdiv.f64 d16, d17, d18");
+
+ COMPARE(vcmp(d16, d17),
+ "eef40b61 vcmp.f64 d16, d17");
+ COMPARE(vcmp(d16, 0.0),
+ "eef50b40 vcmp.f64 d16, #0.0");
+
+ COMPARE(vsqrt(d16, d17),
+ "eef10be1 vsqrt.f64 d16, d17");
+
+ COMPARE(vmov(d30, 16.0),
+ "eef3eb00 vmov.f64 d30, #16");
+
+ COMPARE(vmov(d31, VmovIndexLo, r7),
+ "ee0f7b90 vmov.32 d31[0], r7");
+ COMPARE(vmov(d31, VmovIndexHi, r7),
+ "ee2f7b90 vmov.32 d31[1], r7");
+
+ COMPARE(vldr(d25, r0, 0),
+ "edd09b00 vldr d25, [r0 + 4*0]");
+ COMPARE(vldr(d26, r1, 4),
+ "edd1ab01 vldr d26, [r1 + 4*1]");
+ COMPARE(vldr(d31, r10, 1020),
+ "eddafbff vldr d31, [r10 + 4*255]");
+
+ COMPARE(vstr(d16, r0, 0),
+ "edc00b00 vstr d16, [r0 + 4*0]");
+ COMPARE(vstr(d17, r1, 4),
+ "edc11b01 vstr d17, [r1 + 4*1]");
+ COMPARE(vstr(d31, r10, 1020),
+ "edcafbff vstr d31, [r10 + 4*255]");
+
+ COMPARE(vstm(ia, r0, d16, d31),
+ "ecc00b20 vstmia r0, {d16-d31}");
+ COMPARE(vldm(ia, r3, d16, d31),
+ "ecd30b20 vldmia r3, {d16-d31}");
+ COMPARE(vstm(ia, r0, d23, d27),
+ "ecc07b0a vstmia r0, {d23-d27}");
+ COMPARE(vldm(ia, r3, d23, d27),
+ "ecd37b0a vldmia r3, {d23-d27}");
+
+ COMPARE(vmla(d16, d17, d18),
+ "ee410ba2 vmla.f64 d16, d17, d18");
+
+ COMPARE(vcvt_u32_f64(s0, d16),
+ "eebc0be0 vcvt.u32.f64 s0, d16");
+ COMPARE(vcvt_s32_f64(s0, d16),
+ "eebd0be0 vcvt.s32.f64 s0, d16");
+ COMPARE(vcvt_f64_u32(d16, s1),
+ "eef80b60 vcvt.f64.u32 d16, s1");
+ }
}
VERIFY_RUN();
@@ -753,4 +868,3 @@ TEST(LoadStore) {
VERIFY_RUN();
}
-
diff --git a/src/3rdparty/v8/test/cctest/test-disasm-ia32.cc b/src/3rdparty/v8/test/cctest/test-disasm-ia32.cc
index da09505..59d1e04 100644
--- a/src/3rdparty/v8/test/cctest/test-disasm-ia32.cc
+++ b/src/3rdparty/v8/test/cctest/test-disasm-ia32.cc
@@ -76,7 +76,7 @@ TEST(DisasmIa320) {
// ---- All instructions that I can think of
__ add(edx, ebx);
- __ add(edx, Operand(12, RelocInfo::NONE));
+ __ add(edx, Operand(12, RelocInfo::NONE32));
__ add(edx, Operand(ebx, 0));
__ add(edx, Operand(ebx, 16));
__ add(edx, Operand(ebx, 1999));
@@ -442,7 +442,8 @@ TEST(DisasmIa320) {
}
{
- if (CpuFeatures::IsSupported(SSE4_1)) {
+ if (CpuFeatures::IsSupported(SSE2) &&
+ CpuFeatures::IsSupported(SSE4_1)) {
CpuFeatures::Scope scope(SSE4_1);
__ pextrd(eax, xmm0, 1);
__ pinsrd(xmm1, eax, 0);
@@ -458,10 +459,11 @@ TEST(DisasmIa320) {
CodeDesc desc;
assm.GetCode(&desc);
- Object* code = HEAP->CreateCode(
+ Isolate* isolate = Isolate::Current();
+ Object* code = isolate->heap()->CreateCode(
desc,
Code::ComputeFlags(Code::STUB),
- Handle<Object>(HEAP->undefined_value()))->ToObjectChecked();
+ Handle<Code>())->ToObjectChecked();
CHECK(code->IsCode());
#ifdef OBJECT_PRINT
Code::cast(code)->Print();
diff --git a/src/3rdparty/v8/test/cctest/test-disasm-x64.cc b/src/3rdparty/v8/test/cctest/test-disasm-x64.cc
index c6332e2..fe7aab8 100644
--- a/src/3rdparty/v8/test/cctest/test-disasm-x64.cc
+++ b/src/3rdparty/v8/test/cctest/test-disasm-x64.cc
@@ -417,7 +417,7 @@ TEST(DisasmX64) {
Object* code = HEAP->CreateCode(
desc,
Code::ComputeFlags(Code::STUB),
- Handle<Object>(HEAP->undefined_value()))->ToObjectChecked();
+ Handle<Code>())->ToObjectChecked();
CHECK(code->IsCode());
#ifdef OBJECT_PRINT
Code::cast(code)->Print();
diff --git a/src/3rdparty/v8/test/cctest/test-global-object.cc b/src/3rdparty/v8/test/cctest/test-global-object.cc
new file mode 100644
index 0000000..16c0be0
--- /dev/null
+++ b/src/3rdparty/v8/test/cctest/test-global-object.cc
@@ -0,0 +1,51 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "cctest.h"
+
+using namespace v8;
+
+// This test fails if properties on the prototype of the global object appear
+// as declared globals.
+TEST(StrictUndeclaredGlobalVariable) {
+ HandleScope scope;
+ v8::Local<v8::String> var_name = v8_str("x");
+ LocalContext context;
+ v8::TryCatch try_catch;
+ v8::Local<v8::Script> script = v8_compile("\"use strict\"; x = 42;");
+ v8::Handle<v8::Object> proto = v8::Object::New();
+ v8::Handle<v8::Object> global =
+ context->Global()->GetPrototype().As<v8::Object>();
+ proto->Set(var_name, v8_num(100));
+ global->SetPrototype(proto);
+ script->Run();
+ CHECK(try_catch.HasCaught());
+ v8::String::Utf8Value exception(try_catch.Exception());
+ CHECK_EQ("ReferenceError: x is not defined", *exception);
+}
diff --git a/src/3rdparty/v8/test/cctest/test-hashing.cc b/src/3rdparty/v8/test/cctest/test-hashing.cc
index a626510..605b59b 100644
--- a/src/3rdparty/v8/test/cctest/test-hashing.cc
+++ b/src/3rdparty/v8/test/cctest/test-hashing.cc
@@ -49,7 +49,7 @@ static v8::Persistent<v8::Context> env;
#define __ masm->
-void generate(MacroAssembler* masm, i::Vector<const char> string) {
+void generate(MacroAssembler* masm, i::Vector<const uint8_t> string) {
// GenerateHashInit takes the first character as an argument so it can't
// handle the zero length string.
ASSERT(string.length() > 0);
@@ -152,23 +152,26 @@ void generate(MacroAssembler* masm, uint32_t key) {
}
-void check(i::Vector<const char> string) {
- v8::HandleScope scope;
+void check(i::Vector<const uint8_t> string) {
+ Isolate* isolate = Isolate::Current();
+ Factory* factory = isolate->factory();
+ HandleScope scope(isolate);
+
v8::internal::byte buffer[2048];
- MacroAssembler masm(Isolate::Current(), buffer, sizeof buffer);
+ MacroAssembler masm(isolate, buffer, sizeof buffer);
generate(&masm, string);
CodeDesc desc;
masm.GetCode(&desc);
- Code* code = Code::cast(HEAP->CreateCode(
- desc,
- Code::ComputeFlags(Code::STUB),
- Handle<Object>(HEAP->undefined_value()))->ToObjectChecked());
+ Handle<Object> undefined(isolate->heap()->undefined_value(), isolate);
+ Handle<Code> code = factory->NewCode(desc,
+ Code::ComputeFlags(Code::STUB),
+ undefined);
CHECK(code->IsCode());
HASH_FUNCTION hash = FUNCTION_CAST<HASH_FUNCTION>(code->entry());
- Handle<String> v8_string = FACTORY->NewStringFromAscii(string);
+ Handle<String> v8_string = factory->NewStringFromOneByte(string);
v8_string->set_hash_field(String::kEmptyHashField);
#ifdef USE_SIMULATOR
uint32_t codegen_hash =
@@ -181,8 +184,16 @@ void check(i::Vector<const char> string) {
}
+void check(i::Vector<const char> s) {
+ check(i::Vector<const uint8_t>::cast(s));
+}
+
+
void check(uint32_t key) {
- v8::HandleScope scope;
+ Isolate* isolate = Isolate::Current();
+ Factory* factory = isolate->factory();
+ HandleScope scope(isolate);
+
v8::internal::byte buffer[2048];
MacroAssembler masm(Isolate::Current(), buffer, sizeof buffer);
@@ -190,10 +201,10 @@ void check(uint32_t key) {
CodeDesc desc;
masm.GetCode(&desc);
- Code* code = Code::cast(HEAP->CreateCode(
- desc,
- Code::ComputeFlags(Code::STUB),
- Handle<Object>(HEAP->undefined_value()))->ToObjectChecked());
+ Handle<Object> undefined(isolate->heap()->undefined_value(), isolate);
+ Handle<Code> code = factory->NewCode(desc,
+ Code::ComputeFlags(Code::STUB),
+ undefined);
CHECK(code->IsCode());
HASH_FUNCTION hash = FUNCTION_CAST<HASH_FUNCTION>(code->entry());
@@ -204,16 +215,14 @@ void check(uint32_t key) {
uint32_t codegen_hash = hash();
#endif
- uint32_t runtime_hash = ComputeIntegerHash(
- key,
- Isolate::Current()->heap()->HashSeed());
+ uint32_t runtime_hash = ComputeIntegerHash(key, isolate->heap()->HashSeed());
CHECK(runtime_hash == codegen_hash);
}
-void check_twochars(char a, char b) {
- char ab[2] = {a, b};
- check(i::Vector<const char>(ab, 2));
+void check_twochars(uint8_t a, uint8_t b) {
+ uint8_t ab[2] = {a, b};
+ check(i::Vector<const uint8_t>(ab, 2));
}
@@ -224,12 +233,12 @@ static uint32_t PseudoRandom(uint32_t i, uint32_t j) {
TEST(StringHash) {
if (env.IsEmpty()) env = v8::Context::New();
- for (int a = 0; a < String::kMaxAsciiCharCode; a++) {
+ for (uint8_t a = 0; a < String::kMaxOneByteCharCode; a++) {
// Numbers are hashed differently.
if (a >= '0' && a <= '9') continue;
- for (int b = 0; b < String::kMaxAsciiCharCode; b++) {
+ for (uint8_t b = 0; b < String::kMaxOneByteCharCode; b++) {
if (b >= '0' && b <= '9') continue;
- check_twochars(static_cast<char>(a), static_cast<char>(b));
+ check_twochars(a, b);
}
}
check(i::Vector<const char>("*", 1));
diff --git a/src/3rdparty/v8/test/cctest/test-heap-profiler.cc b/src/3rdparty/v8/test/cctest/test-heap-profiler.cc
index 2a60785..b709506 100644
--- a/src/3rdparty/v8/test/cctest/test-heap-profiler.cc
+++ b/src/3rdparty/v8/test/cctest/test-heap-profiler.cc
@@ -362,6 +362,44 @@ TEST(HeapSnapshotInternalReferences) {
#define CHECK_NE_SNAPSHOT_OBJECT_ID(a, b) \
CHECK((a) != (b)) // NOLINT
+TEST(HeapSnapshotAddressReuse) {
+ v8::HandleScope scope;
+ LocalContext env;
+
+ CompileRun(
+ "function A() {}\n"
+ "var a = [];\n"
+ "for (var i = 0; i < 10000; ++i)\n"
+ " a[i] = new A();\n");
+ const v8::HeapSnapshot* snapshot1 =
+ v8::HeapProfiler::TakeSnapshot(v8_str("snapshot1"));
+ v8::SnapshotObjectId maxId1 = snapshot1->GetMaxSnapshotJSObjectId();
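+  // Objects allocated after snapshot1 must be assigned ids above maxId1,
+  // even if they reuse addresses of objects collected by the GC below.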
+
+ CompileRun(
+ "for (var i = 0; i < 10000; ++i)\n"
+ " a[i] = new A();\n");
+ HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+
+ const v8::HeapSnapshot* snapshot2 =
+ v8::HeapProfiler::TakeSnapshot(v8_str("snapshot2"));
+ const v8::HeapGraphNode* global2 = GetGlobalObject(snapshot2);
+
+ const v8::HeapGraphNode* array_node =
+ GetProperty(global2, v8::HeapGraphEdge::kProperty, "a");
+ CHECK_NE(NULL, array_node);
+ int wrong_count = 0;
+ for (int i = 0, count = array_node->GetChildrenCount(); i < count; ++i) {
+ const v8::HeapGraphEdge* prop = array_node->GetChild(i);
+ if (prop->GetType() != v8::HeapGraphEdge::kElement)
+ continue;
+ v8::SnapshotObjectId id = prop->GetToNode()->GetId();
+ if (id < maxId1)
+ ++wrong_count;
+ }
+ CHECK_EQ(0, wrong_count);
+}
+
+
TEST(HeapEntryIdsAndArrayShift) {
v8::HandleScope scope;
LocalContext env;
@@ -605,7 +643,8 @@ TEST(HeapSnapshotJSONSerialization) {
// Get the string index using the path: <root> -> <global>.b.x.s
v8::Local<v8::Value> string_obj_pos_val;
if (i::Snapshot::IsEnabled()) {
- // TODO(pvarga): Check shortcut_type issue at b when QML global object is used.
+ // TODO(pvarga): Check shortcut_type issue at b when
+ // QML global object is used.
string_obj_pos_val = CompileRun(
"GetChildPosByProperty(\n"
" GetChildPosByProperty(\n"
@@ -1031,7 +1070,6 @@ class TestRetainedObjectInfo : public v8::RetainedObjectInfo {
private:
bool disposed_;
- int category_;
int hash_;
const char* group_label_;
const char* label_;
@@ -1062,20 +1100,21 @@ static const v8::HeapGraphNode* GetNode(const v8::HeapGraphNode* parent,
TEST(HeapSnapshotRetainedObjectInfo) {
v8::HandleScope scope;
LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
v8::HeapProfiler::DefineWrapperClass(
1, TestRetainedObjectInfo::WrapperInfoCallback);
v8::HeapProfiler::DefineWrapperClass(
2, TestRetainedObjectInfo::WrapperInfoCallback);
v8::Persistent<v8::String> p_AAA =
- v8::Persistent<v8::String>::New(v8_str("AAA"));
- p_AAA.SetWrapperClassId(1);
+ v8::Persistent<v8::String>::New(isolate, v8_str("AAA"));
+ p_AAA.SetWrapperClassId(isolate, 1);
v8::Persistent<v8::String> p_BBB =
- v8::Persistent<v8::String>::New(v8_str("BBB"));
- p_BBB.SetWrapperClassId(1);
+ v8::Persistent<v8::String>::New(isolate, v8_str("BBB"));
+ p_BBB.SetWrapperClassId(isolate, 1);
v8::Persistent<v8::String> p_CCC =
- v8::Persistent<v8::String>::New(v8_str("CCC"));
- p_CCC.SetWrapperClassId(2);
+ v8::Persistent<v8::String>::New(isolate, v8_str("CCC"));
+ p_CCC.SetWrapperClassId(isolate, 2);
CHECK_EQ(0, TestRetainedObjectInfo::instances.length());
const v8::HeapSnapshot* snapshot =
v8::HeapProfiler::TakeSnapshot(v8_str("retained"));
@@ -1124,8 +1163,9 @@ class GraphWithImplicitRefs {
explicit GraphWithImplicitRefs(LocalContext* env) {
CHECK_EQ(NULL, instance_);
instance_ = this;
+ v8::Isolate* isolate = (*env)->GetIsolate();
for (int i = 0; i < kObjectsCount; i++) {
- objects_[i] = v8::Persistent<v8::Object>::New(v8::Object::New());
+ objects_[i] = v8::Persistent<v8::Object>::New(isolate, v8::Object::New());
}
(*env)->Global()->Set(v8_str("root_object"), objects_[0]);
}
@@ -1133,7 +1173,7 @@ class GraphWithImplicitRefs {
instance_ = NULL;
}
- static void gcPrologue() {
+ static void gcPrologue(v8::GCType type, v8::GCCallbackFlags flags) {
instance_->AddImplicitReferences();
}
@@ -1159,7 +1199,7 @@ TEST(HeapSnapshotImplicitReferences) {
LocalContext env;
GraphWithImplicitRefs graph(&env);
- v8::V8::SetGlobalGCPrologueCallback(&GraphWithImplicitRefs::gcPrologue);
+ v8::V8::AddGCPrologueCallback(&GraphWithImplicitRefs::gcPrologue);
const v8::HeapSnapshot* snapshot =
v8::HeapProfiler::TakeSnapshot(v8_str("implicit_refs"));
@@ -1182,7 +1222,7 @@ TEST(HeapSnapshotImplicitReferences) {
}
}
CHECK_EQ(2, implicit_targets_count);
- v8::V8::SetGlobalGCPrologueCallback(NULL);
+ v8::V8::RemoveGCPrologueCallback(&GraphWithImplicitRefs::gcPrologue);
}
@@ -1244,51 +1284,28 @@ TEST(DeleteHeapSnapshot) {
}
-TEST(DocumentURL) {
- v8::HandleScope scope;
- LocalContext env;
-
- CompileRun("document = { URL:\"abcdefgh\" };");
-
- const v8::HeapSnapshot* snapshot =
- v8::HeapProfiler::TakeSnapshot(v8_str("document"));
- const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
- CHECK_NE(NULL, global);
- CHECK_EQ("Object / abcdefgh",
- const_cast<i::HeapEntry*>(
- reinterpret_cast<const i::HeapEntry*>(global))->name());
-}
-
+class NameResolver : public v8::HeapProfiler::ObjectNameResolver {
+ public:
+ virtual const char* GetName(v8::Handle<v8::Object> object) {
+ return "Global object name";
+ }
+};
-TEST(DocumentWithException) {
+TEST(GlobalObjectName) {
v8::HandleScope scope;
LocalContext env;
- CompileRun(
- "this.__defineGetter__(\"document\", function() { throw new Error(); })");
- const v8::HeapSnapshot* snapshot =
- v8::HeapProfiler::TakeSnapshot(v8_str("document"));
- const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
- CHECK_NE(NULL, global);
- CHECK_EQ("Object",
- const_cast<i::HeapEntry*>(
- reinterpret_cast<const i::HeapEntry*>(global))->name());
-}
-
-
-TEST(DocumentURLWithException) {
- v8::HandleScope scope;
- LocalContext env;
+ CompileRun("document = { URL:\"abcdefgh\" };");
- CompileRun(
- "function URLWithException() {}\n"
- "URLWithException.prototype = { get URL() { throw new Error(); } };\n"
- "document = { URL: new URLWithException() };");
+ NameResolver name_resolver;
const v8::HeapSnapshot* snapshot =
- v8::HeapProfiler::TakeSnapshot(v8_str("document"));
+ v8::HeapProfiler::TakeSnapshot(v8_str("document"),
+ v8::HeapSnapshot::kFull,
+ NULL,
+ &name_resolver);
const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
CHECK_NE(NULL, global);
- CHECK_EQ("Object",
+ CHECK_EQ("Object / Global object name" ,
const_cast<i::HeapEntry*>(
reinterpret_cast<const i::HeapEntry*>(global))->name());
}
@@ -1301,9 +1318,10 @@ TEST(NoHandleLeaks) {
CompileRun("document = { URL:\"abcdefgh\" };");
v8::Handle<v8::String> name(v8_str("leakz"));
- int count_before = i::HandleScope::NumberOfHandles();
+ i::Isolate* isolate = i::Isolate::Current();
+ int count_before = i::HandleScope::NumberOfHandles(isolate);
v8::HeapProfiler::TakeSnapshot(name);
- int count_after = i::HandleScope::NumberOfHandles();
+ int count_after = i::HandleScope::NumberOfHandles(isolate);
CHECK_EQ(count_before, count_after);
}
@@ -1518,8 +1536,10 @@ bool HasWeakGlobalHandle() {
}
-static void PersistentHandleCallback(v8::Persistent<v8::Value> handle, void*) {
- handle.Dispose();
+static void PersistentHandleCallback(v8::Isolate* isolate,
+ v8::Persistent<v8::Value> handle,
+ void*) {
+ handle.Dispose(isolate);
}
@@ -1530,8 +1550,8 @@ TEST(WeakGlobalHandle) {
CHECK(!HasWeakGlobalHandle());
v8::Persistent<v8::Object> handle =
- v8::Persistent<v8::Object>::New(v8::Object::New());
- handle.MakeWeak(NULL, PersistentHandleCallback);
+ v8::Persistent<v8::Object>::New(env->GetIsolate(), v8::Object::New());
+ handle.MakeWeak(env->GetIsolate(), NULL, PersistentHandleCallback);
CHECK(HasWeakGlobalHandle());
}
@@ -1606,6 +1626,7 @@ TEST(NoDebugObjectInSnapshot) {
TEST(PersistentHandleCount) {
v8::HandleScope scope;
LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
// V8 also uses global handles internally, so we can't test for an absolute
// number.
@@ -1613,26 +1634,26 @@ TEST(PersistentHandleCount) {
// Create some persistent handles.
v8::Persistent<v8::String> p_AAA =
- v8::Persistent<v8::String>::New(v8_str("AAA"));
+ v8::Persistent<v8::String>::New(isolate, v8_str("AAA"));
CHECK_EQ(global_handle_count + 1,
v8::HeapProfiler::GetPersistentHandleCount());
v8::Persistent<v8::String> p_BBB =
- v8::Persistent<v8::String>::New(v8_str("BBB"));
+ v8::Persistent<v8::String>::New(isolate, v8_str("BBB"));
CHECK_EQ(global_handle_count + 2,
v8::HeapProfiler::GetPersistentHandleCount());
v8::Persistent<v8::String> p_CCC =
- v8::Persistent<v8::String>::New(v8_str("CCC"));
+ v8::Persistent<v8::String>::New(isolate, v8_str("CCC"));
CHECK_EQ(global_handle_count + 3,
v8::HeapProfiler::GetPersistentHandleCount());
// Dispose the persistent handles in a different order.
- p_AAA.Dispose();
+ p_AAA.Dispose(env->GetIsolate());
CHECK_EQ(global_handle_count + 2,
v8::HeapProfiler::GetPersistentHandleCount());
- p_CCC.Dispose();
+ p_CCC.Dispose(env->GetIsolate());
CHECK_EQ(global_handle_count + 1,
v8::HeapProfiler::GetPersistentHandleCount());
- p_BBB.Dispose();
+ p_BBB.Dispose(env->GetIsolate());
CHECK_EQ(global_handle_count, v8::HeapProfiler::GetPersistentHandleCount());
}
diff --git a/src/3rdparty/v8/test/cctest/test-heap.cc b/src/3rdparty/v8/test/cctest/test-heap.cc
index 0d72ff7..c1c3c17 100644
--- a/src/3rdparty/v8/test/cctest/test-heap.cc
+++ b/src/3rdparty/v8/test/cctest/test-heap.cc
@@ -27,8 +27,10 @@ static void InitializeVM() {
// Go through all incremental marking steps in one swoop.
static void SimulateIncrementalMarking() {
IncrementalMarking* marking = HEAP->incremental_marking();
- CHECK(marking->IsStopped());
- marking->Start();
+ CHECK(marking->IsMarking() || marking->IsStopped());
+ if (marking->IsStopped()) {
+ marking->Start();
+ }
CHECK(marking->IsMarking());
while (!marking->IsComplete()) {
marking->Step(MB, IncrementalMarking::NO_GC_VIA_STACK_GUARD);
@@ -57,62 +59,65 @@ TEST(HeapMaps) {
}
-static void CheckOddball(Object* obj, const char* string) {
+static void CheckOddball(Isolate* isolate, Object* obj, const char* string) {
CHECK(obj->IsOddball());
bool exc;
- Object* print_string = *Execution::ToString(Handle<Object>(obj), &exc);
- CHECK(String::cast(print_string)->IsEqualTo(CStrVector(string)));
+ Object* print_string =
+ *Execution::ToString(Handle<Object>(obj, isolate), &exc);
+ CHECK(String::cast(print_string)->IsUtf8EqualTo(CStrVector(string)));
}
-static void CheckSmi(int value, const char* string) {
+static void CheckSmi(Isolate* isolate, int value, const char* string) {
bool exc;
Object* print_string =
- *Execution::ToString(Handle<Object>(Smi::FromInt(value)), &exc);
- CHECK(String::cast(print_string)->IsEqualTo(CStrVector(string)));
+ *Execution::ToString(Handle<Object>(Smi::FromInt(value), isolate), &exc);
+ CHECK(String::cast(print_string)->IsUtf8EqualTo(CStrVector(string)));
}
-static void CheckNumber(double value, const char* string) {
+static void CheckNumber(Isolate* isolate, double value, const char* string) {
Object* obj = HEAP->NumberFromDouble(value)->ToObjectChecked();
CHECK(obj->IsNumber());
bool exc;
- Object* print_string = *Execution::ToString(Handle<Object>(obj), &exc);
- CHECK(String::cast(print_string)->IsEqualTo(CStrVector(string)));
+ Object* print_string =
+ *Execution::ToString(Handle<Object>(obj, isolate), &exc);
+ CHECK(String::cast(print_string)->IsUtf8EqualTo(CStrVector(string)));
}
-static void CheckFindCodeObject() {
+static void CheckFindCodeObject(Isolate* isolate) {
// Test FindCodeObject
#define __ assm.
- Assembler assm(Isolate::Current(), NULL, 0);
+ Assembler assm(isolate, NULL, 0);
__ nop(); // supported on all architectures
CodeDesc desc;
assm.GetCode(&desc);
- Object* code = HEAP->CreateCode(
+ Heap* heap = isolate->heap();
+ Object* code = heap->CreateCode(
desc,
Code::ComputeFlags(Code::STUB),
- Handle<Object>(HEAP->undefined_value()))->ToObjectChecked();
+ Handle<Code>())->ToObjectChecked();
CHECK(code->IsCode());
HeapObject* obj = HeapObject::cast(code);
Address obj_addr = obj->address();
for (int i = 0; i < obj->Size(); i += kPointerSize) {
- Object* found = HEAP->FindCodeObject(obj_addr + i);
+ Object* found = heap->FindCodeObject(obj_addr + i);
CHECK_EQ(code, found);
}
- Object* copy = HEAP->CreateCode(
+ Object* copy = heap->CreateCode(
desc,
Code::ComputeFlags(Code::STUB),
- Handle<Object>(HEAP->undefined_value()))->ToObjectChecked();
+ Handle<Code>())->ToObjectChecked();
CHECK(copy->IsCode());
HeapObject* obj_copy = HeapObject::cast(copy);
- Object* not_right = HEAP->FindCodeObject(obj_copy->address() +
+ Object* not_right = heap->FindCodeObject(obj_copy->address() +
obj_copy->Size() / 2);
CHECK(not_right != code);
}
@@ -120,77 +125,86 @@ static void CheckFindCodeObject() {
TEST(HeapObjects) {
InitializeVM();
+ Isolate* isolate = Isolate::Current();
+ Heap* heap = isolate->heap();
v8::HandleScope sc;
- Object* value = HEAP->NumberFromDouble(1.000123)->ToObjectChecked();
+ Object* value = heap->NumberFromDouble(1.000123)->ToObjectChecked();
CHECK(value->IsHeapNumber());
CHECK(value->IsNumber());
CHECK_EQ(1.000123, value->Number());
- value = HEAP->NumberFromDouble(1.0)->ToObjectChecked();
+ value = heap->NumberFromDouble(1.0)->ToObjectChecked();
CHECK(value->IsSmi());
CHECK(value->IsNumber());
CHECK_EQ(1.0, value->Number());
- value = HEAP->NumberFromInt32(1024)->ToObjectChecked();
+ value = heap->NumberFromInt32(1024)->ToObjectChecked();
CHECK(value->IsSmi());
CHECK(value->IsNumber());
CHECK_EQ(1024.0, value->Number());
- value = HEAP->NumberFromInt32(Smi::kMinValue)->ToObjectChecked();
+ value = heap->NumberFromInt32(Smi::kMinValue)->ToObjectChecked();
CHECK(value->IsSmi());
CHECK(value->IsNumber());
CHECK_EQ(Smi::kMinValue, Smi::cast(value)->value());
- value = HEAP->NumberFromInt32(Smi::kMaxValue)->ToObjectChecked();
+ value = heap->NumberFromInt32(Smi::kMaxValue)->ToObjectChecked();
CHECK(value->IsSmi());
CHECK(value->IsNumber());
CHECK_EQ(Smi::kMaxValue, Smi::cast(value)->value());
#ifndef V8_TARGET_ARCH_X64
// TODO(lrn): We need a NumberFromIntptr function in order to test this.
- value = HEAP->NumberFromInt32(Smi::kMinValue - 1)->ToObjectChecked();
+ value = heap->NumberFromInt32(Smi::kMinValue - 1)->ToObjectChecked();
CHECK(value->IsHeapNumber());
CHECK(value->IsNumber());
CHECK_EQ(static_cast<double>(Smi::kMinValue - 1), value->Number());
#endif
MaybeObject* maybe_value =
- HEAP->NumberFromUint32(static_cast<uint32_t>(Smi::kMaxValue) + 1);
+ heap->NumberFromUint32(static_cast<uint32_t>(Smi::kMaxValue) + 1);
value = maybe_value->ToObjectChecked();
CHECK(value->IsHeapNumber());
CHECK(value->IsNumber());
CHECK_EQ(static_cast<double>(static_cast<uint32_t>(Smi::kMaxValue) + 1),
value->Number());
+ maybe_value = heap->NumberFromUint32(static_cast<uint32_t>(1) << 31);
+ value = maybe_value->ToObjectChecked();
+ CHECK(value->IsHeapNumber());
+ CHECK(value->IsNumber());
+ CHECK_EQ(static_cast<double>(static_cast<uint32_t>(1) << 31),
+ value->Number());
+
// nan oddball checks
- CHECK(HEAP->nan_value()->IsNumber());
- CHECK(isnan(HEAP->nan_value()->Number()));
+ CHECK(heap->nan_value()->IsNumber());
+ CHECK(isnan(heap->nan_value()->Number()));
Handle<String> s = FACTORY->NewStringFromAscii(CStrVector("fisk hest "));
CHECK(s->IsString());
CHECK_EQ(10, s->length());
- String* object_symbol = String::cast(HEAP->Object_symbol());
+ String* object_string = String::cast(heap->Object_string());
CHECK(
Isolate::Current()->context()->global_object()->HasLocalProperty(
- object_symbol));
+ object_string));
// Check ToString for oddballs
- CheckOddball(HEAP->true_value(), "true");
- CheckOddball(HEAP->false_value(), "false");
- CheckOddball(HEAP->null_value(), "null");
- CheckOddball(HEAP->undefined_value(), "undefined");
+ CheckOddball(isolate, heap->true_value(), "true");
+ CheckOddball(isolate, heap->false_value(), "false");
+ CheckOddball(isolate, heap->null_value(), "null");
+ CheckOddball(isolate, heap->undefined_value(), "undefined");
// Check ToString for Smis
- CheckSmi(0, "0");
- CheckSmi(42, "42");
- CheckSmi(-42, "-42");
+ CheckSmi(isolate, 0, "0");
+ CheckSmi(isolate, 42, "42");
+ CheckSmi(isolate, -42, "-42");
// Check ToString for Numbers
- CheckNumber(1.1, "1.1");
+ CheckNumber(isolate, 1.1, "1.1");
- CheckFindCodeObject();
+ CheckFindCodeObject(isolate);
}
@@ -212,28 +226,31 @@ TEST(Tagging) {
TEST(GarbageCollection) {
InitializeVM();
+ Isolate* isolate = Isolate::Current();
+ Heap* heap = isolate->heap();
+ Factory* factory = isolate->factory();
v8::HandleScope sc;
// Check GC.
- HEAP->CollectGarbage(NEW_SPACE);
+ heap->CollectGarbage(NEW_SPACE);
- Handle<String> name = FACTORY->LookupAsciiSymbol("theFunction");
- Handle<String> prop_name = FACTORY->LookupAsciiSymbol("theSlot");
- Handle<String> prop_namex = FACTORY->LookupAsciiSymbol("theSlotx");
- Handle<String> obj_name = FACTORY->LookupAsciiSymbol("theObject");
+ Handle<String> name = factory->InternalizeUtf8String("theFunction");
+ Handle<String> prop_name = factory->InternalizeUtf8String("theSlot");
+ Handle<String> prop_namex = factory->InternalizeUtf8String("theSlotx");
+ Handle<String> obj_name = factory->InternalizeUtf8String("theObject");
{
- v8::HandleScope inner_scope;
+ HandleScope inner_scope(isolate);
// Allocate a function and keep it in global object's property.
Handle<JSFunction> function =
- FACTORY->NewFunction(name, FACTORY->undefined_value());
+ factory->NewFunction(name, factory->undefined_value());
Handle<Map> initial_map =
- FACTORY->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
+ factory->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
function->set_initial_map(*initial_map);
Isolate::Current()->context()->global_object()->SetProperty(
*name, *function, NONE, kNonStrictMode)->ToObjectChecked();
// Allocate an object. Unrooted after leaving the scope.
- Handle<JSObject> obj = FACTORY->NewJSObject(function);
+ Handle<JSObject> obj = factory->NewJSObject(function);
obj->SetProperty(
*prop_name, Smi::FromInt(23), NONE, kNonStrictMode)->ToObjectChecked();
obj->SetProperty(
@@ -243,7 +260,7 @@ TEST(GarbageCollection) {
CHECK_EQ(Smi::FromInt(24), obj->GetProperty(*prop_namex));
}
- HEAP->CollectGarbage(NEW_SPACE);
+ heap->CollectGarbage(NEW_SPACE);
// Function should be alive.
CHECK(Isolate::Current()->context()->global_object()->
@@ -255,9 +272,9 @@ TEST(GarbageCollection) {
Handle<JSFunction> function(JSFunction::cast(func_value));
{
- HandleScope inner_scope;
+ HandleScope inner_scope(isolate);
// Allocate another object, make it reachable from global.
- Handle<JSObject> obj = FACTORY->NewJSObject(function);
+ Handle<JSObject> obj = factory->NewJSObject(function);
Isolate::Current()->context()->global_object()->SetProperty(
*obj_name, *obj, NONE, kNonStrictMode)->ToObjectChecked();
obj->SetProperty(
@@ -265,7 +282,7 @@ TEST(GarbageCollection) {
}
// After gc, it should survive.
- HEAP->CollectGarbage(NEW_SPACE);
+ heap->CollectGarbage(NEW_SPACE);
CHECK(Isolate::Current()->context()->global_object()->
HasLocalProperty(*obj_name));
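
This test, like most that follow, is migrated from the process-global HEAP/FACTORY macros to an explicitly threaded Isolate, and from the old symbol API (LookupAsciiSymbol) to internalized strings (InternalizeUtf8String). The boilerplate each migrated test now opens with, sketched:

    Isolate* isolate = Isolate::Current();
    Heap* heap = isolate->heap();
    Factory* factory = isolate->factory();
    HandleScope scope(isolate);  // the internal HandleScope takes the isolate too
    Handle<String> name = factory->InternalizeUtf8String("theFunction");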
@@ -311,7 +328,10 @@ TEST(LocalHandles) {
TEST(GlobalHandles) {
InitializeVM();
- GlobalHandles* global_handles = Isolate::Current()->global_handles();
+ Isolate* isolate = Isolate::Current();
+ Heap* heap = isolate->heap();
+ Factory* factory = isolate->factory();
+ GlobalHandles* global_handles = isolate->global_handles();
Handle<Object> h1;
Handle<Object> h2;
@@ -319,10 +339,10 @@ TEST(GlobalHandles) {
Handle<Object> h4;
{
- HandleScope scope;
+ HandleScope scope(isolate);
- Handle<Object> i = FACTORY->NewStringFromAscii(CStrVector("fisk"));
- Handle<Object> u = FACTORY->NewNumber(1.12344);
+ Handle<Object> i = factory->NewStringFromAscii(CStrVector("fisk"));
+ Handle<Object> u = factory->NewNumber(1.12344);
h1 = global_handles->Create(*i);
h2 = global_handles->Create(*u);
@@ -331,7 +351,7 @@ TEST(GlobalHandles) {
}
// after gc, it should survive
- HEAP->CollectGarbage(NEW_SPACE);
+ heap->CollectGarbage(NEW_SPACE);
CHECK((*h1)->IsString());
CHECK((*h2)->IsHeapNumber());
@@ -350,16 +370,20 @@ TEST(GlobalHandles) {
static bool WeakPointerCleared = false;
-static void TestWeakGlobalHandleCallback(v8::Persistent<v8::Value> handle,
+static void TestWeakGlobalHandleCallback(v8::Isolate* isolate,
+ v8::Persistent<v8::Value> handle,
void* id) {
if (1234 == reinterpret_cast<intptr_t>(id)) WeakPointerCleared = true;
- handle.Dispose();
+ handle.Dispose(isolate);
}
TEST(WeakGlobalHandlesScavenge) {
InitializeVM();
- GlobalHandles* global_handles = Isolate::Current()->global_handles();
+ Isolate* isolate = Isolate::Current();
+ Heap* heap = isolate->heap();
+ Factory* factory = isolate->factory();
+ GlobalHandles* global_handles = isolate->global_handles();
WeakPointerCleared = false;
@@ -367,10 +391,10 @@ TEST(WeakGlobalHandlesScavenge) {
Handle<Object> h2;
{
- HandleScope scope;
+ HandleScope scope(isolate);
- Handle<Object> i = FACTORY->NewStringFromAscii(CStrVector("fisk"));
- Handle<Object> u = FACTORY->NewNumber(1.12344);
+ Handle<Object> i = factory->NewStringFromAscii(CStrVector("fisk"));
+ Handle<Object> u = factory->NewNumber(1.12344);
h1 = global_handles->Create(*i);
h2 = global_handles->Create(*u);
@@ -378,10 +402,11 @@ TEST(WeakGlobalHandlesScavenge) {
global_handles->MakeWeak(h2.location(),
reinterpret_cast<void*>(1234),
+ NULL,
&TestWeakGlobalHandleCallback);
// Scavenge treats weak pointers as normal roots.
- HEAP->PerformScavenge();
+ heap->PerformScavenge();
CHECK((*h1)->IsString());
CHECK((*h2)->IsHeapNumber());
@@ -397,7 +422,10 @@ TEST(WeakGlobalHandlesScavenge) {
TEST(WeakGlobalHandlesMark) {
InitializeVM();
- GlobalHandles* global_handles = Isolate::Current()->global_handles();
+ Isolate* isolate = Isolate::Current();
+ Heap* heap = isolate->heap();
+ Factory* factory = isolate->factory();
+ GlobalHandles* global_handles = isolate->global_handles();
WeakPointerCleared = false;
@@ -405,26 +433,29 @@ TEST(WeakGlobalHandlesMark) {
Handle<Object> h2;
{
- HandleScope scope;
+ HandleScope scope(isolate);
- Handle<Object> i = FACTORY->NewStringFromAscii(CStrVector("fisk"));
- Handle<Object> u = FACTORY->NewNumber(1.12344);
+ Handle<Object> i = factory->NewStringFromAscii(CStrVector("fisk"));
+ Handle<Object> u = factory->NewNumber(1.12344);
h1 = global_handles->Create(*i);
h2 = global_handles->Create(*u);
}
- HEAP->CollectGarbage(OLD_POINTER_SPACE);
- HEAP->CollectGarbage(NEW_SPACE);
- // Make sure the object is promoted.
+ // Make sure the objects are promoted.
+ heap->CollectGarbage(OLD_POINTER_SPACE);
+ heap->CollectGarbage(NEW_SPACE);
+ CHECK(!heap->InNewSpace(*h1) && !heap->InNewSpace(*h2));
global_handles->MakeWeak(h2.location(),
reinterpret_cast<void*>(1234),
+ NULL,
&TestWeakGlobalHandleCallback);
CHECK(!GlobalHandles::IsNearDeath(h1.location()));
CHECK(!GlobalHandles::IsNearDeath(h2.location()));
- HEAP->CollectGarbage(OLD_POINTER_SPACE);
+ // Incremental marking potentially marked handles before they turned weak.
+ heap->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
CHECK((*h1)->IsString());
@@ -437,30 +468,34 @@ TEST(WeakGlobalHandlesMark) {
TEST(DeleteWeakGlobalHandle) {
InitializeVM();
- GlobalHandles* global_handles = Isolate::Current()->global_handles();
+ Isolate* isolate = Isolate::Current();
+ Heap* heap = isolate->heap();
+ Factory* factory = isolate->factory();
+ GlobalHandles* global_handles = isolate->global_handles();
WeakPointerCleared = false;
Handle<Object> h;
{
- HandleScope scope;
+ HandleScope scope(isolate);
- Handle<Object> i = FACTORY->NewStringFromAscii(CStrVector("fisk"));
+ Handle<Object> i = factory->NewStringFromAscii(CStrVector("fisk"));
h = global_handles->Create(*i);
}
global_handles->MakeWeak(h.location(),
reinterpret_cast<void*>(1234),
+ NULL,
&TestWeakGlobalHandleCallback);
// Scavenge does not recognize weak references.
- HEAP->PerformScavenge();
+ heap->PerformScavenge();
CHECK(!WeakPointerCleared);
// Mark-compact treats weak references properly.
- HEAP->CollectGarbage(OLD_POINTER_SPACE);
+ heap->CollectGarbage(OLD_POINTER_SPACE);
CHECK(WeakPointerCleared);
}
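
The internal GlobalHandles::MakeWeak grew an extra argument in this revision; the tests pass NULL for it. Judging by its position it is a second callback slot, but the diff does not define it — the authoritative signature lives in global-handles.h. The migrated call shape:

    global_handles->MakeWeak(h.location(),
                             reinterpret_cast<void*>(1234),  // callback parameter
                             NULL,                           // new slot, unused here
                             &TestWeakGlobalHandleCallback);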
@@ -530,27 +565,27 @@ static const char* not_so_random_string_table[] = {
};
-static void CheckSymbols(const char** strings) {
+static void CheckInternalizedStrings(const char** strings) {
for (const char* string = *strings; *strings != 0; string = *strings++) {
Object* a;
- MaybeObject* maybe_a = HEAP->LookupAsciiSymbol(string);
- // LookupAsciiSymbol may return a failure if a GC is needed.
+ MaybeObject* maybe_a = HEAP->InternalizeUtf8String(string);
+ // InternalizeUtf8String may return a failure if a GC is needed.
if (!maybe_a->ToObject(&a)) continue;
- CHECK(a->IsSymbol());
+ CHECK(a->IsInternalizedString());
Object* b;
- MaybeObject* maybe_b = HEAP->LookupAsciiSymbol(string);
+ MaybeObject* maybe_b = HEAP->InternalizeUtf8String(string);
if (!maybe_b->ToObject(&b)) continue;
CHECK_EQ(b, a);
- CHECK(String::cast(b)->IsEqualTo(CStrVector(string)));
+ CHECK(String::cast(b)->IsUtf8EqualTo(CStrVector(string)));
}
}
-TEST(SymbolTable) {
+TEST(StringTable) {
InitializeVM();
- CheckSymbols(not_so_random_string_table);
- CheckSymbols(not_so_random_string_table);
+ CheckInternalizedStrings(not_so_random_string_table);
+ CheckInternalizedStrings(not_so_random_string_table);
}
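
The symbol-to-internalized-string rename is behavior-preserving: internalizing equal byte sequences must still yield the same object. The invariant the renamed test exercises, as a sketch (MaybeObject handling as in the hunk above):

    Object* a;
    Object* b;
    MaybeObject* maybe_a = HEAP->InternalizeUtf8String("fisk");
    MaybeObject* maybe_b = HEAP->InternalizeUtf8String("fisk");
    if (maybe_a->ToObject(&a) && maybe_b->ToObject(&b)) {  // may fail if GC is needed
      CHECK(a->IsInternalizedString());
      CHECK_EQ(b, a);  // internalization is canonicalizing
    }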
@@ -558,14 +593,14 @@ TEST(FunctionAllocation) {
InitializeVM();
v8::HandleScope sc;
- Handle<String> name = FACTORY->LookupAsciiSymbol("theFunction");
+ Handle<String> name = FACTORY->InternalizeUtf8String("theFunction");
Handle<JSFunction> function =
FACTORY->NewFunction(name, FACTORY->undefined_value());
Handle<Map> initial_map =
FACTORY->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
function->set_initial_map(*initial_map);
- Handle<String> prop_name = FACTORY->LookupAsciiSymbol("theSlot");
+ Handle<String> prop_name = FACTORY->InternalizeUtf8String("theSlot");
Handle<JSObject> obj = FACTORY->NewJSObject(function);
obj->SetProperty(
*prop_name, Smi::FromInt(23), NONE, kNonStrictMode)->ToObjectChecked();
@@ -581,14 +616,14 @@ TEST(ObjectProperties) {
InitializeVM();
v8::HandleScope sc;
- String* object_symbol = String::cast(HEAP->Object_symbol());
+ String* object_string = String::cast(HEAP->Object_string());
Object* raw_object = Isolate::Current()->context()->global_object()->
- GetProperty(object_symbol)->ToObjectChecked();
+ GetProperty(object_string)->ToObjectChecked();
JSFunction* object_function = JSFunction::cast(raw_object);
Handle<JSFunction> constructor(object_function);
Handle<JSObject> obj = FACTORY->NewJSObject(constructor);
- Handle<String> first = FACTORY->LookupAsciiSymbol("first");
- Handle<String> second = FACTORY->LookupAsciiSymbol("second");
+ Handle<String> first = FACTORY->InternalizeUtf8String("first");
+ Handle<String> second = FACTORY->InternalizeUtf8String("second");
// check for empty
CHECK(!obj->HasLocalProperty(*first));
@@ -632,19 +667,19 @@ TEST(ObjectProperties) {
CHECK(!obj->HasLocalProperty(*first));
CHECK(!obj->HasLocalProperty(*second));
- // check string and symbol match
+ // check string and internalized string match
const char* string1 = "fisk";
Handle<String> s1 = FACTORY->NewStringFromAscii(CStrVector(string1));
obj->SetProperty(
*s1, Smi::FromInt(1), NONE, kNonStrictMode)->ToObjectChecked();
- Handle<String> s1_symbol = FACTORY->LookupAsciiSymbol(string1);
- CHECK(obj->HasLocalProperty(*s1_symbol));
+ Handle<String> s1_string = FACTORY->InternalizeUtf8String(string1);
+ CHECK(obj->HasLocalProperty(*s1_string));
- // check symbol and string match
+ // check internalized string and string match
const char* string2 = "fugl";
- Handle<String> s2_symbol = FACTORY->LookupAsciiSymbol(string2);
+ Handle<String> s2_string = FACTORY->InternalizeUtf8String(string2);
obj->SetProperty(
- *s2_symbol, Smi::FromInt(1), NONE, kNonStrictMode)->ToObjectChecked();
+ *s2_string, Smi::FromInt(1), NONE, kNonStrictMode)->ToObjectChecked();
Handle<String> s2 = FACTORY->NewStringFromAscii(CStrVector(string2));
CHECK(obj->HasLocalProperty(*s2));
}
@@ -654,14 +689,14 @@ TEST(JSObjectMaps) {
InitializeVM();
v8::HandleScope sc;
- Handle<String> name = FACTORY->LookupAsciiSymbol("theFunction");
+ Handle<String> name = FACTORY->InternalizeUtf8String("theFunction");
Handle<JSFunction> function =
FACTORY->NewFunction(name, FACTORY->undefined_value());
Handle<Map> initial_map =
FACTORY->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
function->set_initial_map(*initial_map);
- Handle<String> prop_name = FACTORY->LookupAsciiSymbol("theSlot");
+ Handle<String> prop_name = FACTORY->InternalizeUtf8String("theSlot");
Handle<JSObject> obj = FACTORY->NewJSObject(function);
// Set a property
@@ -678,7 +713,7 @@ TEST(JSArray) {
InitializeVM();
v8::HandleScope sc;
- Handle<String> name = FACTORY->LookupAsciiSymbol("Array");
+ Handle<String> name = FACTORY->InternalizeUtf8String("Array");
Object* raw_object = Isolate::Current()->context()->global_object()->
GetProperty(*name)->ToObjectChecked();
Handle<JSFunction> function = Handle<JSFunction>(
@@ -725,14 +760,14 @@ TEST(JSObjectCopy) {
InitializeVM();
v8::HandleScope sc;
- String* object_symbol = String::cast(HEAP->Object_symbol());
+ String* object_string = String::cast(HEAP->Object_string());
Object* raw_object = Isolate::Current()->context()->global_object()->
- GetProperty(object_symbol)->ToObjectChecked();
+ GetProperty(object_string)->ToObjectChecked();
JSFunction* object_function = JSFunction::cast(raw_object);
Handle<JSFunction> constructor(object_function);
Handle<JSObject> obj = FACTORY->NewJSObject(constructor);
- Handle<String> first = FACTORY->LookupAsciiSymbol("first");
- Handle<String> second = FACTORY->LookupAsciiSymbol("second");
+ Handle<String> first = FACTORY->InternalizeUtf8String("first");
+ Handle<String> second = FACTORY->InternalizeUtf8String("second");
obj->SetProperty(
*first, Smi::FromInt(1), NONE, kNonStrictMode)->ToObjectChecked();
@@ -787,10 +822,11 @@ TEST(StringAllocation) {
non_ascii[3 * i + 2] = chars[2];
}
Handle<String> non_ascii_sym =
- FACTORY->LookupSymbol(Vector<const char>(non_ascii, 3 * length));
+ FACTORY->InternalizeUtf8String(
+ Vector<const char>(non_ascii, 3 * length));
CHECK_EQ(length, non_ascii_sym->length());
Handle<String> ascii_sym =
- FACTORY->LookupSymbol(Vector<const char>(ascii, length));
+ FACTORY->InternalizeOneByteString(OneByteVector(ascii, length));
CHECK_EQ(length, ascii_sym->length());
Handle<String> non_ascii_str =
FACTORY->NewStringFromUtf8(Vector<const char>(non_ascii, 3 * length));
@@ -806,10 +842,10 @@ TEST(StringAllocation) {
}
-static int ObjectsFoundInHeap(Handle<Object> objs[], int size) {
+static int ObjectsFoundInHeap(Heap* heap, Handle<Object> objs[], int size) {
// Count the number of objects found in the heap.
int found_count = 0;
- HeapIterator iterator;
+ HeapIterator iterator(heap);
for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
for (int i = 0; i < size; i++) {
if (*objs[i] == obj) {
@@ -855,7 +891,7 @@ TEST(Iteration) {
objs[next_objs_index++] = Handle<Map>(HeapObject::cast(*objs[0])->map());
CHECK_EQ(objs_count, next_objs_index);
- CHECK_EQ(objs_count, ObjectsFoundInHeap(objs, objs_count));
+ CHECK_EQ(objs_count, ObjectsFoundInHeap(HEAP, objs, objs_count));
}
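
HeapIterator now requires the Heap to walk, in line with the isolate-threading theme. A counting helper in the new style:

    static int CountHeapObjects(Heap* heap) {
      HeapIterator iterator(heap);
      int count = 0;
      for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
        count++;  // visits every live object the iterator exposes
      }
      return count;
    }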
@@ -967,7 +1003,7 @@ TEST(TestCodeFlushing) {
" var z = x + y;"
"};"
"foo()";
- Handle<String> foo_name = FACTORY->LookupAsciiSymbol("foo");
+ Handle<String> foo_name = FACTORY->InternalizeUtf8String("foo");
// This compile will add the code to the compilation cache.
{ v8::HandleScope scope;
@@ -982,8 +1018,8 @@ TEST(TestCodeFlushing) {
CHECK(function->shared()->is_compiled());
// The code will survive at least two GCs.
- HEAP->CollectAllGarbage(Heap::kNoGCFlags);
- HEAP->CollectAllGarbage(Heap::kNoGCFlags);
+ HEAP->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
+ HEAP->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
CHECK(function->shared()->is_compiled());
// Simulate several GCs that use full marking.
@@ -1014,7 +1050,7 @@ TEST(TestCodeFlushingIncremental) {
" var z = x + y;"
"};"
"foo()";
- Handle<String> foo_name = FACTORY->LookupAsciiSymbol("foo");
+ Handle<String> foo_name = FACTORY->InternalizeUtf8String("foo");
// This compile will add the code to the compilation cache.
{ v8::HandleScope scope;
@@ -1029,14 +1065,13 @@ TEST(TestCodeFlushingIncremental) {
CHECK(function->shared()->is_compiled());
// The code will survive at least two GCs.
- HEAP->CollectAllGarbage(Heap::kNoGCFlags);
- HEAP->CollectAllGarbage(Heap::kNoGCFlags);
+ HEAP->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
+ HEAP->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
CHECK(function->shared()->is_compiled());
// Simulate several GCs that use incremental marking.
const int kAgingThreshold = 6;
for (int i = 0; i < kAgingThreshold; i++) {
- HEAP->incremental_marking()->Abort();
SimulateIncrementalMarking();
HEAP->CollectAllGarbage(Heap::kNoGCFlags);
}
@@ -1051,7 +1086,6 @@ TEST(TestCodeFlushingIncremental) {
// Simulate several GCs that use incremental marking but make sure
// the loop breaks once the function is enqueued as a candidate.
for (int i = 0; i < kAgingThreshold; i++) {
- HEAP->incremental_marking()->Abort();
SimulateIncrementalMarking();
if (!function->next_function_link()->IsUndefined()) break;
HEAP->CollectAllGarbage(Heap::kNoGCFlags);
@@ -1086,11 +1120,11 @@ TEST(TestCodeFlushingIncrementalScavenge) {
" var x = 23;"
"};"
"bar();";
- Handle<String> foo_name = FACTORY->LookupAsciiSymbol("foo");
- Handle<String> bar_name = FACTORY->LookupAsciiSymbol("bar");
+ Handle<String> foo_name = FACTORY->InternalizeUtf8String("foo");
+ Handle<String> bar_name = FACTORY->InternalizeUtf8String("bar");
// Perform one initial GC to enable code flushing.
- HEAP->CollectAllGarbage(Heap::kNoGCFlags);
+ HEAP->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
// This compile will add the code to the compilation cache.
{ v8::HandleScope scope;
@@ -1117,8 +1151,10 @@ TEST(TestCodeFlushingIncrementalScavenge) {
// Bump the code age so that flushing is triggered while the function
// object is still located in new-space.
const int kAgingThreshold = 6;
- function->shared()->set_code_age(kAgingThreshold);
- function2->shared()->set_code_age(kAgingThreshold);
+ for (int i = 0; i < kAgingThreshold; i++) {
+ function->shared()->code()->MakeOlder(static_cast<MarkingParity>(i % 2));
+ function2->shared()->code()->MakeOlder(static_cast<MarkingParity>(i % 2));
+ }
// Simulate incremental marking so that the functions are enqueued as
// code flushing candidates. Then kill one of the functions. Finally
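
Throughout the flushing tests, directly poking shared()->set_code_age(kAgingThreshold) is replaced by aging the Code object itself, alternating the marking parity the way successive full GC cycles would. The idiom:

    const int kAgingThreshold = 6;
    for (int i = 0; i < kAgingThreshold; i++) {
      // Alternate parity so each iteration counts as a distinct GC cycle.
      function->shared()->code()->MakeOlder(static_cast<MarkingParity>(i % 2));
    }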
@@ -1139,6 +1175,8 @@ TEST(TestCodeFlushingIncrementalAbort) {
if (!FLAG_flush_code || !FLAG_flush_code_incrementally) return;
i::FLAG_allow_natives_syntax = true;
InitializeVM();
+ Isolate* isolate = Isolate::Current();
+ Heap* heap = isolate->heap();
v8::HandleScope scope;
const char* source = "function foo() {"
" var x = 42;"
@@ -1146,7 +1184,7 @@ TEST(TestCodeFlushingIncrementalAbort) {
" var z = x + y;"
"};"
"foo()";
- Handle<String> foo_name = FACTORY->LookupAsciiSymbol("foo");
+ Handle<String> foo_name = FACTORY->InternalizeUtf8String("foo");
// This compile will add the code to the compilation cache.
{ v8::HandleScope scope;
@@ -1161,13 +1199,15 @@ TEST(TestCodeFlushingIncrementalAbort) {
CHECK(function->shared()->is_compiled());
// The code will survive at least two GCs.
- HEAP->CollectAllGarbage(Heap::kNoGCFlags);
- HEAP->CollectAllGarbage(Heap::kNoGCFlags);
+ heap->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
+ heap->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
CHECK(function->shared()->is_compiled());
// Bump the code age so that flushing is triggered.
const int kAgingThreshold = 6;
- function->shared()->set_code_age(kAgingThreshold);
+ for (int i = 0; i < kAgingThreshold; i++) {
+ function->shared()->code()->MakeOlder(static_cast<MarkingParity>(i % 2));
+ }
// Simulate incremental marking so that the function is enqueued as
// code flushing candidate.
@@ -1177,9 +1217,9 @@ TEST(TestCodeFlushingIncrementalAbort) {
// is running so that incremental marking aborts and code flushing is
// disabled.
int position = 0;
- Handle<Object> breakpoint_object(Smi::FromInt(0));
- ISOLATE->debug()->SetBreakPoint(function, breakpoint_object, &position);
- ISOLATE->debug()->ClearAllBreakPoints();
+ Handle<Object> breakpoint_object(Smi::FromInt(0), isolate);
+ isolate->debug()->SetBreakPoint(function, breakpoint_object, &position);
+ isolate->debug()->ClearAllBreakPoints();
// Force optimization now that code flushing is disabled.
{ v8::HandleScope scope;
@@ -1187,7 +1227,7 @@ TEST(TestCodeFlushingIncrementalAbort) {
}
// Simulate one final GC to make sure the candidate queue is sane.
- HEAP->CollectAllGarbage(Heap::kNoGCFlags);
+ heap->CollectAllGarbage(Heap::kNoGCFlags);
CHECK(function->shared()->is_compiled() || !function->IsOptimized());
CHECK(function->is_compiled() || !function->IsOptimized());
}
@@ -1222,8 +1262,14 @@ static int CountOptimizedUserFunctions(v8::Handle<v8::Context> context) {
TEST(TestInternalWeakLists) {
v8::V8::Initialize();
+ // Some flags turn Scavenge collections into Mark-sweep collections
+ // and hence are incompatible with this test case.
+ if (FLAG_gc_global || FLAG_stress_compaction) return;
+
static const int kNumTestContexts = 10;
+ Isolate* isolate = Isolate::Current();
+ Heap* heap = isolate->heap();
v8::HandleScope scope;
v8::Persistent<v8::Context> ctx[kNumTestContexts];
@@ -1270,8 +1316,8 @@ TEST(TestInternalWeakLists) {
}
// Mark compact handles the weak references.
- ISOLATE->compilation_cache()->Clear();
- HEAP->CollectAllGarbage(Heap::kNoGCFlags);
+ isolate->compilation_cache()->Clear();
+ heap->CollectAllGarbage(Heap::kNoGCFlags);
CHECK_EQ(opt ? 4 : 0, CountOptimizedUserFunctions(ctx[i]));
// Get rid of f3 and f5 in the same way.
@@ -1298,7 +1344,7 @@ TEST(TestInternalWeakLists) {
// Dispose the native contexts one by one.
for (int i = 0; i < kNumTestContexts; i++) {
- ctx[i].Dispose();
+ ctx[i].Dispose(ctx[i]->GetIsolate());
ctx[i].Clear();
// Scavenge treats these references as strong.
@@ -1318,14 +1364,16 @@ TEST(TestInternalWeakLists) {
// Count the number of native contexts in the weak list of native contexts
// causing a GC after the specified number of elements.
-static int CountNativeContextsWithGC(int n) {
+static int CountNativeContextsWithGC(Isolate* isolate, int n) {
+ Heap* heap = isolate->heap();
int count = 0;
- Handle<Object> object(HEAP->native_contexts_list());
+ Handle<Object> object(heap->native_contexts_list(), isolate);
while (!object->IsUndefined()) {
count++;
- if (count == n) HEAP->CollectAllGarbage(Heap::kNoGCFlags);
+ if (count == n) heap->CollectAllGarbage(Heap::kNoGCFlags);
object =
- Handle<Object>(Context::cast(*object)->get(Context::NEXT_CONTEXT_LINK));
+ Handle<Object>(Context::cast(*object)->get(Context::NEXT_CONTEXT_LINK),
+ isolate);
}
return count;
}
@@ -1338,13 +1386,16 @@ static int CountOptimizedUserFunctionsWithGC(v8::Handle<v8::Context> context,
int n) {
int count = 0;
Handle<Context> icontext = v8::Utils::OpenHandle(*context);
- Handle<Object> object(icontext->get(Context::OPTIMIZED_FUNCTIONS_LIST));
+ Isolate* isolate = icontext->GetIsolate();
+ Handle<Object> object(icontext->get(Context::OPTIMIZED_FUNCTIONS_LIST),
+ isolate);
while (object->IsJSFunction() &&
!Handle<JSFunction>::cast(object)->IsBuiltin()) {
count++;
- if (count == n) HEAP->CollectAllGarbage(Heap::kNoGCFlags);
+ if (count == n) isolate->heap()->CollectAllGarbage(Heap::kNoGCFlags);
object = Handle<Object>(
- Object::cast(JSFunction::cast(*object)->next_function_link()));
+ Object::cast(JSFunction::cast(*object)->next_function_link()),
+ isolate);
}
return count;
}
@@ -1352,6 +1403,7 @@ static int CountOptimizedUserFunctionsWithGC(v8::Handle<v8::Context> context,
TEST(TestInternalWeakListsTraverseWithGC) {
v8::V8::Initialize();
+ Isolate* isolate = Isolate::Current();
static const int kNumTestContexts = 10;
@@ -1365,7 +1417,7 @@ TEST(TestInternalWeakListsTraverseWithGC) {
for (int i = 0; i < kNumTestContexts; i++) {
ctx[i] = v8::Context::New();
CHECK_EQ(i + 1, CountNativeContexts());
- CHECK_EQ(i + 1, CountNativeContextsWithGC(i / 2 + 1));
+ CHECK_EQ(i + 1, CountNativeContextsWithGC(isolate, i / 2 + 1));
}
bool opt = (FLAG_always_opt && i::V8::UseCrankshaft());
@@ -1410,7 +1462,7 @@ TEST(TestSizeOfObjects) {
HEAP->CollectAllGarbage(Heap::kNoGCFlags);
HEAP->CollectAllGarbage(Heap::kNoGCFlags);
HEAP->CollectAllGarbage(Heap::kNoGCFlags);
- CHECK(HEAP->old_pointer_space()->IsSweepingComplete());
+ CHECK(HEAP->old_pointer_space()->IsLazySweepingComplete());
int initial_size = static_cast<int>(HEAP->SizeOfObjects());
{
@@ -1434,7 +1486,7 @@ TEST(TestSizeOfObjects) {
CHECK_EQ(initial_size, static_cast<int>(HEAP->SizeOfObjects()));
// Advancing the sweeper step-wise should not change the heap size.
- while (!HEAP->old_pointer_space()->IsSweepingComplete()) {
+ while (!HEAP->old_pointer_space()->IsLazySweepingComplete()) {
HEAP->old_pointer_space()->AdvanceSweeper(KB);
CHECK_EQ(initial_size, static_cast<int>(HEAP->SizeOfObjects()));
}
@@ -1445,7 +1497,7 @@ TEST(TestSizeOfObjectsVsHeapIteratorPrecision) {
InitializeVM();
HEAP->EnsureHeapIsIterable();
intptr_t size_of_objects_1 = HEAP->SizeOfObjects();
- HeapIterator iterator;
+ HeapIterator iterator(HEAP);
intptr_t size_of_objects_2 = 0;
for (HeapObject* obj = iterator.next();
obj != NULL;
@@ -1566,7 +1618,7 @@ TEST(CollectingAllAvailableGarbageShrinksNewSpace) {
static int NumberOfGlobalObjects() {
int count = 0;
- HeapIterator iterator;
+ HeapIterator iterator(HEAP);
for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
if (obj->IsGlobalObject()) count++;
}
@@ -1602,12 +1654,12 @@ TEST(LeakNativeContextViaMap) {
ctx2->Global()->Set(v8_str("o"), v8::Int32::New(0));
ctx2->Exit();
ctx1->Exit();
- ctx1.Dispose();
+ ctx1.Dispose(ctx1->GetIsolate());
v8::V8::ContextDisposedNotification();
}
HEAP->CollectAllAvailableGarbage();
CHECK_EQ((snapshot_enabled ? 3 : 2), NumberOfGlobalObjects());
- ctx2.Dispose();
+ ctx2.Dispose(ctx2->GetIsolate());
HEAP->CollectAllAvailableGarbage();
CHECK_EQ(0, NumberOfGlobalObjects());
}
@@ -1641,12 +1693,12 @@ TEST(LeakNativeContextViaFunction) {
ctx2->Global()->Set(v8_str("o"), v8::Int32::New(0));
ctx2->Exit();
ctx1->Exit();
- ctx1.Dispose();
+ ctx1.Dispose(ctx1->GetIsolate());
v8::V8::ContextDisposedNotification();
}
HEAP->CollectAllAvailableGarbage();
CHECK_EQ((snapshot_enabled ? 3 : 2), NumberOfGlobalObjects());
- ctx2.Dispose();
+ ctx2.Dispose(ctx2->GetIsolate());
HEAP->CollectAllAvailableGarbage();
CHECK_EQ(0, NumberOfGlobalObjects());
}
@@ -1678,12 +1730,12 @@ TEST(LeakNativeContextViaMapKeyed) {
ctx2->Global()->Set(v8_str("o"), v8::Int32::New(0));
ctx2->Exit();
ctx1->Exit();
- ctx1.Dispose();
+ ctx1.Dispose(ctx1->GetIsolate());
v8::V8::ContextDisposedNotification();
}
HEAP->CollectAllAvailableGarbage();
CHECK_EQ((snapshot_enabled ? 3 : 2), NumberOfGlobalObjects());
- ctx2.Dispose();
+ ctx2.Dispose(ctx2->GetIsolate());
HEAP->CollectAllAvailableGarbage();
CHECK_EQ(0, NumberOfGlobalObjects());
}
@@ -1719,12 +1771,12 @@ TEST(LeakNativeContextViaMapProto) {
ctx2->Global()->Set(v8_str("o"), v8::Int32::New(0));
ctx2->Exit();
ctx1->Exit();
- ctx1.Dispose();
+ ctx1.Dispose(ctx1->GetIsolate());
v8::V8::ContextDisposedNotification();
}
HEAP->CollectAllAvailableGarbage();
CHECK_EQ((snapshot_enabled ? 3 : 2), NumberOfGlobalObjects());
- ctx2.Dispose();
+ ctx2.Dispose(ctx2->GetIsolate());
HEAP->CollectAllAvailableGarbage();
CHECK_EQ(0, NumberOfGlobalObjects());
}
@@ -1738,6 +1790,7 @@ TEST(InstanceOfStubWriteBarrier) {
InitializeVM();
if (!i::V8::UseCrankshaft()) return;
+ if (i::FLAG_force_marking_deque_overflows) return;
v8::HandleScope outer_scope;
{
@@ -1823,10 +1876,11 @@ TEST(PrototypeTransitionClearing) {
// Make sure next prototype is placed on an old-space evacuation candidate.
Handle<JSObject> prototype;
PagedSpace* space = HEAP->old_pointer_space();
- do {
+ {
+ AlwaysAllocateScope always_allocate;
+ SimulateFullSpace(space);
prototype = FACTORY->NewJSArray(32 * KB, FAST_HOLEY_ELEMENTS, TENURED);
- } while (space->FirstPage() == space->LastPage() ||
- !space->LastPage()->Contains(prototype->address()));
+ }
// Add a prototype on an evacuation candidate and verify that transition
// clearing correctly records slots in prototype transition array.
@@ -1945,9 +1999,10 @@ TEST(OptimizedAllocationAlwaysInNewSpace) {
i::FLAG_allow_natives_syntax = true;
InitializeVM();
if (!i::V8::UseCrankshaft() || i::FLAG_always_opt) return;
+ if (i::FLAG_gc_global || i::FLAG_stress_compaction) return;
v8::HandleScope scope;
- FillUpNewSpace(HEAP->new_space());
+ SimulateFullSpace(HEAP->new_space());
AlwaysAllocateScope always_allocate;
v8::Local<v8::Value> res = CompileRun(
"function c(x) {"
@@ -2098,10 +2153,6 @@ TEST(Regress2143b) {
}
-// Implemented in the test-alloc.cc test suite.
-void SimulateFullSpace(PagedSpace* space);
-
-
TEST(ReleaseOverReservedPages) {
i::FLAG_trace_gc = true;
// The optimizer can allocate stuff, messing up the test.
@@ -2124,7 +2175,7 @@ TEST(ReleaseOverReservedPages) {
// Triggering one GC will cause a lot of garbage to be discovered but
// even spread across all allocated pages.
HEAP->CollectAllGarbage(Heap::kNoGCFlags, "triggered for preparation");
- CHECK_EQ(number_of_test_pages + 1, old_pointer_space->CountTotalPages());
+ CHECK_GE(number_of_test_pages + 1, old_pointer_space->CountTotalPages());
// Triggering subsequent GCs should cause at least half of the pages
// to be released to the OS after at most two cycles.
@@ -2155,27 +2206,22 @@ TEST(Regress2237) {
v8::HandleScope inner_scope;
const char* c = "This text is long enough to trigger sliced strings.";
Handle<String> s = FACTORY->NewStringFromAscii(CStrVector(c));
- CHECK(s->IsSeqAsciiString());
+ CHECK(s->IsSeqOneByteString());
CHECK(HEAP->InNewSpace(*s));
// Generate a sliced string that is based on the above parent and
// lives in old-space.
- FillUpNewSpace(HEAP->new_space());
+ SimulateFullSpace(HEAP->new_space());
AlwaysAllocateScope always_allocate;
- Handle<String> t;
- // TODO(mstarzinger): Unfortunately FillUpNewSpace() still leaves
- // some slack, so we need to allocate a few sliced strings.
- for (int i = 0; i < 16; i++) {
- t = FACTORY->NewProperSubString(s, 5, 35);
- }
+ Handle<String> t = FACTORY->NewProperSubString(s, 5, 35);
CHECK(t->IsSlicedString());
CHECK(!HEAP->InNewSpace(*t));
*slice.location() = *t.location();
}
- CHECK(SlicedString::cast(*slice)->parent()->IsSeqAsciiString());
+ CHECK(SlicedString::cast(*slice)->parent()->IsSeqOneByteString());
HEAP->CollectAllGarbage(Heap::kNoGCFlags);
- CHECK(SlicedString::cast(*slice)->parent()->IsSeqAsciiString());
+ CHECK(SlicedString::cast(*slice)->parent()->IsSeqOneByteString());
}
@@ -2221,7 +2267,7 @@ TEST(Regress2211) {
// Check values.
CHECK_EQ(hash,
- internal_obj->GetHiddenProperty(heap->identity_hash_symbol()));
+ internal_obj->GetHiddenProperty(heap->identity_hash_string()));
CHECK(value->Equals(obj->GetHiddenValue(v8_str("key string"))));
// Check size.
@@ -2381,7 +2427,7 @@ TEST(IncrementalMarkingClearsPolymorhpicIC) {
v8::Context::GetCurrent()->Global()->Get(v8_str("f"))));
Code* ic_before = FindFirstIC(f->shared()->code(), Code::LOAD_IC);
- CHECK(ic_before->ic_state() == MEGAMORPHIC);
+ CHECK(ic_before->ic_state() == POLYMORPHIC);
// Fire context dispose notification.
v8::V8::ContextDisposedNotification();
@@ -2415,19 +2461,13 @@ class SourceResource: public v8::String::ExternalAsciiStringResource {
};
-TEST(ReleaseStackTraceData) {
+void ReleaseStackTraceDataTest(const char* source) {
// Test that the data retained by the Error.stack accessor is released
// after the first time the accessor is fired. We use external string
// to check whether the data is being released since the external string
// resource's callback is fired when the external string is GC'ed.
InitializeVM();
v8::HandleScope scope;
- static const char* source = "var error = 1; "
- "try { "
- " throw new Error(); "
- "} catch (e) { "
- " error = e; "
- "} ";
SourceResource* resource = new SourceResource(i::StrDup(source));
{
v8::HandleScope scope;
@@ -2436,20 +2476,35 @@ TEST(ReleaseStackTraceData) {
CHECK(!resource->IsDisposed());
}
HEAP->CollectAllAvailableGarbage();
- // External source is being retained by the stack trace.
- CHECK(!resource->IsDisposed());
- CompileRun("error.stack; error.stack;");
- HEAP->CollectAllAvailableGarbage();
// External source has been released.
CHECK(resource->IsDisposed());
-
delete resource;
}
+TEST(ReleaseStackTraceData) {
+ static const char* source1 = "var error = null; "
+ /* Normal Error */ "try { "
+ " throw new Error(); "
+ "} catch (e) { "
+ " error = e; "
+ "} ";
+ static const char* source2 = "var error = null; "
+ /* Stack overflow */ "try { "
+ " (function f() { f(); })(); "
+ "} catch (e) { "
+ " error = e; "
+ "} ";
+ ReleaseStackTraceDataTest(source1);
+ ReleaseStackTraceDataTest(source2);
+}
+
+
TEST(Regression144230) {
InitializeVM();
+ Isolate* isolate = Isolate::Current();
+ Heap* heap = isolate->heap();
v8::HandleScope scope;
// First make sure that the uninitialized CallIC stub is on a single page
@@ -2457,8 +2512,8 @@ TEST(Regression144230) {
{
v8::HandleScope inner_scope;
AlwaysAllocateScope always_allocate;
- SimulateFullSpace(HEAP->code_space());
- ISOLATE->stub_cache()->ComputeCallInitialize(9, RelocInfo::CODE_TARGET);
+ SimulateFullSpace(heap->code_space());
+ isolate->stub_cache()->ComputeCallInitialize(9, RelocInfo::CODE_TARGET);
}
// Second compile a CallIC and execute it once so that it gets patched to
@@ -2466,7 +2521,7 @@ TEST(Regression144230) {
{
v8::HandleScope inner_scope;
AlwaysAllocateScope always_allocate;
- SimulateFullSpace(HEAP->code_space());
+ SimulateFullSpace(heap->code_space());
CompileRun("var o = { f:function(a,b,c,d,e,f,g,h,i) {}};"
"function call() { o.f(1,2,3,4,5,6,7,8,9); };"
"call();");
@@ -2482,26 +2537,402 @@ TEST(Regression144230) {
" 'f' + i + '();');"
"}");
}
- HEAP->CollectAllGarbage(Heap::kNoGCFlags);
+ heap->CollectAllGarbage(Heap::kNoGCFlags);
// Fourth is the tricky part. Make sure the code containing the CallIC is
// visited first without clearing the IC. The shared function info is then
// visited later, causing the CallIC to be cleared.
- Handle<String> name = FACTORY->LookupAsciiSymbol("call");
- Handle<GlobalObject> global(ISOLATE->context()->global_object());
+ Handle<String> name = isolate->factory()->InternalizeUtf8String("call");
+ Handle<GlobalObject> global(isolate->context()->global_object());
MaybeObject* maybe_call = global->GetProperty(*name);
JSFunction* call = JSFunction::cast(maybe_call->ToObjectChecked());
USE(global->SetProperty(*name, Smi::FromInt(0), NONE, kNonStrictMode));
- ISOLATE->compilation_cache()->Clear();
- call->shared()->set_ic_age(HEAP->global_ic_age() + 1);
- Handle<Object> call_code(call->code());
- Handle<Object> call_function(call);
+ isolate->compilation_cache()->Clear();
+ call->shared()->set_ic_age(heap->global_ic_age() + 1);
+ Handle<Object> call_code(call->code(), isolate);
+ Handle<Object> call_function(call, isolate);
// Now we are ready to mess up the heap.
- HEAP->CollectAllGarbage(Heap::kReduceMemoryFootprintMask);
+ heap->CollectAllGarbage(Heap::kReduceMemoryFootprintMask);
// Either heap verification caught the problem already or we go kaboom once
// the CallIC is executed the next time.
USE(global->SetProperty(*name, *call_function, NONE, kNonStrictMode));
CompileRun("call();");
}
+
+
+TEST(Regress159140) {
+ i::FLAG_allow_natives_syntax = true;
+ i::FLAG_flush_code_incrementally = true;
+ InitializeVM();
+ Isolate* isolate = Isolate::Current();
+ Heap* heap = isolate->heap();
+ v8::HandleScope scope;
+
+ // Perform one initial GC to enable code flushing.
+ heap->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
+
+ // Prepare several closures that are all eligible for code flushing
+ // because all reachable ones are not optimized. Make sure that the
+ // optimized code object is directly reachable through a handle so
+ // that it is marked black during incremental marking.
+ Handle<Code> code;
+ {
+ HandleScope inner_scope(isolate);
+ CompileRun("function h(x) {}"
+ "function mkClosure() {"
+ " return function(x) { return x + 1; };"
+ "}"
+ "var f = mkClosure();"
+ "var g = mkClosure();"
+ "f(1); f(2);"
+ "g(1); g(2);"
+ "h(1); h(2);"
+ "%OptimizeFunctionOnNextCall(f); f(3);"
+ "%OptimizeFunctionOnNextCall(h); h(3);");
+
+ Handle<JSFunction> f =
+ v8::Utils::OpenHandle(
+ *v8::Handle<v8::Function>::Cast(
+ v8::Context::GetCurrent()->Global()->Get(v8_str("f"))));
+ CHECK(f->is_compiled());
+ CompileRun("f = null;");
+
+ Handle<JSFunction> g =
+ v8::Utils::OpenHandle(
+ *v8::Handle<v8::Function>::Cast(
+ v8::Context::GetCurrent()->Global()->Get(v8_str("g"))));
+ CHECK(g->is_compiled());
+ const int kAgingThreshold = 6;
+ for (int i = 0; i < kAgingThreshold; i++) {
+ g->code()->MakeOlder(static_cast<MarkingParity>(i % 2));
+ }
+
+ code = inner_scope.CloseAndEscape(Handle<Code>(f->code()));
+ }
+
+ // Simulate incremental marking so that the functions are enqueued as
+ // code flushing candidates. Then optimize one function. Finally
+ // finish the GC to complete code flushing.
+ SimulateIncrementalMarking();
+ CompileRun("%OptimizeFunctionOnNextCall(g); g(3);");
+ heap->CollectAllGarbage(Heap::kNoGCFlags);
+
+ // Unoptimized code is missing and the deoptimizer will go ballistic.
+ CompileRun("g('bozo');");
+}
+
+
+TEST(Regress165495) {
+ i::FLAG_allow_natives_syntax = true;
+ i::FLAG_flush_code_incrementally = true;
+ InitializeVM();
+ Isolate* isolate = Isolate::Current();
+ Heap* heap = isolate->heap();
+ v8::HandleScope scope;
+
+ // Perform one initial GC to enable code flushing.
+ heap->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
+
+ // Prepare an optimized closure that the optimized code map will get
+ // populated. Then age the unoptimized code to trigger code flushing
+ // but make sure the optimized code is unreachable.
+ {
+ HandleScope inner_scope(isolate);
+ CompileRun("function mkClosure() {"
+ " return function(x) { return x + 1; };"
+ "}"
+ "var f = mkClosure();"
+ "f(1); f(2);"
+ "%OptimizeFunctionOnNextCall(f); f(3);");
+
+ Handle<JSFunction> f =
+ v8::Utils::OpenHandle(
+ *v8::Handle<v8::Function>::Cast(
+ v8::Context::GetCurrent()->Global()->Get(v8_str("f"))));
+ CHECK(f->is_compiled());
+ const int kAgingThreshold = 6;
+ for (int i = 0; i < kAgingThreshold; i++) {
+ f->shared()->code()->MakeOlder(static_cast<MarkingParity>(i % 2));
+ }
+
+ CompileRun("f = null;");
+ }
+
+ // Simulate incremental marking so that unoptimized code is flushed
+ // even though it still is cached in the optimized code map.
+ SimulateIncrementalMarking();
+ heap->CollectAllGarbage(Heap::kNoGCFlags);
+
+ // Make a new closure that will get code installed from the code map.
+ // Unoptimized code is missing and the deoptimizer will go ballistic.
+ CompileRun("var g = mkClosure(); g('bozo');");
+}
+
+
+TEST(Regress169209) {
+ i::FLAG_stress_compaction = false;
+ i::FLAG_allow_natives_syntax = true;
+ i::FLAG_flush_code_incrementally = true;
+ InitializeVM();
+ Isolate* isolate = Isolate::Current();
+ Heap* heap = isolate->heap();
+ v8::HandleScope scope;
+
+ // Perform one initial GC to enable code flushing.
+ heap->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
+
+ // Prepare a shared function info eligible for code flushing for which
+ // the unoptimized code will be replaced during optimization.
+ Handle<SharedFunctionInfo> shared1;
+ {
+ HandleScope inner_scope(isolate);
+ CompileRun("function f() { return 'foobar'; }"
+ "function g(x) { if (x) f(); }"
+ "f();"
+ "g(false);"
+ "g(false);");
+
+ Handle<JSFunction> f =
+ v8::Utils::OpenHandle(
+ *v8::Handle<v8::Function>::Cast(
+ v8::Context::GetCurrent()->Global()->Get(v8_str("f"))));
+ CHECK(f->is_compiled());
+ const int kAgingThreshold = 6;
+ for (int i = 0; i < kAgingThreshold; i++) {
+ f->shared()->code()->MakeOlder(static_cast<MarkingParity>(i % 2));
+ }
+
+ shared1 = inner_scope.CloseAndEscape(handle(f->shared(), isolate));
+ }
+
+ // Prepare a shared function info eligible for code flushing that will
+ // represent the dangling tail of the candidate list.
+ Handle<SharedFunctionInfo> shared2;
+ {
+ HandleScope inner_scope(isolate);
+ CompileRun("function flushMe() { return 0; }"
+ "flushMe(1);");
+
+ Handle<JSFunction> f =
+ v8::Utils::OpenHandle(
+ *v8::Handle<v8::Function>::Cast(
+ v8::Context::GetCurrent()->Global()->Get(v8_str("flushMe"))));
+ CHECK(f->is_compiled());
+ const int kAgingThreshold = 6;
+ for (int i = 0; i < kAgingThreshold; i++) {
+ f->shared()->code()->MakeOlder(static_cast<MarkingParity>(i % 2));
+ }
+
+ shared2 = inner_scope.CloseAndEscape(handle(f->shared(), isolate));
+ }
+
+ // Simulate incremental marking and collect code flushing candidates.
+ SimulateIncrementalMarking();
+ CHECK(shared1->code()->gc_metadata() != NULL);
+
+ // Optimize function and make sure the unoptimized code is replaced.
+#ifdef DEBUG
+ FLAG_stop_at = "f";
+#endif
+ CompileRun("%OptimizeFunctionOnNextCall(g);"
+ "g(false);");
+
+ // Finish garbage collection cycle.
+ heap->CollectAllGarbage(Heap::kNoGCFlags);
+ CHECK(shared1->code()->gc_metadata() == NULL);
+}
+
+
+// Helper function that fills new-space, leaving only extra_bytes free.
+static inline void AllocateAllButNBytes(v8::internal::NewSpace* space,
+ int extra_bytes) {
+ int space_remaining = static_cast<int>(
+ *space->allocation_limit_address() - *space->allocation_top_address());
+ CHECK(space_remaining >= extra_bytes);
+ int new_linear_size = space_remaining - extra_bytes;
+ v8::internal::MaybeObject* maybe = space->AllocateRaw(new_linear_size);
+ v8::internal::FreeListNode* node = v8::internal::FreeListNode::cast(maybe);
+ node->set_size(space->heap(), new_linear_size);
+}
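
Regress169928 below uses this helper to leave exactly one JSArray plus its AllocationSiteInfo (and one pointer of slack) free, so the subsequent allocations land flush against the new-space limit:

    AllocateAllButNBytes(HEAP->new_space(),
                         JSArray::kSize + AllocationSiteInfo::kSize + kPointerSize);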
+
+
+TEST(Regress169928) {
+ i::FLAG_allow_natives_syntax = true;
+ i::FLAG_crankshaft = false;
+ InitializeVM();
+ v8::HandleScope scope;
+
+ // Some flags turn Scavenge collections into Mark-sweep collections
+ // and hence are incompatible with this test case.
+ if (FLAG_gc_global || FLAG_stress_compaction) return;
+
+ // Prepare the environment
+ CompileRun("function fastliteralcase(literal, value) {"
+ " literal[0] = value;"
+ " return literal;"
+ "}"
+ "function get_standard_literal() {"
+ " var literal = [1, 2, 3];"
+ " return literal;"
+ "}"
+ "obj = fastliteralcase(get_standard_literal(), 1);"
+ "obj = fastliteralcase(get_standard_literal(), 1.5);"
+ "obj = fastliteralcase(get_standard_literal(), 2);");
+
+ // Prepare the heap
+ v8::Local<v8::String> mote_code_string =
+ v8_str("fastliteralcase(mote, 2.5);");
+
+ v8::Local<v8::String> array_name = v8_str("mote");
+ v8::Context::GetCurrent()->Global()->Set(array_name, v8::Int32::New(0));
+
+ // First make sure we flip spaces
+ HEAP->CollectGarbage(NEW_SPACE);
+
+ // Allocate the object.
+ Handle<FixedArray> array_data = FACTORY->NewFixedArray(2, NOT_TENURED);
+ array_data->set(0, Smi::FromInt(1));
+ array_data->set(1, Smi::FromInt(2));
+
+ AllocateAllButNBytes(HEAP->new_space(),
+ JSArray::kSize + AllocationSiteInfo::kSize +
+ kPointerSize);
+
+ Handle<JSArray> array = FACTORY->NewJSArrayWithElements(array_data,
+ FAST_SMI_ELEMENTS,
+ NOT_TENURED);
+
+ CHECK_EQ(Smi::FromInt(2), array->length());
+ CHECK(array->HasFastSmiOrObjectElements());
+
+ // We need a filler the size of an AllocationSiteInfo object, plus an
+ // extra pointer-sized fill value.
+ MaybeObject* maybe_object = HEAP->AllocateRaw(
+ AllocationSiteInfo::kSize + kPointerSize, NEW_SPACE, OLD_POINTER_SPACE);
+ Object* obj = NULL;
+ CHECK(maybe_object->ToObject(&obj));
+ Address addr_obj = reinterpret_cast<Address>(
+ reinterpret_cast<byte*>(obj - kHeapObjectTag));
+ HEAP->CreateFillerObjectAt(addr_obj,
+ AllocationSiteInfo::kSize + kPointerSize);
+
+ // Give the array a name, making sure not to allocate strings.
+ v8::Handle<v8::Object> array_obj = v8::Utils::ToLocal(array);
+ v8::Context::GetCurrent()->Global()->Set(array_name, array_obj);
+
+ // This should crash with a protection violation if we are running a build
+ // with the bug.
+ AlwaysAllocateScope aa_scope;
+ v8::Script::Compile(mote_code_string)->Run();
+}
+
+
+TEST(Regress168801) {
+ i::FLAG_always_compact = true;
+ i::FLAG_cache_optimized_code = false;
+ i::FLAG_allow_natives_syntax = true;
+ i::FLAG_flush_code_incrementally = true;
+ InitializeVM();
+ Isolate* isolate = Isolate::Current();
+ Heap* heap = isolate->heap();
+ v8::HandleScope scope;
+
+ // Perform one initial GC to enable code flushing.
+ heap->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
+
+ // Ensure the code ends up on an evacuation candidate.
+ SimulateFullSpace(heap->code_space());
+
+ // Prepare an unoptimized function that is eligible for code flushing.
+ Handle<JSFunction> function;
+ {
+ HandleScope inner_scope(isolate);
+ CompileRun("function mkClosure() {"
+ " return function(x) { return x + 1; };"
+ "}"
+ "var f = mkClosure();"
+ "f(1); f(2);");
+
+ Handle<JSFunction> f =
+ v8::Utils::OpenHandle(
+ *v8::Handle<v8::Function>::Cast(
+ v8::Context::GetCurrent()->Global()->Get(v8_str("f"))));
+ CHECK(f->is_compiled());
+ const int kAgingThreshold = 6;
+ for (int i = 0; i < kAgingThreshold; i++) {
+ f->shared()->code()->MakeOlder(static_cast<MarkingParity>(i % 2));
+ }
+
+ function = inner_scope.CloseAndEscape(handle(*f, isolate));
+ }
+
+ // Simulate incremental marking so that unoptimized function is enqueued as a
+ // candidate for code flushing. The shared function info however will not be
+ // explicitly enqueued.
+ SimulateIncrementalMarking();
+
+ // Now optimize the function so that it is taken off the candidate list.
+ {
+ HandleScope inner_scope(isolate);
+ CompileRun("%OptimizeFunctionOnNextCall(f); f(3);");
+ }
+
+ // This cycle will bust the heap and subsequent cycles will go ballistic.
+ heap->CollectAllGarbage(Heap::kNoGCFlags);
+ heap->CollectAllGarbage(Heap::kNoGCFlags);
+}
+
+
+TEST(Regress173458) {
+ i::FLAG_always_compact = true;
+ i::FLAG_cache_optimized_code = false;
+ i::FLAG_allow_natives_syntax = true;
+ i::FLAG_flush_code_incrementally = true;
+ InitializeVM();
+ Isolate* isolate = Isolate::Current();
+ Heap* heap = isolate->heap();
+ v8::HandleScope scope;
+
+ // Perform one initial GC to enable code flushing.
+ heap->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
+
+ // Ensure the code ends up on an evacuation candidate.
+ SimulateFullSpace(heap->code_space());
+
+ // Prepare an unoptimized function that is eligible for code flushing.
+ Handle<JSFunction> function;
+ {
+ HandleScope inner_scope(isolate);
+ CompileRun("function mkClosure() {"
+ " return function(x) { return x + 1; };"
+ "}"
+ "var f = mkClosure();"
+ "f(1); f(2);");
+
+ Handle<JSFunction> f =
+ v8::Utils::OpenHandle(
+ *v8::Handle<v8::Function>::Cast(
+ v8::Context::GetCurrent()->Global()->Get(v8_str("f"))));
+ CHECK(f->is_compiled());
+ const int kAgingThreshold = 6;
+ for (int i = 0; i < kAgingThreshold; i++) {
+ f->shared()->code()->MakeOlder(static_cast<MarkingParity>(i % 2));
+ }
+
+ function = inner_scope.CloseAndEscape(handle(*f, isolate));
+ }
+
+ // Simulate incremental marking so that unoptimized function is enqueued as a
+ // candidate for code flushing. The shared function info however will not be
+ // explicitly enqueued.
+ SimulateIncrementalMarking();
+
+ // Now enable the debugger which in turn will disable code flushing.
+ CHECK(isolate->debug()->Load());
+
+ // This cycle will bust the heap and subsequent cycles will go ballistic.
+ heap->CollectAllGarbage(Heap::kNoGCFlags);
+ heap->CollectAllGarbage(Heap::kNoGCFlags);
+}
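
The regression tests above share one scaffold: compile a closure inside an inner scope, age its code, and escape only the piece that must stay alive (the SharedFunctionInfo, Code, or JSFunction) so everything else remains flushable. Sketched with the helpers this file already uses (CompileRun, v8_str):

    Handle<SharedFunctionInfo> shared;
    {
      HandleScope inner_scope(isolate);
      CompileRun("function f(x) { return x + 1; } f(1); f(2);");
      Handle<JSFunction> f = v8::Utils::OpenHandle(
          *v8::Handle<v8::Function>::Cast(
              v8::Context::GetCurrent()->Global()->Get(v8_str("f"))));
      // Keep the SharedFunctionInfo; drop the JSFunction so its code can flush.
      shared = inner_scope.CloseAndEscape(handle(f->shared(), isolate));
    }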
diff --git a/src/3rdparty/v8/test/cctest/test-lockers.cc b/src/3rdparty/v8/test/cctest/test-lockers.cc
index 5035f87..cd37546 100644
--- a/src/3rdparty/v8/test/cctest/test-lockers.cc
+++ b/src/3rdparty/v8/test/cctest/test-lockers.cc
@@ -59,9 +59,9 @@ using ::v8::V8;
class KangarooThread : public v8::internal::Thread {
public:
KangarooThread(v8::Isolate* isolate,
- v8::Handle<v8::Context> context, int value)
+ v8::Handle<v8::Context> context)
: Thread("KangarooThread"),
- isolate_(isolate), context_(context), value_(value) {
+ isolate_(isolate), context_(context) {
}
void Run() {
@@ -90,7 +90,6 @@ class KangarooThread : public v8::internal::Thread {
private:
v8::Isolate* isolate_;
Persistent<v8::Context> context_;
- int value_;
};
// Migrates an isolate from one thread to another
@@ -106,7 +105,7 @@ TEST(KangarooIsolates) {
CHECK_EQ(isolate, v8::internal::Isolate::Current());
CompileRun("function getValue() { return 30; }");
}
- KangarooThread thread1(isolate, context, 1);
+ KangarooThread thread1(isolate, context);
thread1.Start();
thread1.Join();
}
@@ -290,6 +289,7 @@ TEST(IsolateNestedLocking) {
threads.Add(new IsolateNestedLockingThread(isolate));
}
StartJoinAndDeleteThreads(threads);
+ isolate->Dispose();
}
@@ -537,7 +537,7 @@ class LockUnlockLockThread : public JoinableThread {
virtual void Run() {
v8::Locker lock1(isolate_);
CHECK(v8::Locker::IsLocked(isolate_));
- CHECK(!v8::Locker::IsLocked());
+ CHECK(!v8::Locker::IsLocked(CcTest::default_isolate()));
{
v8::Isolate::Scope isolate_scope(isolate_);
v8::HandleScope handle_scope;
@@ -547,13 +547,13 @@ class LockUnlockLockThread : public JoinableThread {
{
v8::Unlocker unlock1(isolate_);
CHECK(!v8::Locker::IsLocked(isolate_));
- CHECK(!v8::Locker::IsLocked());
+ CHECK(!v8::Locker::IsLocked(CcTest::default_isolate()));
{
v8::Locker lock2(isolate_);
v8::Isolate::Scope isolate_scope(isolate_);
v8::HandleScope handle_scope;
CHECK(v8::Locker::IsLocked(isolate_));
- CHECK(!v8::Locker::IsLocked());
+ CHECK(!v8::Locker::IsLocked(CcTest::default_isolate()));
v8::Context::Scope context_scope(context_);
CalcFibAndCheck();
}
@@ -585,6 +585,7 @@ TEST(LockUnlockLockMultithreaded) {
threads.Add(new LockUnlockLockThread(isolate, context));
}
StartJoinAndDeleteThreads(threads);
+ isolate->Dispose();
}
class LockUnlockLockDefaultIsolateThread : public JoinableThread {
@@ -595,16 +596,16 @@ class LockUnlockLockDefaultIsolateThread : public JoinableThread {
}
virtual void Run() {
- v8::Locker lock1;
+ v8::Locker lock1(CcTest::default_isolate());
{
v8::HandleScope handle_scope;
v8::Context::Scope context_scope(context_);
CalcFibAndCheck();
}
{
- v8::Unlocker unlock1;
+ v8::Unlocker unlock1(CcTest::default_isolate());
{
- v8::Locker lock2;
+ v8::Locker lock2(CcTest::default_isolate());
v8::HandleScope handle_scope;
v8::Context::Scope context_scope(context_);
CalcFibAndCheck();
@@ -625,7 +626,7 @@ TEST(LockUnlockLockDefaultIsolateMultithreaded) {
#endif
Persistent<v8::Context> context;
{
- v8::Locker locker_;
+ v8::Locker locker_(CcTest::default_isolate());
v8::HandleScope handle_scope;
context = v8::Context::New();
}
@@ -650,7 +651,7 @@ TEST(Regress1433) {
v8::Handle<Script> script = v8::Script::Compile(source);
v8::Handle<Value> result = script->Run();
v8::String::AsciiValue ascii(result);
- context.Dispose();
+ context.Dispose(isolate);
}
isolate->Dispose();
}
@@ -678,7 +679,7 @@ class IsolateGenesisThread : public JoinableThread {
v8::ExtensionConfiguration extensions(count_, extension_names_);
v8::Persistent<v8::Context> context = v8::Context::New(&extensions);
CHECK(i::Isolate::Current()->has_installed_extensions());
- context.Dispose();
+ context.Dispose(isolate);
}
isolate->Dispose();
}
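
test-lockers.cc completes the same isolate-threading migration for the locking API: default-constructed Locker/Unlocker (which implicitly meant the default isolate) is replaced by explicit-isolate constructors, and tests now dispose the isolates they create. The migrated shape, as a sketch (LockedWork is an illustrative name):

    void LockedWork(v8::Isolate* isolate, v8::Persistent<v8::Context> context) {
      v8::Locker lock(isolate);
      CHECK(v8::Locker::IsLocked(isolate));
      {
        v8::Isolate::Scope isolate_scope(isolate);
        v8::HandleScope handle_scope;
        v8::Context::Scope context_scope(context);
        // ... script work happens here ...
      }
      {
        v8::Unlocker unlock(isolate);  // temporarily release the lock
        CHECK(!v8::Locker::IsLocked(isolate));
      }
    }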
diff --git a/src/3rdparty/v8/test/cctest/test-log-stack-tracer.cc b/src/3rdparty/v8/test/cctest/test-log-stack-tracer.cc
index 6847ef7..8fe6916 100644
--- a/src/3rdparty/v8/test/cctest/test-log-stack-tracer.cc
+++ b/src/3rdparty/v8/test/cctest/test-log-stack-tracer.cc
@@ -214,7 +214,8 @@ static bool IsAddressWithinFuncCode(const char* func_name, Address addr) {
// from the calling function. When this function runs, the stack contains
// a C_Entry frame and a Construct frame above the calling function's frame.
static v8::Handle<Value> construct_call(const v8::Arguments& args) {
- i::StackFrameIterator frame_iterator;
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(args.GetIsolate());
+ i::StackFrameIterator frame_iterator(isolate);
CHECK(frame_iterator.frame()->is_exit());
frame_iterator.Advance();
CHECK(frame_iterator.frame()->is_construct());
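The added cast is the usual bridge between the public and internal isolate types: v8::Isolate* and v8::internal::Isolate* refer to the same object, by internal convention rather than public guarantee. In isolation:

i::Isolate* isolate = reinterpret_cast<i::Isolate*>(args.GetIsolate());
i::StackFrameIterator frame_iterator(isolate);  // frame iteration is now per-isolate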
diff --git a/src/3rdparty/v8/test/cctest/test-log.cc b/src/3rdparty/v8/test/cctest/test-log.cc
index 6f2324d..9883bfa 100644
--- a/src/3rdparty/v8/test/cctest/test-log.cc
+++ b/src/3rdparty/v8/test/cctest/test-log.cc
@@ -370,7 +370,8 @@ TEST(LogCallbacks) {
ScopedLoggerInitializer initialize_logger(false);
v8::Persistent<v8::FunctionTemplate> obj =
- v8::Persistent<v8::FunctionTemplate>::New(v8::FunctionTemplate::New());
+ v8::Persistent<v8::FunctionTemplate>::New(v8::Isolate::GetCurrent(),
+ v8::FunctionTemplate::New());
obj->SetClassName(v8_str("Obj"));
v8::Handle<v8::ObjectTemplate> proto = obj->PrototypeTemplate();
v8::Local<v8::Signature> signature = v8::Signature::New(obj);
@@ -392,12 +393,12 @@ TEST(LogCallbacks) {
i::EmbeddedVector<char, 100> ref_data;
i::OS::SNPrintF(ref_data,
- "code-creation,Callback,0x%" V8PRIxPTR ",1,\"method1\"\0",
+ "code-creation,Callback,-3,0x%" V8PRIxPTR ",1,\"method1\"\0",
ObjMethod1);
CHECK_NE(NULL, StrNStr(log.start(), ref_data.start(), log.length()));
- obj.Dispose();
+ obj.Dispose(v8::Isolate::GetCurrent());
}
@@ -420,7 +421,8 @@ TEST(LogAccessorCallbacks) {
ScopedLoggerInitializer initialize_logger(false);
v8::Persistent<v8::FunctionTemplate> obj =
- v8::Persistent<v8::FunctionTemplate>::New(v8::FunctionTemplate::New());
+ v8::Persistent<v8::FunctionTemplate>::New(v8::Isolate::GetCurrent(),
+ v8::FunctionTemplate::New());
obj->SetClassName(v8_str("Obj"));
v8::Handle<v8::ObjectTemplate> inst = obj->InstanceTemplate();
inst->SetAccessor(v8_str("prop1"), Prop1Getter, Prop1Setter);
@@ -435,26 +437,26 @@ TEST(LogAccessorCallbacks) {
EmbeddedVector<char, 100> prop1_getter_record;
i::OS::SNPrintF(prop1_getter_record,
- "code-creation,Callback,0x%" V8PRIxPTR ",1,\"get prop1\"",
+ "code-creation,Callback,-3,0x%" V8PRIxPTR ",1,\"get prop1\"",
Prop1Getter);
CHECK_NE(NULL,
StrNStr(log.start(), prop1_getter_record.start(), log.length()));
EmbeddedVector<char, 100> prop1_setter_record;
i::OS::SNPrintF(prop1_setter_record,
- "code-creation,Callback,0x%" V8PRIxPTR ",1,\"set prop1\"",
+ "code-creation,Callback,-3,0x%" V8PRIxPTR ",1,\"set prop1\"",
Prop1Setter);
CHECK_NE(NULL,
StrNStr(log.start(), prop1_setter_record.start(), log.length()));
EmbeddedVector<char, 100> prop2_getter_record;
i::OS::SNPrintF(prop2_getter_record,
- "code-creation,Callback,0x%" V8PRIxPTR ",1,\"get prop2\"",
+ "code-creation,Callback,-3,0x%" V8PRIxPTR ",1,\"get prop2\"",
Prop2Getter);
CHECK_NE(NULL,
StrNStr(log.start(), prop2_getter_record.start(), log.length()));
- obj.Dispose();
+ obj.Dispose(v8::Isolate::GetCurrent());
}
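Both hunks in this file apply the same Persistent-handle lifecycle change, now parameterized by the isolate; condensed to its shape (using the two-argument New/Dispose overloads seen above):

v8::Isolate* isolate = v8::Isolate::GetCurrent();
v8::Persistent<v8::FunctionTemplate> obj =
    v8::Persistent<v8::FunctionTemplate>::New(isolate, v8::FunctionTemplate::New());
// ... exercise the template ...
obj.Dispose(isolate);  // release against the isolate that owns the handle

The expected log lines also gain a "-3" field after "Callback", matching the new code-creation event format.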
diff --git a/src/3rdparty/v8/test/cctest/test-macro-assembler-x64.cc b/src/3rdparty/v8/test/cctest/test-macro-assembler-x64.cc
index 59eeed9..d4d0edb 100755
--- a/src/3rdparty/v8/test/cctest/test-macro-assembler-x64.cc
+++ b/src/3rdparty/v8/test/cctest/test-macro-assembler-x64.cc
@@ -157,10 +157,9 @@ TEST(SmiMove) {
&actual_size,
true));
CHECK(buffer);
- HandleScope handles;
- MacroAssembler assembler(Isolate::Current(),
- buffer,
- static_cast<int>(actual_size));
+ Isolate* isolate = Isolate::Current();
+ HandleScope handles(isolate);
+ MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
MacroAssembler* masm = &assembler; // Create a pointer for the __ macro.
masm->set_allow_stub_calls(false);
EntryCode(masm);
@@ -247,10 +246,9 @@ TEST(SmiCompare) {
&actual_size,
true));
CHECK(buffer);
- HandleScope handles;
- MacroAssembler assembler(Isolate::Current(),
- buffer,
- static_cast<int>(actual_size));
+ Isolate* isolate = Isolate::Current();
+ HandleScope handles(isolate);
+ MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
MacroAssembler* masm = &assembler;
masm->set_allow_stub_calls(false);
@@ -299,10 +297,9 @@ TEST(Integer32ToSmi) {
&actual_size,
true));
CHECK(buffer);
- HandleScope handles;
- MacroAssembler assembler(Isolate::Current(),
- buffer,
- static_cast<int>(actual_size));
+ Isolate* isolate = Isolate::Current();
+ HandleScope handles(isolate);
+ MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
MacroAssembler* masm = &assembler;
masm->set_allow_stub_calls(false);
@@ -404,7 +401,7 @@ void TestI64PlusConstantToSmi(MacroAssembler* masm,
ASSERT(Smi::IsValid(result));
__ movl(rax, Immediate(id));
__ Move(r8, Smi::FromInt(static_cast<int>(result)));
- __ movq(rcx, x, RelocInfo::NONE);
+ __ movq(rcx, x, RelocInfo::NONE64);
__ movq(r11, rcx);
__ Integer64PlusConstantToSmi(rdx, rcx, y);
__ cmpq(rdx, r8);
@@ -429,10 +426,9 @@ TEST(Integer64PlusConstantToSmi) {
&actual_size,
true));
CHECK(buffer);
- HandleScope handles;
- MacroAssembler assembler(Isolate::Current(),
- buffer,
- static_cast<int>(actual_size));
+ Isolate* isolate = Isolate::Current();
+ HandleScope handles(isolate);
+ MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
MacroAssembler* masm = &assembler;
masm->set_allow_stub_calls(false);
@@ -475,10 +471,9 @@ TEST(SmiCheck) {
&actual_size,
true));
CHECK(buffer);
- HandleScope handles;
- MacroAssembler assembler(Isolate::Current(),
- buffer,
- static_cast<int>(actual_size));
+ Isolate* isolate = Isolate::Current();
+ HandleScope handles(isolate);
+ MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
MacroAssembler* masm = &assembler;
masm->set_allow_stub_calls(false);
@@ -725,10 +720,9 @@ TEST(SmiNeg) {
&actual_size,
true));
CHECK(buffer);
- HandleScope handles;
- MacroAssembler assembler(Isolate::Current(),
- buffer,
- static_cast<int>(actual_size));
+ Isolate* isolate = Isolate::Current();
+ HandleScope handles(isolate);
+ MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
MacroAssembler* masm = &assembler;
masm->set_allow_stub_calls(false);
@@ -815,10 +809,9 @@ TEST(SmiAdd) {
&actual_size,
true));
CHECK(buffer);
- HandleScope handles;
- MacroAssembler assembler(Isolate::Current(),
- buffer,
- static_cast<int>(actual_size));
+ Isolate* isolate = Isolate::Current();
+ HandleScope handles(isolate);
+ MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
MacroAssembler* masm = &assembler;
masm->set_allow_stub_calls(false);
@@ -1007,10 +1000,9 @@ TEST(SmiSub) {
&actual_size,
true));
CHECK(buffer);
- HandleScope handles;
- MacroAssembler assembler(Isolate::Current(),
- buffer,
- static_cast<int>(actual_size));
+ Isolate* isolate = Isolate::Current();
+ HandleScope handles(isolate);
+ MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
MacroAssembler* masm = &assembler;
masm->set_allow_stub_calls(false);
@@ -1099,10 +1091,9 @@ TEST(SmiMul) {
&actual_size,
true));
CHECK(buffer);
- HandleScope handles;
- MacroAssembler assembler(Isolate::Current(),
- buffer,
- static_cast<int>(actual_size));
+ Isolate* isolate = Isolate::Current();
+ HandleScope handles(isolate);
+ MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
MacroAssembler* masm = &assembler;
masm->set_allow_stub_calls(false);
@@ -1207,10 +1198,9 @@ TEST(SmiDiv) {
&actual_size,
true));
CHECK(buffer);
- HandleScope handles;
- MacroAssembler assembler(Isolate::Current(),
- buffer,
- static_cast<int>(actual_size));
+ Isolate* isolate = Isolate::Current();
+ HandleScope handles(isolate);
+ MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
MacroAssembler* masm = &assembler;
masm->set_allow_stub_calls(false);
@@ -1319,10 +1309,9 @@ TEST(SmiMod) {
&actual_size,
true));
CHECK(buffer);
- HandleScope handles;
- MacroAssembler assembler(Isolate::Current(),
- buffer,
- static_cast<int>(actual_size));
+ Isolate* isolate = Isolate::Current();
+ HandleScope handles(isolate);
+ MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
MacroAssembler* masm = &assembler;
masm->set_allow_stub_calls(false);
@@ -1417,10 +1406,9 @@ TEST(SmiIndex) {
&actual_size,
true));
CHECK(buffer);
- HandleScope handles;
- MacroAssembler assembler(Isolate::Current(),
- buffer,
- static_cast<int>(actual_size));
+ Isolate* isolate = Isolate::Current();
+ HandleScope handles(isolate);
+ MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
MacroAssembler* masm = &assembler;
masm->set_allow_stub_calls(false);
@@ -1488,10 +1476,9 @@ TEST(SmiSelectNonSmi) {
&actual_size,
true));
CHECK(buffer);
- HandleScope handles;
- MacroAssembler assembler(Isolate::Current(),
- buffer,
- static_cast<int>(actual_size));
+ Isolate* isolate = Isolate::Current();
+ HandleScope handles(isolate);
+ MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
MacroAssembler* masm = &assembler;
masm->set_allow_stub_calls(false); // Avoid inline checks.
@@ -1569,10 +1556,9 @@ TEST(SmiAnd) {
&actual_size,
true));
CHECK(buffer);
- HandleScope handles;
- MacroAssembler assembler(Isolate::Current(),
- buffer,
- static_cast<int>(actual_size));
+ Isolate* isolate = Isolate::Current();
+ HandleScope handles(isolate);
+ MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
MacroAssembler* masm = &assembler;
masm->set_allow_stub_calls(false);
@@ -1652,10 +1638,9 @@ TEST(SmiOr) {
&actual_size,
true));
CHECK(buffer);
- HandleScope handles;
- MacroAssembler assembler(Isolate::Current(),
- buffer,
- static_cast<int>(actual_size));
+ Isolate* isolate = Isolate::Current();
+ HandleScope handles(isolate);
+ MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
MacroAssembler* masm = &assembler;
masm->set_allow_stub_calls(false);
@@ -1737,10 +1722,9 @@ TEST(SmiXor) {
&actual_size,
true));
CHECK(buffer);
- HandleScope handles;
- MacroAssembler assembler(Isolate::Current(),
- buffer,
- static_cast<int>(actual_size));
+ Isolate* isolate = Isolate::Current();
+ HandleScope handles(isolate);
+ MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
MacroAssembler* masm = &assembler;
masm->set_allow_stub_calls(false);
@@ -1806,10 +1790,9 @@ TEST(SmiNot) {
&actual_size,
true));
CHECK(buffer);
- HandleScope handles;
- MacroAssembler assembler(Isolate::Current(),
- buffer,
- static_cast<int>(actual_size));
+ Isolate* isolate = Isolate::Current();
+ HandleScope handles(isolate);
+ MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
MacroAssembler* masm = &assembler;
masm->set_allow_stub_calls(false);
@@ -1904,10 +1887,9 @@ TEST(SmiShiftLeft) {
&actual_size,
true));
CHECK(buffer);
- HandleScope handles;
- MacroAssembler assembler(Isolate::Current(),
- buffer,
- static_cast<int>(actual_size));
+ Isolate* isolate = Isolate::Current();
+ HandleScope handles(isolate);
+ MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
MacroAssembler* masm = &assembler;
masm->set_allow_stub_calls(false);
@@ -2012,10 +1994,9 @@ TEST(SmiShiftLogicalRight) {
&actual_size,
true));
CHECK(buffer);
- HandleScope handles;
- MacroAssembler assembler(Isolate::Current(),
- buffer,
- static_cast<int>(actual_size));
+ Isolate* isolate = Isolate::Current();
+ HandleScope handles(isolate);
+ MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
MacroAssembler* masm = &assembler;
masm->set_allow_stub_calls(false);
@@ -2083,10 +2064,9 @@ TEST(SmiShiftArithmeticRight) {
&actual_size,
true));
CHECK(buffer);
- HandleScope handles;
- MacroAssembler assembler(Isolate::Current(),
- buffer,
- static_cast<int>(actual_size));
+ Isolate* isolate = Isolate::Current();
+ HandleScope handles(isolate);
+ MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
MacroAssembler* masm = &assembler;
masm->set_allow_stub_calls(false);
@@ -2149,10 +2129,9 @@ TEST(PositiveSmiTimesPowerOfTwoToInteger64) {
&actual_size,
true));
CHECK(buffer);
- HandleScope handles;
- MacroAssembler assembler(Isolate::Current(),
- buffer,
- static_cast<int>(actual_size));
+ Isolate* isolate = Isolate::Current();
+ HandleScope handles(isolate);
+ MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
MacroAssembler* masm = &assembler;
masm->set_allow_stub_calls(false);
@@ -2194,10 +2173,9 @@ TEST(OperandOffset) {
&actual_size,
true));
CHECK(buffer);
- HandleScope handles;
- MacroAssembler assembler(Isolate::Current(),
- buffer,
- static_cast<int>(actual_size));
+ Isolate* isolate = Isolate::Current();
+ HandleScope handles(isolate);
+ MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));
MacroAssembler* masm = &assembler;
masm->set_allow_stub_calls(false);
@@ -2227,7 +2205,7 @@ TEST(OperandOffset) {
__ lea(r13, Operand(rbp, -3 * kPointerSize));
__ lea(rbx, Operand(rbp, -5 * kPointerSize));
__ movl(rcx, Immediate(2));
- __ movq(r8, reinterpret_cast<uintptr_t>(&data[128]), RelocInfo::NONE);
+ __ movq(r8, reinterpret_cast<uintptr_t>(&data[128]), RelocInfo::NONE64);
__ movl(rax, Immediate(1));
Operand sp0 = Operand(rsp, 0);
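Every test in this file receives the same mechanical prologue rewrite, shown once here rather than per hunk (verbatim from the pattern above):

Isolate* isolate = Isolate::Current();
HandleScope handles(isolate);  // HandleScope now requires an explicit isolate
MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size));

The RelocInfo::NONE -> RelocInfo::NONE64 renames likewise mark the 64-bit immediate moves explicitly.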
diff --git a/src/3rdparty/v8/test/cctest/test-mark-compact.cc b/src/3rdparty/v8/test/cctest/test-mark-compact.cc
index c0ab763..ab819b7 100644
--- a/src/3rdparty/v8/test/cctest/test-mark-compact.cc
+++ b/src/3rdparty/v8/test/cctest/test-mark-compact.cc
@@ -53,6 +53,7 @@ static void InitializeVM() {
TEST(MarkingDeque) {
+ InitializeVM();
int mem_size = 20 * kPointerSize;
byte* mem = NewArray<byte>(20*kPointerSize);
Address low = reinterpret_cast<Address>(mem);
@@ -60,19 +61,20 @@ TEST(MarkingDeque) {
MarkingDeque s;
s.Initialize(low, high);
- Address address = NULL;
+ Address original_address = reinterpret_cast<Address>(&s);
+ Address current_address = original_address;
while (!s.IsFull()) {
- s.PushBlack(HeapObject::FromAddress(address));
- address += kPointerSize;
+ s.PushBlack(HeapObject::FromAddress(current_address));
+ current_address += kPointerSize;
}
while (!s.IsEmpty()) {
Address value = s.Pop()->address();
- address -= kPointerSize;
- CHECK_EQ(address, value);
+ current_address -= kPointerSize;
+ CHECK_EQ(current_address, value);
}
- CHECK_EQ(NULL, address);
+ CHECK_EQ(original_address, current_address);
DeleteArray(mem);
}
@@ -182,8 +184,8 @@ TEST(MarkCompactCollector) {
JSObject::kHeaderSize)->ToObjectChecked();
// Allocate some garbage.
- String* func_name =
- String::cast(HEAP->LookupAsciiSymbol("theFunction")->ToObjectChecked());
+ String* func_name = String::cast(
+ HEAP->InternalizeUtf8String("theFunction")->ToObjectChecked());
SharedFunctionInfo* function_share = SharedFunctionInfo::cast(
HEAP->AllocateSharedFunctionInfo(func_name)->ToObjectChecked());
JSFunction* function = JSFunction::cast(
@@ -201,8 +203,8 @@ TEST(MarkCompactCollector) {
HEAP->AllocateJSObject(function)->ToObjectChecked());
HEAP->CollectGarbage(OLD_POINTER_SPACE);
- func_name =
- String::cast(HEAP->LookupAsciiSymbol("theFunction")->ToObjectChecked());
+ func_name = String::cast(
+ HEAP->InternalizeUtf8String("theFunction")->ToObjectChecked());
CHECK(Isolate::Current()->context()->global_object()->
HasLocalProperty(func_name));
Object* func_value = Isolate::Current()->context()->global_object()->
@@ -212,11 +214,11 @@ TEST(MarkCompactCollector) {
obj = JSObject::cast(HEAP->AllocateJSObject(function)->ToObjectChecked());
String* obj_name =
- String::cast(HEAP->LookupAsciiSymbol("theObject")->ToObjectChecked());
+ String::cast(HEAP->InternalizeUtf8String("theObject")->ToObjectChecked());
Isolate::Current()->context()->global_object()->SetProperty(
obj_name, obj, NONE, kNonStrictMode)->ToObjectChecked();
String* prop_name =
- String::cast(HEAP->LookupAsciiSymbol("theSlot")->ToObjectChecked());
+ String::cast(HEAP->InternalizeUtf8String("theSlot")->ToObjectChecked());
obj->SetProperty(prop_name,
Smi::FromInt(23),
NONE,
@@ -225,7 +227,7 @@ TEST(MarkCompactCollector) {
HEAP->CollectGarbage(OLD_POINTER_SPACE);
obj_name =
- String::cast(HEAP->LookupAsciiSymbol("theObject")->ToObjectChecked());
+ String::cast(HEAP->InternalizeUtf8String("theObject")->ToObjectChecked());
CHECK(Isolate::Current()->context()->global_object()->
HasLocalProperty(obj_name));
CHECK(Isolate::Current()->context()->global_object()->
@@ -233,7 +235,7 @@ TEST(MarkCompactCollector) {
obj = JSObject::cast(Isolate::Current()->context()->global_object()->
GetProperty(obj_name)->ToObjectChecked());
prop_name =
- String::cast(HEAP->LookupAsciiSymbol("theSlot")->ToObjectChecked());
+ String::cast(HEAP->InternalizeUtf8String("theSlot")->ToObjectChecked());
CHECK(obj->GetProperty(prop_name) == Smi::FromInt(23));
}
@@ -304,13 +306,16 @@ TEST(GCCallback) {
static int NumberOfWeakCalls = 0;
-static void WeakPointerCallback(v8::Persistent<v8::Value> handle, void* id) {
+static void WeakPointerCallback(v8::Isolate* isolate,
+ v8::Persistent<v8::Value> handle,
+ void* id) {
ASSERT(id == reinterpret_cast<void*>(1234));
NumberOfWeakCalls++;
- handle.Dispose();
+ handle.Dispose(isolate);
}
TEST(ObjectGroups) {
+ FLAG_incremental_marking = false;
InitializeVM();
GlobalHandles* global_handles = Isolate::Current()->global_handles();
@@ -325,12 +330,15 @@ TEST(ObjectGroups) {
global_handles->Create(HEAP->AllocateFixedArray(1)->ToObjectChecked());
global_handles->MakeWeak(g1s1.location(),
reinterpret_cast<void*>(1234),
+ NULL,
&WeakPointerCallback);
global_handles->MakeWeak(g1s2.location(),
reinterpret_cast<void*>(1234),
+ NULL,
&WeakPointerCallback);
global_handles->MakeWeak(g1c1.location(),
reinterpret_cast<void*>(1234),
+ NULL,
&WeakPointerCallback);
Handle<Object> g2s1 =
@@ -341,12 +349,15 @@ TEST(ObjectGroups) {
global_handles->Create(HEAP->AllocateFixedArray(1)->ToObjectChecked());
global_handles->MakeWeak(g2s1.location(),
reinterpret_cast<void*>(1234),
+ NULL,
&WeakPointerCallback);
global_handles->MakeWeak(g2s2.location(),
reinterpret_cast<void*>(1234),
+ NULL,
&WeakPointerCallback);
global_handles->MakeWeak(g2c1.location(),
reinterpret_cast<void*>(1234),
+ NULL,
&WeakPointerCallback);
Handle<Object> root = global_handles->Create(*g1s1); // make a root.
@@ -376,6 +387,7 @@ TEST(ObjectGroups) {
// Weaken the root.
global_handles->MakeWeak(root.location(),
reinterpret_cast<void*>(1234),
+ NULL,
&WeakPointerCallback);
// But make children strong roots---all the objects (except for children)
// should be collectable now.
@@ -404,9 +416,11 @@ TEST(ObjectGroups) {
// And now make children weak again and collect them.
global_handles->MakeWeak(g1c1.location(),
reinterpret_cast<void*>(1234),
+ NULL,
&WeakPointerCallback);
global_handles->MakeWeak(g2c1.location(),
reinterpret_cast<void*>(1234),
+ NULL,
&WeakPointerCallback);
HEAP->CollectGarbage(OLD_POINTER_SPACE);
@@ -472,6 +486,10 @@ static uintptr_t ReadLong(char* buffer, intptr_t* position, int base) {
}
+// The memory use computed this way is not entirely accurate and depends on
+// the way malloc allocates memory. That's why the memory use may seem to
+// increase even though the sum of the allocated object sizes decreases. It
+// also means that the memory use depends on the kernel and stdlib.
static intptr_t MemoryInUse() {
intptr_t memory_use = 0;
@@ -537,17 +555,18 @@ TEST(BootUpMemoryUse) {
if (initial_memory >= 0) {
InitializeVM();
intptr_t delta = MemoryInUse() - initial_memory;
- if (sizeof(initial_memory) == 8) {
+ printf("delta: %" V8_PTR_PREFIX "d kB\n", delta / 1024);
+ if (sizeof(initial_memory) == 8) { // 64-bit.
if (v8::internal::Snapshot::IsEnabled()) {
- CHECK_LE(delta, 3600 * 1024); // 3396.
+ CHECK_LE(delta, 4000 * 1024);
} else {
- CHECK_LE(delta, 4000 * 1024); // 3948.
+ CHECK_LE(delta, 4500 * 1024);
}
- } else {
+ } else { // 32-bit.
if (v8::internal::Snapshot::IsEnabled()) {
- CHECK_LE(delta, 2500 * 1024); // 2400.
+ CHECK_LE(delta, 2900 * 1024);
} else {
- CHECK_LE(delta, 2860 * 1024); // 2760.
+ CHECK_LE(delta, 3400 * 1024);
}
}
}
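The weak-handle hunks all make the same two changes: the callback now receives the owning isolate, and GlobalHandles::MakeWeak takes one extra argument (NULL in every call here) ahead of the callback. In outline (parameter names are illustrative):

static void WeakPointerCallback(v8::Isolate* isolate,
                                v8::Persistent<v8::Value> handle,
                                void* parameter) {
  // ... record the invocation ...
  handle.Dispose(isolate);  // dispose against the owning isolate
}

global_handles->MakeWeak(handle.location(),
                         parameter,
                         NULL,  // new slot, unused by these tests
                         &WeakPointerCallback);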
diff --git a/src/3rdparty/v8/test/cctest/test-object-observe.cc b/src/3rdparty/v8/test/cctest/test-object-observe.cc
index 374dca4..7fc5bf7 100644
--- a/src/3rdparty/v8/test/cctest/test-object-observe.cc
+++ b/src/3rdparty/v8/test/cctest/test-object-observe.cc
@@ -30,6 +30,7 @@
#include "cctest.h"
using namespace v8;
+namespace i = v8::internal;
namespace {
// Need to create a new isolate when FLAG_harmony_observation is on.
@@ -166,6 +167,30 @@ TEST(DeliveryOrderingReentrant) {
CHECK_EQ(2, CompileRun("ordering[1]")->Int32Value());
}
+TEST(DeliveryOrderingDeliverChangeRecords) {
+ HarmonyIsolate isolate;
+ HandleScope scope;
+ LocalContext context;
+ CompileRun(
+ "var obj = {};"
+ "var ordering = [];"
+ "function observer1() { ordering.push(1); if (!obj.b) obj.b = true };"
+ "function observer2() { ordering.push(2); };"
+ "Object.observe(obj, observer1);"
+ "Object.observe(obj, observer2);"
+ "obj.a = 1;"
+ "Object.deliverChangeRecords(observer2);");
+ CHECK_EQ(4, CompileRun("ordering.length")->Int32Value());
+ // First, observer2 is called due to deliverChangeRecords
+ CHECK_EQ(2, CompileRun("ordering[0]")->Int32Value());
+ // Then, observer1 is called when the stack unwinds
+ CHECK_EQ(1, CompileRun("ordering[1]")->Int32Value());
+ // observer1's mutation causes both 1 and 2 to be reactivated,
+ // with 1 having priority.
+ CHECK_EQ(1, CompileRun("ordering[2]")->Int32Value());
+ CHECK_EQ(2, CompileRun("ordering[3]")->Int32Value());
+}
+
TEST(ObjectHashTableGrowth) {
HarmonyIsolate isolate;
HandleScope scope;
@@ -194,3 +219,218 @@ TEST(ObjectHashTableGrowth) {
CompileRun("obj.foo = 'bar'");
CHECK(CompileRun("ran")->BooleanValue());
}
+
+TEST(GlobalObjectObservation) {
+ HarmonyIsolate isolate;
+ HandleScope scope;
+ LocalContext context;
+ Handle<Object> global_proxy = context->Global();
+ Handle<Object> inner_global = global_proxy->GetPrototype().As<Object>();
+ CompileRun(
+ "var records = [];"
+ "var global = this;"
+ "Object.observe(global, function(r) { [].push.apply(records, r) });"
+ "global.foo = 'hello';");
+ CHECK_EQ(1, CompileRun("records.length")->Int32Value());
+ CHECK(global_proxy->StrictEquals(CompileRun("records[0].object")));
+
+ // Detached, mutating the proxy has no effect.
+ context->DetachGlobal();
+ CompileRun("global.bar = 'goodbye';");
+ CHECK_EQ(1, CompileRun("records.length")->Int32Value());
+
+ // Mutating the global object directly still has an effect...
+ CompileRun("this.bar = 'goodbye';");
+ CHECK_EQ(2, CompileRun("records.length")->Int32Value());
+ CHECK(inner_global->StrictEquals(CompileRun("records[1].object")));
+
+ // Reattached, back to global proxy.
+ context->ReattachGlobal(global_proxy);
+ CompileRun("global.baz = 'again';");
+ CHECK_EQ(3, CompileRun("records.length")->Int32Value());
+ CHECK(global_proxy->StrictEquals(CompileRun("records[2].object")));
+
+ // Attached to a different context, should not leak mutations
+ // to the old context.
+ context->DetachGlobal();
+ {
+ LocalContext context2;
+ context2->DetachGlobal();
+ context2->ReattachGlobal(global_proxy);
+ CompileRun(
+ "var records2 = [];"
+ "Object.observe(this, function(r) { [].push.apply(records2, r) });"
+ "this.bat = 'context2';");
+ CHECK_EQ(1, CompileRun("records2.length")->Int32Value());
+ CHECK(global_proxy->StrictEquals(CompileRun("records2[0].object")));
+ }
+ CHECK_EQ(3, CompileRun("records.length")->Int32Value());
+
+ // Attaching by passing to Context::New
+ {
+ // Delegates to Context::New
+ LocalContext context3(NULL, Handle<ObjectTemplate>(), global_proxy);
+ CompileRun(
+ "var records3 = [];"
+ "Object.observe(this, function(r) { [].push.apply(records3, r) });"
+ "this.qux = 'context3';");
+ CHECK_EQ(1, CompileRun("records3.length")->Int32Value());
+ CHECK(global_proxy->StrictEquals(CompileRun("records3[0].object")));
+ }
+ CHECK_EQ(3, CompileRun("records.length")->Int32Value());
+}
+
+
+struct RecordExpectation {
+ Handle<Value> object;
+ const char* type;
+ const char* name;
+ Handle<Value> old_value;
+};
+
+// TODO(adamk): Use this helper elsewhere in this file.
+static void ExpectRecords(Handle<Value> records,
+ const RecordExpectation expectations[],
+ int num) {
+ CHECK(records->IsArray());
+ Handle<Array> recordArray = records.As<Array>();
+ CHECK_EQ(num, static_cast<int>(recordArray->Length()));
+ for (int i = 0; i < num; ++i) {
+ Handle<Value> record = recordArray->Get(i);
+ CHECK(record->IsObject());
+ Handle<Object> recordObj = record.As<Object>();
+ CHECK(expectations[i].object->StrictEquals(
+ recordObj->Get(String::New("object"))));
+ CHECK(String::New(expectations[i].type)->Equals(
+ recordObj->Get(String::New("type"))));
+ CHECK(String::New(expectations[i].name)->Equals(
+ recordObj->Get(String::New("name"))));
+ if (!expectations[i].old_value.IsEmpty()) {
+ CHECK(expectations[i].old_value->Equals(
+ recordObj->Get(String::New("oldValue"))));
+ }
+ }
+}
+
+#define EXPECT_RECORDS(records, expectations) \
+ ExpectRecords(records, expectations, ARRAY_SIZE(expectations))
+
+TEST(APITestBasicMutation) {
+ HarmonyIsolate isolate;
+ HandleScope scope;
+ LocalContext context;
+ Handle<Object> obj = Handle<Object>::Cast(CompileRun(
+ "var records = [];"
+ "var obj = {};"
+ "function observer(r) { [].push.apply(records, r); };"
+ "Object.observe(obj, observer);"
+ "obj"));
+ obj->Set(String::New("foo"), Number::New(7));
+ obj->Set(1, Number::New(2));
+ // ForceSet should work just as well as Set
+ obj->ForceSet(String::New("foo"), Number::New(3));
+ obj->ForceSet(Number::New(1), Number::New(4));
+ // Setting an indexed element via the property setting method
+ obj->Set(Number::New(1), Number::New(5));
+ // Setting with a non-String, non-uint32 key
+ obj->Set(Number::New(1.1), Number::New(6), DontDelete);
+ obj->Delete(String::New("foo"));
+ obj->Delete(1);
+ obj->ForceDelete(Number::New(1.1));
+
+ // Force delivery
+ // TODO(adamk): Should the above set methods trigger delivery themselves?
+ CompileRun("void 0");
+ CHECK_EQ(9, CompileRun("records.length")->Int32Value());
+ const RecordExpectation expected_records[] = {
+ { obj, "new", "foo", Handle<Value>() },
+ { obj, "new", "1", Handle<Value>() },
+ // Note: use 7 not 1 below, as the latter triggers a nifty VS10 compiler bug
+ // where instead of 1.0, a garbage value would be passed into Number::New.
+ { obj, "updated", "foo", Number::New(7) },
+ { obj, "updated", "1", Number::New(2) },
+ { obj, "updated", "1", Number::New(4) },
+ { obj, "new", "1.1", Handle<Value>() },
+ { obj, "deleted", "foo", Number::New(3) },
+ { obj, "deleted", "1", Number::New(5) },
+ { obj, "deleted", "1.1", Number::New(6) }
+ };
+ EXPECT_RECORDS(CompileRun("records"), expected_records);
+}
+
+TEST(HiddenPrototypeObservation) {
+ HarmonyIsolate isolate;
+ HandleScope scope;
+ LocalContext context;
+ Handle<FunctionTemplate> tmpl = FunctionTemplate::New();
+ tmpl->SetHiddenPrototype(true);
+ tmpl->InstanceTemplate()->Set(String::New("foo"), Number::New(75));
+ Handle<Object> proto = tmpl->GetFunction()->NewInstance();
+ Handle<Object> obj = Object::New();
+ obj->SetPrototype(proto);
+ context->Global()->Set(String::New("obj"), obj);
+ context->Global()->Set(String::New("proto"), proto);
+ CompileRun(
+ "var records;"
+ "function observer(r) { records = r; };"
+ "Object.observe(obj, observer);"
+ "obj.foo = 41;" // triggers a notification
+ "proto.foo = 42;"); // does not trigger a notification
+ const RecordExpectation expected_records[] = {
+ { obj, "updated", "foo", Number::New(75) }
+ };
+ EXPECT_RECORDS(CompileRun("records"), expected_records);
+ obj->SetPrototype(Null());
+ CompileRun("obj.foo = 43");
+ const RecordExpectation expected_records2[] = {
+ { obj, "new", "foo", Handle<Value>() }
+ };
+ EXPECT_RECORDS(CompileRun("records"), expected_records2);
+ obj->SetPrototype(proto);
+ CompileRun(
+ "Object.observe(proto, observer);"
+ "proto.bar = 1;"
+ "Object.unobserve(obj, observer);"
+ "obj.foo = 44;");
+ const RecordExpectation expected_records3[] = {
+ { proto, "new", "bar", Handle<Value>() }
+ // TODO(adamk): The below record should be emitted since proto is observed
+ // and has been modified. Not clear if this happens in practice.
+ // { proto, "updated", "foo", Number::New(43) }
+ };
+ EXPECT_RECORDS(CompileRun("records"), expected_records3);
+}
+
+
+static int NumberOfElements(i::Handle<i::JSWeakMap> map) {
+ return i::ObjectHashTable::cast(map->table())->NumberOfElements();
+}
+
+
+TEST(ObservationWeakMap) {
+ HarmonyIsolate isolate;
+ HandleScope scope;
+ LocalContext context;
+ CompileRun(
+ "var obj = {};"
+ "Object.observe(obj, function(){});"
+ "Object.getNotifier(obj);"
+ "obj = null;");
+ i::Handle<i::JSObject> observation_state = FACTORY->observation_state();
+ i::Handle<i::JSWeakMap> observerInfoMap =
+ i::Handle<i::JSWeakMap>::cast(
+ i::GetProperty(observation_state, "observerInfoMap"));
+ i::Handle<i::JSWeakMap> objectInfoMap =
+ i::Handle<i::JSWeakMap>::cast(
+ i::GetProperty(observation_state, "objectInfoMap"));
+ i::Handle<i::JSWeakMap> notifierTargetMap =
+ i::Handle<i::JSWeakMap>::cast(
+ i::GetProperty(observation_state, "notifierTargetMap"));
+ CHECK_EQ(1, NumberOfElements(observerInfoMap));
+ CHECK_EQ(1, NumberOfElements(objectInfoMap));
+ CHECK_EQ(1, NumberOfElements(notifierTargetMap));
+ HEAP->CollectAllGarbage(i::Heap::kAbortIncrementalMarkingMask);
+ CHECK_EQ(0, NumberOfElements(observerInfoMap));
+ CHECK_EQ(0, NumberOfElements(objectInfoMap));
+ CHECK_EQ(0, NumberOfElements(notifierTargetMap));
+}
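The added tests all drive Object.observe through the same harness; a minimal instance of the pattern (HarmonyIsolate, LocalContext and CompileRun are the cctest helpers used above):

HarmonyIsolate isolate;  // Object.observe requires the harmony-observation flag
HandleScope scope;
LocalContext context;
CompileRun(
    "var records = [];"
    "var obj = {};"
    "Object.observe(obj, function(r) { [].push.apply(records, r); });"
    "obj.x = 1;");
CHECK_EQ(1, CompileRun("records.length")->Int32Value());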
diff --git a/src/3rdparty/v8/test/cctest/test-parsing.cc b/src/3rdparty/v8/test/cctest/test-parsing.cc
index 717c665..e99b5f2 100755..100644
--- a/src/3rdparty/v8/test/cctest/test-parsing.cc
+++ b/src/3rdparty/v8/test/cctest/test-parsing.cc
@@ -439,7 +439,7 @@ void TestCharacterStream(const char* ascii_source,
unsigned end = 0) {
if (end == 0) end = length;
unsigned sub_length = end - start;
- i::HandleScope test_scope;
+ i::HandleScope test_scope(i::Isolate::Current());
i::SmartArrayPointer<i::uc16> uc16_buffer(new i::uc16[length]);
for (unsigned i = 0; i < length; i++) {
uc16_buffer[i] = static_cast<i::uc16>(ascii_source[i]);
@@ -1041,6 +1041,31 @@ TEST(ScopePositions) {
}
+i::Handle<i::String> FormatMessage(i::ScriptDataImpl* data) {
+ i::Handle<i::String> format = v8::Utils::OpenHandle(
+ *v8::String::New(data->BuildMessage()));
+ i::Vector<const char*> args = data->BuildArgs();
+ i::Handle<i::JSArray> args_array = FACTORY->NewJSArray(args.length());
+ for (int i = 0; i < args.length(); i++) {
+ i::JSArray::SetElement(args_array,
+ i,
+ v8::Utils::OpenHandle(*v8::String::New(args[i])),
+ NONE,
+ i::kNonStrictMode);
+ }
+ i::Handle<i::JSObject> builtins(i::Isolate::Current()->js_builtins_object());
+ i::Handle<i::Object> format_fun =
+ i::GetProperty(builtins, "FormatMessage");
+ i::Handle<i::Object> arg_handles[] = { format, args_array };
+ bool has_exception = false;
+ i::Handle<i::Object> result =
+ i::Execution::Call(format_fun, builtins, 2, arg_handles, &has_exception);
+ CHECK(!has_exception);
+ CHECK(result->IsString());
+ return i::Handle<i::String>::cast(result);
+}
+
+
void TestParserSync(i::Handle<i::String> source, int flags) {
uintptr_t stack_limit = i::Isolate::Current()->stack_guard()->real_climit();
bool harmony_scoping = ((i::kLanguageModeMask & flags) == i::EXTENDED_MODE);
@@ -1067,53 +1092,50 @@ void TestParserSync(i::Handle<i::String> source, int flags) {
i::FunctionLiteral* function = parser.ParseProgram();
i::FLAG_harmony_scoping = save_harmony_scoping;
- i::String* type_string = NULL;
+ // Check that preparsing fails iff parsing fails.
if (function == NULL) {
// Extract exception from the parser.
- i::Handle<i::String> type_symbol = FACTORY->LookupAsciiSymbol("type");
CHECK(i::Isolate::Current()->has_pending_exception());
i::MaybeObject* maybe_object = i::Isolate::Current()->pending_exception();
i::JSObject* exception = NULL;
CHECK(maybe_object->To(&exception));
+ i::Handle<i::JSObject> exception_handle(exception);
+ i::Handle<i::String> message_string =
+ i::Handle<i::String>::cast(i::GetProperty(exception_handle, "message"));
- // Get the type string.
- maybe_object = exception->GetProperty(*type_symbol);
- CHECK(maybe_object->To(&type_string));
- }
-
- // Check that preparsing fails iff parsing fails.
- if (data.has_error() && function != NULL) {
- i::OS::Print(
- "Preparser failed on:\n"
- "\t%s\n"
- "with error:\n"
- "\t%s\n"
- "However, the parser succeeded",
- *source->ToCString(), data.BuildMessage());
- CHECK(false);
- } else if (!data.has_error() && function == NULL) {
- i::OS::Print(
- "Parser failed on:\n"
- "\t%s\n"
- "with error:\n"
- "\t%s\n"
- "However, the preparser succeeded",
- *source->ToCString(), *type_string->ToCString());
- CHECK(false);
- }
-
- // Check that preparser and parser produce the same error.
- if (function == NULL) {
- if (!type_string->IsEqualTo(i::CStrVector(data.BuildMessage()))) {
+ if (!data.has_error()) {
+ i::OS::Print(
+ "Parser failed on:\n"
+ "\t%s\n"
+ "with error:\n"
+ "\t%s\n"
+ "However, the preparser succeeded",
+ *source->ToCString(), *message_string->ToCString());
+ CHECK(false);
+ }
+ // Check that preparser and parser produce the same error.
+ i::Handle<i::String> preparser_message = FormatMessage(&data);
+ if (!message_string->Equals(*preparser_message)) {
i::OS::Print(
"Expected parser and preparser to produce the same error on:\n"
"\t%s\n"
"However, found the following error messages\n"
"\tparser: %s\n"
"\tpreparser: %s\n",
- *source->ToCString(), *type_string->ToCString(), data.BuildMessage());
+ *source->ToCString(),
+ *message_string->ToCString(),
+ *preparser_message->ToCString());
CHECK(false);
}
+ } else if (data.has_error()) {
+ i::OS::Print(
+ "Preparser failed on:\n"
+ "\t%s\n"
+ "with error:\n"
+ "\t%s\n"
+ "However, the parser succeeded",
+ *source->ToCString(), *FormatMessage(&data)->ToCString());
+ CHECK(false);
}
}
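The restructured block enforces that preparsing fails iff parsing fails and, on joint failure, that both produce the same message; stripped of its diagnostic printing, the control flow reduces to:

if (function == NULL) {       // parser failed ...
  CHECK(data.has_error());    // ... so the preparser must have failed too,
  CHECK(message_string->Equals(*FormatMessage(&data)));  // with the same message
} else {
  CHECK(!data.has_error());   // parser succeeded: the preparser must succeed
}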
diff --git a/src/3rdparty/v8/test/cctest/test-platform.cc b/src/3rdparty/v8/test/cctest/test-platform.cc
new file mode 100644
index 0000000..7c8d4a4
--- /dev/null
+++ b/src/3rdparty/v8/test/cctest/test-platform.cc
@@ -0,0 +1,99 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <stdlib.h>
+
+#include "cctest.h"
+#include "platform.h"
+
+using namespace ::v8::internal;
+
+TEST(NumberOfCores) {
+ CHECK_GT(OS::NumberOfCores(), 0);
+}
+
+
+#ifdef __GNUC__
+#define ASM __asm__ __volatile__
+
+#if defined(_M_X64) || defined(__x86_64__)
+#define GET_STACK_POINTER() \
+ static int sp_addr = 0; \
+ do { \
+ ASM("mov %%rsp, %0" : "=g" (sp_addr)); \
+ } while (0)
+#elif defined(_M_IX86) || defined(__i386__)
+#define GET_STACK_POINTER() \
+ static int sp_addr = 0; \
+ do { \
+ ASM("mov %%esp, %0" : "=g" (sp_addr)); \
+ } while (0)
+#elif defined(__ARMEL__)
+#define GET_STACK_POINTER() \
+ static int sp_addr = 0; \
+ do { \
+ ASM("str %%sp, %0" : "=g" (sp_addr)); \
+ } while (0)
+#elif defined(__MIPSEL__)
+#define GET_STACK_POINTER() \
+ static int sp_addr = 0; \
+ do { \
+ ASM("sw $sp, %0" : "=g" (sp_addr)); \
+ } while (0)
+#else
+#error Host architecture was not detected as supported by v8
+#endif
+
+void GetStackPointer(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ GET_STACK_POINTER();
+ args.GetReturnValue().Set(v8_num(sp_addr));
+}
+
+TEST(StackAlignment) {
+ v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::HandleScope handle_scope(isolate);
+ v8::Handle<v8::ObjectTemplate> global_template = v8::ObjectTemplate::New();
+ global_template->Set(v8_str("get_stack_pointer"),
+ v8::FunctionTemplate::New(GetStackPointer));
+
+ LocalContext env(NULL, global_template);
+ CompileRun(
+ "function foo() {"
+ " return get_stack_pointer();"
+ "}");
+
+ v8::Local<v8::Object> global_object = env->Global();
+ v8::Local<v8::Function> foo =
+ v8::Local<v8::Function>::Cast(global_object->Get(v8_str("foo")));
+
+ v8::Local<v8::Value> result = foo->Call(global_object, 0, NULL);
+ CHECK_EQ(0, result->Int32Value() % OS::ActivationFrameAlignment());
+}
+
+#undef GET_STACK_POINTER
+#undef ASM
+#endif // __GNUC__
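The new StackAlignment test captures the stack pointer inside a JS-to-C++ callback and checks it against the platform's activation frame alignment. Storing rsp/esp into a static int truncates on 64-bit targets, but the modulo check only needs the low-order bits, since the alignments involved are small powers of two:

GET_STACK_POINTER();  // sp_addr <- current stack pointer (low 32 bits)
CHECK_EQ(0, sp_addr % OS::ActivationFrameAlignment());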
diff --git a/src/3rdparty/v8/test/cctest/test-random.cc b/src/3rdparty/v8/test/cctest/test-random.cc
index 86d6d8c..32b626e 100644
--- a/src/3rdparty/v8/test/cctest/test-random.cc
+++ b/src/3rdparty/v8/test/cctest/test-random.cc
@@ -83,9 +83,10 @@ TEST(CrankshaftRandom) {
CompileRun("function f() { return Math.random(); }");
- Object* symbol = FACTORY->LookupAsciiSymbol("f")->ToObjectChecked();
+ Object* string = FACTORY->InternalizeOneByteString(STATIC_ASCII_VECTOR("f"))->
+ ToObjectChecked();
MaybeObject* fun_object =
- context->global_object()->GetProperty(String::cast(symbol));
+ context->global_object()->GetProperty(String::cast(string));
Handle<JSFunction> fun(JSFunction::cast(fun_object->ToObjectChecked()));
// Optimize function.
diff --git a/src/3rdparty/v8/test/cctest/test-regexp.cc b/src/3rdparty/v8/test/cctest/test-regexp.cc
index e433b92..1a65c06 100644
--- a/src/3rdparty/v8/test/cctest/test-regexp.cc
+++ b/src/3rdparty/v8/test/cctest/test-regexp.cc
@@ -98,7 +98,6 @@ static SmartArrayPointer<const char> Parse(const char* input) {
static bool CheckSimple(const char* input) {
V8::Initialize(NULL);
v8::HandleScope scope;
- unibrow::Utf8InputBuffer<> buffer(input, StrLength(input));
ZoneScope zone_scope(Isolate::Current()->runtime_zone(), DELETE_ON_EXIT);
FlatStringReader reader(Isolate::Current(), CStrVector(input));
RegExpCompileData result;
@@ -117,7 +116,6 @@ struct MinMaxPair {
static MinMaxPair CheckMinMaxMatch(const char* input) {
V8::Initialize(NULL);
v8::HandleScope scope;
- unibrow::Utf8InputBuffer<> buffer(input, StrLength(input));
ZoneScope zone_scope(Isolate::Current()->runtime_zone(), DELETE_ON_EXIT);
FlatStringReader reader(Isolate::Current(), CStrVector(input));
RegExpCompileData result;
@@ -716,7 +714,7 @@ class ContextInitializer {
}
~ContextInitializer() {
env_->Exit();
- env_.Dispose();
+ env_.Dispose(env_->GetIsolate());
}
private:
v8::Persistent<v8::Context> env_;
@@ -759,7 +757,7 @@ TEST(MacroAssemblerNativeSuccess) {
int captures[4] = {42, 37, 87, 117};
Handle<String> input = factory->NewStringFromAscii(CStrVector("foofoo"));
- Handle<SeqAsciiString> seq_input = Handle<SeqAsciiString>::cast(input);
+ Handle<SeqOneByteString> seq_input = Handle<SeqOneByteString>::cast(input);
const byte* start_adr =
reinterpret_cast<const byte*>(seq_input->GetCharsAddress());
@@ -805,7 +803,7 @@ TEST(MacroAssemblerNativeSimple) {
int captures[4] = {42, 37, 87, 117};
Handle<String> input = factory->NewStringFromAscii(CStrVector("foofoo"));
- Handle<SeqAsciiString> seq_input = Handle<SeqAsciiString>::cast(input);
+ Handle<SeqOneByteString> seq_input = Handle<SeqOneByteString>::cast(input);
Address start_adr = seq_input->GetCharsAddress();
NativeRegExpMacroAssembler::Result result =
@@ -823,7 +821,7 @@ TEST(MacroAssemblerNativeSimple) {
CHECK_EQ(-1, captures[3]);
input = factory->NewStringFromAscii(CStrVector("barbarbar"));
- seq_input = Handle<SeqAsciiString>::cast(input);
+ seq_input = Handle<SeqOneByteString>::cast(input);
start_adr = seq_input->GetCharsAddress();
result = Execute(*code,
@@ -863,7 +861,7 @@ TEST(MacroAssemblerNativeSimpleUC16) {
int captures[4] = {42, 37, 87, 117};
const uc16 input_data[6] = {'f', 'o', 'o', 'f', 'o',
- static_cast<uc16>('\xa0')};
+ static_cast<uc16>(0x2603)};
Handle<String> input =
factory->NewStringFromTwoByte(Vector<const uc16>(input_data, 6));
Handle<SeqTwoByteString> seq_input = Handle<SeqTwoByteString>::cast(input);
@@ -884,7 +882,7 @@ TEST(MacroAssemblerNativeSimpleUC16) {
CHECK_EQ(-1, captures[3]);
const uc16 input_data2[9] = {'b', 'a', 'r', 'b', 'a', 'r', 'b', 'a',
- static_cast<uc16>('\xa0')};
+ static_cast<uc16>(0x2603)};
input = factory->NewStringFromTwoByte(Vector<const uc16>(input_data2, 9));
seq_input = Handle<SeqTwoByteString>::cast(input);
start_adr = seq_input->GetCharsAddress();
@@ -924,7 +922,7 @@ TEST(MacroAssemblerNativeBacktrack) {
Handle<Code> code = Handle<Code>::cast(code_object);
Handle<String> input = factory->NewStringFromAscii(CStrVector("foofoo"));
- Handle<SeqAsciiString> seq_input = Handle<SeqAsciiString>::cast(input);
+ Handle<SeqOneByteString> seq_input = Handle<SeqOneByteString>::cast(input);
Address start_adr = seq_input->GetCharsAddress();
NativeRegExpMacroAssembler::Result result =
@@ -967,7 +965,7 @@ TEST(MacroAssemblerNativeBackReferenceASCII) {
Handle<Code> code = Handle<Code>::cast(code_object);
Handle<String> input = factory->NewStringFromAscii(CStrVector("fooofo"));
- Handle<SeqAsciiString> seq_input = Handle<SeqAsciiString>::cast(input);
+ Handle<SeqOneByteString> seq_input = Handle<SeqOneByteString>::cast(input);
Address start_adr = seq_input->GetCharsAddress();
int output[4];
@@ -1072,7 +1070,7 @@ TEST(MacroAssemblernativeAtStart) {
Handle<Code> code = Handle<Code>::cast(code_object);
Handle<String> input = factory->NewStringFromAscii(CStrVector("foobar"));
- Handle<SeqAsciiString> seq_input = Handle<SeqAsciiString>::cast(input);
+ Handle<SeqOneByteString> seq_input = Handle<SeqOneByteString>::cast(input);
Address start_adr = seq_input->GetCharsAddress();
NativeRegExpMacroAssembler::Result result =
@@ -1133,7 +1131,7 @@ TEST(MacroAssemblerNativeBackRefNoCase) {
Handle<String> input =
factory->NewStringFromAscii(CStrVector("aBcAbCABCxYzab"));
- Handle<SeqAsciiString> seq_input = Handle<SeqAsciiString>::cast(input);
+ Handle<SeqOneByteString> seq_input = Handle<SeqOneByteString>::cast(input);
Address start_adr = seq_input->GetCharsAddress();
int output[4];
@@ -1234,7 +1232,7 @@ TEST(MacroAssemblerNativeRegisters) {
// String long enough for test (content doesn't matter).
Handle<String> input =
factory->NewStringFromAscii(CStrVector("foofoofoofoofoo"));
- Handle<SeqAsciiString> seq_input = Handle<SeqAsciiString>::cast(input);
+ Handle<SeqOneByteString> seq_input = Handle<SeqOneByteString>::cast(input);
Address start_adr = seq_input->GetCharsAddress();
int output[6];
@@ -1278,7 +1276,7 @@ TEST(MacroAssemblerStackOverflow) {
// String long enough for test (content doesn't matter).
Handle<String> input =
factory->NewStringFromAscii(CStrVector("dummy"));
- Handle<SeqAsciiString> seq_input = Handle<SeqAsciiString>::cast(input);
+ Handle<SeqOneByteString> seq_input = Handle<SeqOneByteString>::cast(input);
Address start_adr = seq_input->GetCharsAddress();
NativeRegExpMacroAssembler::Result result =
@@ -1325,7 +1323,7 @@ TEST(MacroAssemblerNativeLotsOfRegisters) {
// String long enough for test (content doesn't matter).
Handle<String> input =
factory->NewStringFromAscii(CStrVector("sample text"));
- Handle<SeqAsciiString> seq_input = Handle<SeqAsciiString>::cast(input);
+ Handle<SeqOneByteString> seq_input = Handle<SeqOneByteString>::cast(input);
Address start_adr = seq_input->GetCharsAddress();
int captures[2];
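Two themes run through this file: Handle<SeqAsciiString> becomes Handle<SeqOneByteString> throughout, and the two-byte test inputs swap '\xa0' for 0x2603. The likely motivation for the latter (an inference; the patch does not say) is that 0xa0 fits in a single byte, so under the one-byte representation it no longer guarantees a two-byte string, whereas U+2603 does:

const uc16 fits_one_byte = 0x00a0;    // representable in a one-byte string
const uc16 needs_two_bytes = 0x2603;  // keeps the input in SeqTwoByteString form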
diff --git a/src/3rdparty/v8/test/cctest/test-serialize.cc b/src/3rdparty/v8/test/cctest/test-serialize.cc
index 8279182..b07db0f 100644
--- a/src/3rdparty/v8/test/cctest/test-serialize.cc
+++ b/src/3rdparty/v8/test/cctest/test-serialize.cc
@@ -108,8 +108,6 @@ TEST(ExternalReferenceEncoder) {
Encode(encoder, Builtins::kArrayCode));
CHECK_EQ(make_code(v8::internal::RUNTIME_FUNCTION, Runtime::kAbort),
Encode(encoder, Runtime::kAbort));
- CHECK_EQ(make_code(IC_UTILITY, IC::kLoadCallbackProperty),
- Encode(encoder, IC_Utility(IC::kLoadCallbackProperty)));
ExternalReference keyed_load_function_prototype =
ExternalReference(isolate->counters()->keyed_load_function_prototype());
CHECK_EQ(make_code(STATS_COUNTER, Counters::k_keyed_load_function_prototype),
@@ -132,6 +130,8 @@ TEST(ExternalReferenceEncoder) {
CHECK_EQ(make_code(UNCLASSIFIED, 3),
encoder.Encode(
ExternalReference::roots_array_start(isolate).address()));
+ CHECK_EQ(make_code(UNCLASSIFIED, 52),
+ encoder.Encode(ExternalReference::cpu_features().address()));
}
@@ -146,8 +146,6 @@ TEST(ExternalReferenceDecoder) {
CHECK_EQ(AddressOf(Runtime::kAbort),
decoder.Decode(make_code(v8::internal::RUNTIME_FUNCTION,
Runtime::kAbort)));
- CHECK_EQ(AddressOf(IC_Utility(IC::kLoadCallbackProperty)),
- decoder.Decode(make_code(IC_UTILITY, IC::kLoadCallbackProperty)));
ExternalReference keyed_load_function =
ExternalReference(isolate->counters()->keyed_load_function_prototype());
CHECK_EQ(keyed_load_function.address(),
@@ -249,7 +247,7 @@ static void Serialize() {
// will clear the pending fixups array, which would otherwise contain GC roots
// that would confuse the serialization/deserialization process.
v8::Persistent<v8::Context> env = v8::Context::New();
- env.Dispose();
+ env.Dispose(env->GetIsolate());
WriteToFile(FLAG_testing_serialization_file);
}
@@ -290,8 +288,9 @@ static void SanityCheck() {
#endif
CHECK(Isolate::Current()->global_object()->IsJSObject());
CHECK(Isolate::Current()->native_context()->IsContext());
- CHECK(HEAP->symbol_table()->IsSymbolTable());
- CHECK(!FACTORY->LookupAsciiSymbol("Empty")->IsFailure());
+ CHECK(HEAP->string_table()->IsStringTable());
+ CHECK(!FACTORY->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR("Empty"))->IsFailure());
}
@@ -361,18 +360,20 @@ TEST(PartialSerialization) {
if (!Snapshot::HaveASnapshotToStartFrom()) {
Serializer::Enable();
v8::V8::Initialize();
+ Isolate* isolate = Isolate::Current();
+ Heap* heap = isolate->heap();
v8::Persistent<v8::Context> env = v8::Context::New();
ASSERT(!env.IsEmpty());
env->Enter();
// Make sure all builtin scripts are cached.
- { HandleScope scope;
+ { HandleScope scope(isolate);
for (int i = 0; i < Natives::GetBuiltinsCount(); i++) {
- Isolate::Current()->bootstrapper()->NativesSourceLookup(i);
+ isolate->bootstrapper()->NativesSourceLookup(i);
}
}
- HEAP->CollectAllGarbage(Heap::kNoGCFlags);
- HEAP->CollectAllGarbage(Heap::kNoGCFlags);
+ heap->CollectAllGarbage(Heap::kNoGCFlags);
+ heap->CollectAllGarbage(Heap::kNoGCFlags);
Object* raw_foo;
{
@@ -387,7 +388,7 @@ TEST(PartialSerialization) {
OS::SNPrintF(startup_name, "%s.startup", FLAG_testing_serialization_file);
env->Exit();
- env.Dispose();
+ env.Dispose(env->GetIsolate());
FileByteSink startup_sink(startup_name.start());
StartupSerializer startup_serializer(&startup_sink);
@@ -473,7 +474,7 @@ DEPENDENT_TEST(PartialDeserialization, PartialSerialization) {
CHECK(root->IsString());
}
v8::HandleScope handle_scope;
- Handle<Object> root_handle(root);
+ Handle<Object> root_handle(root, Isolate::Current());
Object* root2;
@@ -493,19 +494,21 @@ TEST(ContextSerialization) {
if (!Snapshot::HaveASnapshotToStartFrom()) {
Serializer::Enable();
v8::V8::Initialize();
+ Isolate* isolate = Isolate::Current();
+ Heap* heap = isolate->heap();
v8::Persistent<v8::Context> env = v8::Context::New();
ASSERT(!env.IsEmpty());
env->Enter();
// Make sure all builtin scripts are cached.
- { HandleScope scope;
+ { HandleScope scope(isolate);
for (int i = 0; i < Natives::GetBuiltinsCount(); i++) {
- Isolate::Current()->bootstrapper()->NativesSourceLookup(i);
+ isolate->bootstrapper()->NativesSourceLookup(i);
}
}
// If we don't do this then we end up with a stray root pointing at the
// context even after we have disposed of env.
- HEAP->CollectAllGarbage(Heap::kNoGCFlags);
+ heap->CollectAllGarbage(Heap::kNoGCFlags);
int file_name_length = StrLength(FLAG_testing_serialization_file) + 10;
Vector<char> startup_name = Vector<char>::New(file_name_length + 1);
@@ -515,7 +518,7 @@ TEST(ContextSerialization) {
Object* raw_context = *(v8::Utils::OpenHandle(*env));
- env.Dispose();
+ env.Dispose(env->GetIsolate());
FileByteSink startup_sink(startup_name.start());
StartupSerializer startup_serializer(&startup_sink);
@@ -569,7 +572,7 @@ DEPENDENT_TEST(ContextDeserialization, ContextSerialization) {
CHECK(root->IsContext());
}
v8::HandleScope handle_scope;
- Handle<Object> root_handle(root);
+ Handle<Object> root_handle(root, Isolate::Current());
Object* root2;
diff --git a/src/3rdparty/v8/test/cctest/test-spaces.cc b/src/3rdparty/v8/test/cctest/test-spaces.cc
index 0e95704..1f362d7 100644
--- a/src/3rdparty/v8/test/cctest/test-spaces.cc
+++ b/src/3rdparty/v8/test/cctest/test-spaces.cc
@@ -121,9 +121,148 @@ class TestMemoryAllocatorScope {
DISALLOW_COPY_AND_ASSIGN(TestMemoryAllocatorScope);
};
+
+// Temporarily sets a given code range in an isolate.
+class TestCodeRangeScope {
+ public:
+ TestCodeRangeScope(Isolate* isolate, CodeRange* code_range)
+ : isolate_(isolate),
+ old_code_range_(isolate->code_range_) {
+ isolate->code_range_ = code_range;
+ }
+
+ ~TestCodeRangeScope() {
+ isolate_->code_range_ = old_code_range_;
+ }
+
+ private:
+ Isolate* isolate_;
+ CodeRange* old_code_range_;
+
+ DISALLOW_COPY_AND_ASSIGN(TestCodeRangeScope);
+};
+
} } // namespace v8::internal
+static void VerifyMemoryChunk(Isolate* isolate,
+ Heap* heap,
+ CodeRange* code_range,
+ size_t reserve_area_size,
+ size_t commit_area_size,
+ size_t second_commit_area_size,
+ Executability executable) {
+ MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
+ CHECK(memory_allocator->SetUp(heap->MaxReserved(),
+ heap->MaxExecutableSize()));
+ TestMemoryAllocatorScope test_allocator_scope(isolate, memory_allocator);
+ TestCodeRangeScope test_code_range_scope(isolate, code_range);
+
+ size_t header_size = (executable == EXECUTABLE)
+ ? MemoryAllocator::CodePageGuardStartOffset()
+ : MemoryChunk::kObjectStartOffset;
+ size_t guard_size = (executable == EXECUTABLE)
+ ? MemoryAllocator::CodePageGuardSize()
+ : 0;
+
+ MemoryChunk* memory_chunk = memory_allocator->AllocateChunk(reserve_area_size,
+ commit_area_size,
+ executable,
+ NULL);
+ size_t alignment = code_range->exists() ?
+ MemoryChunk::kAlignment : OS::CommitPageSize();
+ size_t reserved_size = ((executable == EXECUTABLE))
+ ? RoundUp(header_size + guard_size + reserve_area_size + guard_size,
+ alignment)
+ : RoundUp(header_size + reserve_area_size, OS::CommitPageSize());
+ CHECK(memory_chunk->size() == reserved_size);
+ CHECK(memory_chunk->area_start() < memory_chunk->address() +
+ memory_chunk->size());
+ CHECK(memory_chunk->area_end() <= memory_chunk->address() +
+ memory_chunk->size());
+ CHECK(static_cast<size_t>(memory_chunk->area_size()) == commit_area_size);
+
+ Address area_start = memory_chunk->area_start();
+
+ memory_chunk->CommitArea(second_commit_area_size);
+ CHECK(area_start == memory_chunk->area_start());
+ CHECK(memory_chunk->area_start() < memory_chunk->address() +
+ memory_chunk->size());
+ CHECK(memory_chunk->area_end() <= memory_chunk->address() +
+ memory_chunk->size());
+ CHECK(static_cast<size_t>(memory_chunk->area_size()) ==
+ second_commit_area_size);
+
+ memory_allocator->Free(memory_chunk);
+ memory_allocator->TearDown();
+ delete memory_allocator;
+}
+
+
+static unsigned int Pseudorandom() {
+ static uint32_t lo = 2345;
+ lo = 18273 * (lo & 0xFFFFF) + (lo >> 16);
+ return lo & 0xFFFFF;
+}
+
+
+TEST(MemoryChunk) {
+ OS::SetUp();
+ Isolate* isolate = Isolate::Current();
+ isolate->InitializeLoggingAndCounters();
+ Heap* heap = isolate->heap();
+ CHECK(heap->ConfigureHeapDefault());
+
+ size_t reserve_area_size = 1 * MB;
+ size_t initial_commit_area_size, second_commit_area_size;
+
+ for (int i = 0; i < 100; i++) {
+ initial_commit_area_size = Pseudorandom();
+ second_commit_area_size = Pseudorandom();
+
+ // With CodeRange.
+ CodeRange* code_range = new CodeRange(isolate);
+ const int code_range_size = 32 * MB;
+ if (!code_range->SetUp(code_range_size)) return;
+
+ VerifyMemoryChunk(isolate,
+ heap,
+ code_range,
+ reserve_area_size,
+ initial_commit_area_size,
+ second_commit_area_size,
+ EXECUTABLE);
+
+ VerifyMemoryChunk(isolate,
+ heap,
+ code_range,
+ reserve_area_size,
+ initial_commit_area_size,
+ second_commit_area_size,
+ NOT_EXECUTABLE);
+ delete code_range;
+
+ // Without CodeRange.
+ code_range = NULL;
+ VerifyMemoryChunk(isolate,
+ heap,
+ code_range,
+ reserve_area_size,
+ initial_commit_area_size,
+ second_commit_area_size,
+ EXECUTABLE);
+
+ VerifyMemoryChunk(isolate,
+ heap,
+ code_range,
+ reserve_area_size,
+ initial_commit_area_size,
+ second_commit_area_size,
+ NOT_EXECUTABLE);
+ }
+}
+
+
TEST(MemoryAllocator) {
OS::SetUp();
Isolate* isolate = Isolate::Current();
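Pseudorandom() masks its state to 0xFFFFF, so every commit size it produces is strictly below the 1 MB reserve_area_size used by TEST(MemoryChunk); a quick property check under that observation:

for (int i = 0; i < 100; i++) {
  CHECK_LT(Pseudorandom(), static_cast<unsigned int>(1 * MB));  // always fits
}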
diff --git a/src/3rdparty/v8/test/cctest/test-strings.cc b/src/3rdparty/v8/test/cctest/test-strings.cc
index 5a9ccbb..bf56dd1 100644
--- a/src/3rdparty/v8/test/cctest/test-strings.cc
+++ b/src/3rdparty/v8/test/cctest/test-strings.cc
@@ -1,7 +1,7 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Check that we can traverse very deep stacks of ConsStrings using
-// StringInputBuffer. Check that Get(int) works on very deep stacks
+// StringCharacterStream. Check that Get(int) works on very deep stacks
// of ConsStrings. These operations may not be very fast, but they
// should be possible without getting errors due to too deep recursion.
@@ -15,16 +15,57 @@
#include "cctest.h"
#include "zone-inl.h"
-unsigned int seed = 123;
+// Adapted from http://en.wikipedia.org/wiki/Multiply-with-carry
+class RandomNumberGenerator {
+ public:
+ RandomNumberGenerator() {
+ init();
+ }
-static uint32_t gen() {
- uint64_t z;
- z = seed;
- z *= 279470273;
- z %= 4294967291U;
- seed = static_cast<unsigned int>(z);
- return static_cast<uint32_t>(seed >> 16);
-}
+ void init(uint32_t seed = 0x5688c73e) {
+ static const uint32_t phi = 0x9e3779b9;
+ c = 362436;
+ i = kQSize-1;
+ Q[0] = seed;
+ Q[1] = seed + phi;
+ Q[2] = seed + phi + phi;
+ for (unsigned j = 3; j < kQSize; j++) {
+ Q[j] = Q[j - 3] ^ Q[j - 2] ^ phi ^ j;
+ }
+ }
+
+ uint32_t next() {
+ uint64_t a = 18782;
+ uint32_t r = 0xfffffffe;
+ i = (i + 1) & (kQSize-1);
+ uint64_t t = a * Q[i] + c;
+ c = (t >> 32);
+ uint32_t x = static_cast<uint32_t>(t + c);
+ if (x < c) {
+ x++;
+ c++;
+ }
+ return (Q[i] = r - x);
+ }
+
+ uint32_t next(int max) {
+ return next() % max;
+ }
+
+ bool next(double threshold) {
+ ASSERT(threshold >= 0.0 && threshold <= 1.0);
+ if (threshold == 1.0) return true;
+ if (threshold == 0.0) return false;
+ uint32_t value = next() % 100000;
+ return threshold > static_cast<double>(value)/100000.0;
+ }
+
+ private:
+ static const uint32_t kQSize = 4096;
+ uint32_t Q[kQSize];
+ uint32_t c;
+ uint32_t i;
+};
using namespace v8::internal;
@@ -39,12 +80,10 @@ static void InitializeVM() {
v8::ExtensionConfiguration config(1, extensions);
env = v8::Context::New(&config);
}
- v8::HandleScope scope;
env->Enter();
}
-static const int NUMBER_OF_BUILDING_BLOCKS = 128;
static const int DEEP_DEPTH = 8 * 1024;
static const int SUPER_DEEP_DEPTH = 80 * 1024;
@@ -79,21 +118,42 @@ class AsciiResource: public v8::String::ExternalAsciiStringResource,
};
-static void InitializeBuildingBlocks(
- Handle<String> building_blocks[NUMBER_OF_BUILDING_BLOCKS]) {
+static void InitializeBuildingBlocks(Handle<String>* building_blocks,
+ int bb_length,
+ bool long_blocks,
+ RandomNumberGenerator* rng) {
// A list of pointers that we don't have any interest in cleaning up.
// If they are reachable from a root then leak detection won't complain.
Zone* zone = Isolate::Current()->runtime_zone();
- for (int i = 0; i < NUMBER_OF_BUILDING_BLOCKS; i++) {
- int len = gen() % 16;
- if (len > 14) {
+ for (int i = 0; i < bb_length; i++) {
+ int len = rng->next(16);
+ int slice_head_chars = 0;
+ int slice_tail_chars = 0;
+ int slice_depth = 0;
+ for (int j = 0; j < 3; j++) {
+ if (rng->next(0.35)) slice_depth++;
+ }
+ // Must truncate something for a slice string. Loop until
+ // at least one end will be sliced.
+ while (slice_head_chars == 0 && slice_tail_chars == 0) {
+ slice_head_chars = rng->next(15);
+ slice_tail_chars = rng->next(12);
+ }
+ if (long_blocks) {
+ // Generate building blocks which will never be merged.
+ len += ConsString::kMinLength + 1;
+ } else if (len > 14) {
len += 1234;
}
- switch (gen() % 4) {
+ // Don't slice zero-length strings.
+ if (len == 0) slice_depth = 0;
+ int slice_length = slice_depth*(slice_head_chars + slice_tail_chars);
+ len += slice_length;
+ switch (rng->next(4)) {
case 0: {
uc16 buf[2000];
for (int j = 0; j < len; j++) {
- buf[j] = gen() % 65536;
+ buf[j] = rng->next(0x10000);
}
building_blocks[i] =
FACTORY->NewStringFromTwoByte(Vector<const uc16>(buf, len));
@@ -105,7 +165,7 @@ static void InitializeBuildingBlocks(
case 1: {
char buf[2000];
for (int j = 0; j < len; j++) {
- buf[j] = gen() % 128;
+ buf[j] = rng->next(0x80);
}
building_blocks[i] =
FACTORY->NewStringFromAscii(Vector<const char>(buf, len));
@@ -117,7 +177,7 @@ static void InitializeBuildingBlocks(
case 2: {
uc16* buf = zone->NewArray<uc16>(len);
for (int j = 0; j < len; j++) {
- buf[j] = gen() % 65536;
+ buf[j] = rng->next(0x10000);
}
Resource* resource = new(zone) Resource(Vector<const uc16>(buf, len));
building_blocks[i] = FACTORY->NewExternalStringFromTwoByte(resource);
@@ -127,89 +187,347 @@ static void InitializeBuildingBlocks(
break;
}
case 3: {
- char* buf = NewArray<char>(len);
+ char* buf = zone->NewArray<char>(len);
for (int j = 0; j < len; j++) {
- buf[j] = gen() % 128;
+ buf[j] = rng->next(0x80);
}
- building_blocks[i] =
- FACTORY->NewStringFromAscii(Vector<const char>(buf, len));
+ AsciiResource* resource =
+ new(zone) AsciiResource(Vector<const char>(buf, len));
+ building_blocks[i] = FACTORY->NewExternalStringFromAscii(resource);
for (int j = 0; j < len; j++) {
CHECK_EQ(buf[j], building_blocks[i]->Get(j));
}
- DeleteArray<char>(buf);
break;
}
}
+ for (int j = slice_depth; j > 0; j--) {
+ building_blocks[i] = FACTORY->NewSubString(
+ building_blocks[i],
+ slice_head_chars,
+ building_blocks[i]->length() - slice_tail_chars);
+ }
+ CHECK(len == building_blocks[i]->length() + slice_length);
+ }
+}
+
+
+class ConsStringStats {
+ public:
+ ConsStringStats() {
+ Reset();
+ }
+ void Reset();
+ void VerifyEqual(const ConsStringStats& that) const;
+ unsigned leaves_;
+ unsigned empty_leaves_;
+ unsigned chars_;
+ unsigned left_traversals_;
+ unsigned right_traversals_;
+ private:
+ DISALLOW_COPY_AND_ASSIGN(ConsStringStats);
+};
+
+
+void ConsStringStats::Reset() {
+ leaves_ = 0;
+ empty_leaves_ = 0;
+ chars_ = 0;
+ left_traversals_ = 0;
+ right_traversals_ = 0;
+}
+
+
+void ConsStringStats::VerifyEqual(const ConsStringStats& that) const {
+ CHECK(this->leaves_ == that.leaves_);
+ CHECK(this->empty_leaves_ == that.empty_leaves_);
+ CHECK(this->chars_ == that.chars_);
+ CHECK(this->left_traversals_ == that.left_traversals_);
+ CHECK(this->right_traversals_ == that.right_traversals_);
+}
+
+
+class ConsStringGenerationData {
+ public:
+ static const int kNumberOfBuildingBlocks = 256;
+ explicit ConsStringGenerationData(bool long_blocks);
+ void Reset();
+ inline Handle<String> block(int offset);
+ inline Handle<String> block(uint32_t offset);
+ // Input variables.
+ double early_termination_threshold_;
+ double leftness_;
+ double rightness_;
+ double empty_leaf_threshold_;
+ unsigned max_leaves_;
+ // Cached data.
+ Handle<String> building_blocks_[kNumberOfBuildingBlocks];
+ String* empty_string_;
+ RandomNumberGenerator rng_;
+ // Stats.
+ ConsStringStats stats_;
+ unsigned early_terminations_;
+ private:
+ DISALLOW_COPY_AND_ASSIGN(ConsStringGenerationData);
+};
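
A rough gloss of the input variables above, inferred from how
ConstructRandomString consumes them later in this change:

  // early_termination_threshold_ : chance a subtree stops growing early
  // leftness_ / rightness_       : chance each side keeps recursing
  // empty_leaf_threshold_        : chance a node is built and then
  //                                flattened, producing an empty leaf
  // max_leaves_                  : hard cap on leaves per generated tree
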
+
+
+ConsStringGenerationData::ConsStringGenerationData(bool long_blocks) {
+ rng_.init();
+ InitializeBuildingBlocks(
+ building_blocks_, kNumberOfBuildingBlocks, long_blocks, &rng_);
+ empty_string_ = Isolate::Current()->heap()->empty_string();
+ Reset();
+}
+
+
+Handle<String> ConsStringGenerationData::block(uint32_t offset) {
+ return building_blocks_[offset % kNumberOfBuildingBlocks];
+}
+
+
+Handle<String> ConsStringGenerationData::block(int offset) {
+ CHECK_GE(offset, 0);
+ return building_blocks_[offset % kNumberOfBuildingBlocks];
+}
+
+
+void ConsStringGenerationData::Reset() {
+ early_termination_threshold_ = 0.01;
+ leftness_ = 0.75;
+ rightness_ = 0.75;
+ empty_leaf_threshold_ = 0.02;
+ max_leaves_ = 1000;
+ stats_.Reset();
+ early_terminations_ = 0;
+ rng_.init();
+}
+
+
+void AccumulateStats(ConsString* cons_string, ConsStringStats* stats) {
+ int left_length = cons_string->first()->length();
+ int right_length = cons_string->second()->length();
+ CHECK(cons_string->length() == left_length + right_length);
+ // Check left side.
+ bool left_is_cons = cons_string->first()->IsConsString();
+ if (left_is_cons) {
+ stats->left_traversals_++;
+ AccumulateStats(ConsString::cast(cons_string->first()), stats);
+ } else {
+ CHECK_NE(left_length, 0);
+ stats->leaves_++;
+ stats->chars_ += left_length;
+ }
+ // Check right side.
+ if (cons_string->second()->IsConsString()) {
+ stats->right_traversals_++;
+ AccumulateStats(ConsString::cast(cons_string->second()), stats);
+ } else {
+ if (right_length == 0) {
+ stats->empty_leaves_++;
+ CHECK(!left_is_cons);
+ }
+ stats->leaves_++;
+ stats->chars_ += right_length;
}
}
+void AccumulateStats(Handle<String> cons_string, ConsStringStats* stats) {
+ AssertNoAllocation no_alloc;
+ if (cons_string->IsConsString()) {
+ return AccumulateStats(ConsString::cast(*cons_string), stats);
+ }
+ // This string was flattened by GC.
+ stats->chars_ += cons_string->length();
+}
+
+
+void AccumulateStatsWithOperator(
+ ConsString* cons_string, ConsStringStats* stats) {
+ unsigned offset = 0;
+ int32_t type = cons_string->map()->instance_type();
+ unsigned length = static_cast<unsigned>(cons_string->length());
+ ConsStringIteratorOp op;
+ String* string = op.Operate(cons_string, &offset, &type, &length);
+ CHECK(string != NULL);
+ while (true) {
+ ASSERT(!string->IsConsString());
+ // Accumulate stats.
+ stats->leaves_++;
+ stats->chars_ += string->length();
+ // Check for completion.
+ bool keep_going_fast_check = op.HasMore();
+ string = op.ContinueOperation(&type, &length);
+ if (string == NULL) return;
+ // Verify no false positives for fast check.
+ CHECK(keep_going_fast_check);
+ }
+}
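
The loop above encodes the ConsStringIteratorOp protocol: Operate() returns
the first flat segment and primes the iterator, ContinueOperation() yields
the remaining segments until it returns NULL, and HasMore() is a fast
look-ahead that must never report false while segments remain. A condensed
sketch of the same loop shape, assuming only the calls used above:

  ConsStringIteratorOp op;
  unsigned offset = 0;
  int32_t type = cons_string->map()->instance_type();
  unsigned length = static_cast<unsigned>(cons_string->length());
  for (String* segment = op.Operate(cons_string, &offset, &type, &length);
       segment != NULL;
       segment = op.ContinueOperation(&type, &length)) {
    // Consume the flat segment here.
  }
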
+
+
+void VerifyConsString(Handle<String> root, ConsStringGenerationData* data) {
+ // Verify basic data.
+ CHECK(root->IsConsString());
+ CHECK(static_cast<unsigned>(root->length()) == data->stats_.chars_);
+ // Recursive verify.
+ ConsStringStats stats;
+ AccumulateStats(ConsString::cast(*root), &stats);
+ stats.VerifyEqual(data->stats_);
+ // Iteratively verify.
+ stats.Reset();
+ AccumulateStatsWithOperator(ConsString::cast(*root), &stats);
+ // The stream doesn't see these; copy them over from the generation stats.
+ stats.empty_leaves_ = data->stats_.empty_leaves_;
+ stats.left_traversals_ = data->stats_.left_traversals_;
+ stats.right_traversals_ = data->stats_.right_traversals_;
+ // Adjust total leaves to compensate.
+ stats.leaves_ += stats.empty_leaves_;
+ stats.VerifyEqual(data->stats_);
+}
+
+
+static Handle<String> ConstructRandomString(ConsStringGenerationData* data,
+ unsigned max_recursion) {
+ // Compute termination characteristics.
+ bool terminate = false;
+ bool flat = data->rng_.next(data->empty_leaf_threshold_);
+ bool terminate_early = data->rng_.next(data->early_termination_threshold_);
+ if (terminate_early) data->early_terminations_++;
+ // The obvious condition.
+ terminate |= max_recursion == 0;
+ // Flat cons strings terminate by definition.
+ terminate |= flat;
+ // Cap for max leaves.
+ terminate |= data->stats_.leaves_ >= data->max_leaves_;
+ // Roll the dice.
+ terminate |= terminate_early;
+ // Compute termination characteristics for each side.
+ bool terminate_left = terminate || !data->rng_.next(data->leftness_);
+ bool terminate_right = terminate || !data->rng_.next(data->rightness_);
+ // Generate left string.
+ Handle<String> left;
+ if (terminate_left) {
+ left = data->block(data->rng_.next());
+ data->stats_.leaves_++;
+ data->stats_.chars_ += left->length();
+ } else {
+ data->stats_.left_traversals_++;
+ }
+ // Generate right string.
+ Handle<String> right;
+ if (terminate_right) {
+ right = data->block(data->rng_.next());
+ data->stats_.leaves_++;
+ data->stats_.chars_ += right->length();
+ } else {
+ data->stats_.right_traversals_++;
+ }
+ // Generate the necessary sub-nodes recursively.
+ if (!terminate_right) {
+ // Need to balance generation fairly.
+ if (!terminate_left && data->rng_.next(0.5)) {
+ left = ConstructRandomString(data, max_recursion - 1);
+ }
+ right = ConstructRandomString(data, max_recursion - 1);
+ }
+ if (!terminate_left && left.is_null()) {
+ left = ConstructRandomString(data, max_recursion - 1);
+ }
+ // Build the cons string.
+ Handle<String> root = FACTORY->NewConsString(left, right);
+ CHECK(root->IsConsString() && !root->IsFlat());
+ // Special work needed for flat string.
+ if (flat) {
+ data->stats_.empty_leaves_++;
+ FlattenString(root);
+ CHECK(root->IsConsString() && root->IsFlat());
+ }
+ return root;
+}
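
How the generator composes with the verifiers (a sketch using only functions
defined in this change; the TestStringCharacterStream harness below does the
same with extra flattening steps):

  ConsStringGenerationData data(true);
  Handle<String> root = ConstructRandomString(&data, 200);
  VerifyConsString(root, &data);  // recursive and iterative stats must agree
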
+
+
static Handle<String> ConstructLeft(
- Handle<String> building_blocks[NUMBER_OF_BUILDING_BLOCKS],
+ ConsStringGenerationData* data,
int depth) {
Handle<String> answer = FACTORY->NewStringFromAscii(CStrVector(""));
+ data->stats_.leaves_++;
for (int i = 0; i < depth; i++) {
- answer = FACTORY->NewConsString(
- answer,
- building_blocks[i % NUMBER_OF_BUILDING_BLOCKS]);
+ Handle<String> block = data->block(i);
+ Handle<String> next = FACTORY->NewConsString(answer, block);
+ if (next->IsConsString()) data->stats_.leaves_++;
+ data->stats_.chars_ += block->length();
+ answer = next;
}
+ data->stats_.left_traversals_ = data->stats_.leaves_ - 2;
return answer;
}
static Handle<String> ConstructRight(
- Handle<String> building_blocks[NUMBER_OF_BUILDING_BLOCKS],
+ ConsStringGenerationData* data,
int depth) {
Handle<String> answer = FACTORY->NewStringFromAscii(CStrVector(""));
+ data->stats_.leaves_++;
for (int i = depth - 1; i >= 0; i--) {
- answer = FACTORY->NewConsString(
- building_blocks[i % NUMBER_OF_BUILDING_BLOCKS],
- answer);
+ Handle<String> block = data->block(i);
+ Handle<String> next = FACTORY->NewConsString(block, answer);
+ if (next->IsConsString()) data->stats_.leaves_++;
+ data->stats_.chars_ += block->length();
+ answer = next;
}
+ data->stats_.right_traversals_ = data->stats_.leaves_ - 2;
return answer;
}
static Handle<String> ConstructBalancedHelper(
- Handle<String> building_blocks[NUMBER_OF_BUILDING_BLOCKS],
+ ConsStringGenerationData* data,
int from,
int to) {
CHECK(to > from);
if (to - from == 1) {
- return building_blocks[from % NUMBER_OF_BUILDING_BLOCKS];
+ data->stats_.chars_ += data->block(from)->length();
+ return data->block(from);
}
if (to - from == 2) {
- return FACTORY->NewConsString(
- building_blocks[from % NUMBER_OF_BUILDING_BLOCKS],
- building_blocks[(from+1) % NUMBER_OF_BUILDING_BLOCKS]);
+ data->stats_.chars_ += data->block(from)->length();
+ data->stats_.chars_ += data->block(from+1)->length();
+ return FACTORY->NewConsString(data->block(from), data->block(from+1));
}
Handle<String> part1 =
- ConstructBalancedHelper(building_blocks, from, from + ((to - from) / 2));
+ ConstructBalancedHelper(data, from, from + ((to - from) / 2));
Handle<String> part2 =
- ConstructBalancedHelper(building_blocks, from + ((to - from) / 2), to);
+ ConstructBalancedHelper(data, from + ((to - from) / 2), to);
+ if (part1->IsConsString()) data->stats_.left_traversals_++;
+ if (part2->IsConsString()) data->stats_.right_traversals_++;
return FACTORY->NewConsString(part1, part2);
}
static Handle<String> ConstructBalanced(
- Handle<String> building_blocks[NUMBER_OF_BUILDING_BLOCKS]) {
- return ConstructBalancedHelper(building_blocks, 0, DEEP_DEPTH);
+ ConsStringGenerationData* data, int depth = DEEP_DEPTH) {
+ Handle<String> string = ConstructBalancedHelper(data, 0, depth);
+ data->stats_.leaves_ =
+ data->stats_.left_traversals_ + data->stats_.right_traversals_ + 2;
+ return string;
}
-static StringInputBuffer buffer;
-
+static ConsStringIteratorOp cons_string_iterator_op_1;
+static ConsStringIteratorOp cons_string_iterator_op_2;
static void Traverse(Handle<String> s1, Handle<String> s2) {
int i = 0;
- buffer.Reset(*s1);
- StringInputBuffer buffer2(*s2);
- while (buffer.has_more()) {
- CHECK(buffer2.has_more());
- uint16_t c = buffer.GetNext();
- CHECK_EQ(c, buffer2.GetNext());
+ StringCharacterStream character_stream_1(*s1, &cons_string_iterator_op_1);
+ StringCharacterStream character_stream_2(*s2, &cons_string_iterator_op_2);
+ while (character_stream_1.HasMore()) {
+ CHECK(character_stream_2.HasMore());
+ uint16_t c = character_stream_1.GetNext();
+ CHECK_EQ(c, character_stream_2.GetNext());
i++;
}
+ CHECK(!character_stream_1.HasMore());
+ CHECK(!character_stream_2.HasMore());
CHECK_EQ(s1->length(), i);
CHECK_EQ(s2->length(), i);
}
@@ -217,12 +535,12 @@ static void Traverse(Handle<String> s1, Handle<String> s2) {
static void TraverseFirst(Handle<String> s1, Handle<String> s2, int chars) {
int i = 0;
- buffer.Reset(*s1);
- StringInputBuffer buffer2(*s2);
- while (buffer.has_more() && i < chars) {
- CHECK(buffer2.has_more());
- uint16_t c = buffer.GetNext();
- CHECK_EQ(c, buffer2.GetNext());
+ StringCharacterStream character_stream_1(*s1, &cons_string_iterator_op_1);
+ StringCharacterStream character_stream_2(*s2, &cons_string_iterator_op_2);
+ while (character_stream_1.HasMore() && i < chars) {
+ CHECK(character_stream_2.HasMore());
+ uint16_t c = character_stream_1.GetNext();
+ CHECK_EQ(c, character_stream_2.GetNext());
i++;
}
s1->Get(s1->length() - 1);
@@ -234,14 +552,13 @@ TEST(Traverse) {
printf("TestTraverse\n");
InitializeVM();
v8::HandleScope scope;
- Handle<String> building_blocks[NUMBER_OF_BUILDING_BLOCKS];
ZoneScope zone(Isolate::Current()->runtime_zone(), DELETE_ON_EXIT);
- InitializeBuildingBlocks(building_blocks);
- Handle<String> flat = ConstructBalanced(building_blocks);
+ ConsStringGenerationData data(false);
+ Handle<String> flat = ConstructBalanced(&data);
FlattenString(flat);
- Handle<String> left_asymmetric = ConstructLeft(building_blocks, DEEP_DEPTH);
- Handle<String> right_asymmetric = ConstructRight(building_blocks, DEEP_DEPTH);
- Handle<String> symmetric = ConstructBalanced(building_blocks);
+ Handle<String> left_asymmetric = ConstructLeft(&data, DEEP_DEPTH);
+ Handle<String> right_asymmetric = ConstructRight(&data, DEEP_DEPTH);
+ Handle<String> symmetric = ConstructBalanced(&data);
printf("1\n");
Traverse(flat, symmetric);
printf("2\n");
@@ -250,9 +567,9 @@ TEST(Traverse) {
Traverse(flat, right_asymmetric);
printf("4\n");
Handle<String> left_deep_asymmetric =
- ConstructLeft(building_blocks, SUPER_DEEP_DEPTH);
+ ConstructLeft(&data, SUPER_DEEP_DEPTH);
Handle<String> right_deep_asymmetric =
- ConstructRight(building_blocks, SUPER_DEEP_DEPTH);
+ ConstructRight(&data, SUPER_DEEP_DEPTH);
printf("5\n");
TraverseFirst(left_asymmetric, left_deep_asymmetric, 1050);
printf("6\n");
@@ -275,6 +592,248 @@ TEST(Traverse) {
}
+static void VerifyCharacterStream(
+ String* flat_string, String* cons_string) {
+ // We do not want to test ConsString traversal on a flat string.
+ CHECK(flat_string->IsFlat() && !flat_string->IsConsString());
+ CHECK(cons_string->IsConsString());
+ // TODO(dcarney): Test stream reset as well.
+ int length = flat_string->length();
+ // Start the stream at multiple offsets within the string.
+ int outer_iterations = length > 20 ? 20 : length;
+ for (int j = 0; j <= outer_iterations; j++) {
+ int offset = length * j / outer_iterations;
+ if (offset < 0) offset = 0;
+ // Want to test the offset == length case.
+ if (offset > length) offset = length;
+ StringCharacterStream flat_stream(
+ flat_string, &cons_string_iterator_op_1, static_cast<unsigned>(offset));
+ StringCharacterStream cons_stream(
+ cons_string, &cons_string_iterator_op_2, static_cast<unsigned>(offset));
+ for (int i = offset; i < length; i++) {
+ uint16_t c = flat_string->Get(i);
+ CHECK(flat_stream.HasMore());
+ CHECK(cons_stream.HasMore());
+ CHECK_EQ(c, flat_stream.GetNext());
+ CHECK_EQ(c, cons_stream.GetNext());
+ }
+ CHECK(!flat_stream.HasMore());
+ CHECK(!cons_stream.HasMore());
+ }
+}
+
+
+static inline void PrintStats(const ConsStringGenerationData& data) {
+#ifdef DEBUG
+ printf(
+     "%s: [%u], %s: [%u], %s: [%u], %s: [%u], %s: [%u], %s: [%u]\n",
+     "leaves", data.stats_.leaves_,
+     "empty", data.stats_.empty_leaves_,
+     "chars", data.stats_.chars_,
+     "lefts", data.stats_.left_traversals_,
+     "rights", data.stats_.right_traversals_,
+     "early_terminations", data.early_terminations_);
+#endif
+}
+
+
+template<typename BuildString>
+void TestStringCharacterStream(BuildString build, int test_cases) {
+ InitializeVM();
+ Isolate* isolate = Isolate::Current();
+ HandleScope outer_scope(isolate);
+ ZoneScope zone(Isolate::Current()->runtime_zone(), DELETE_ON_EXIT);
+ ConsStringGenerationData data(true);
+ for (int i = 0; i < test_cases; i++) {
+ printf("%d\n", i);
+ HandleScope inner_scope(isolate);
+ AlwaysAllocateScope always_allocate;
+ // Build flat version of cons string.
+ Handle<String> flat_string = build(i, &data);
+ ConsStringStats flat_string_stats;
+ AccumulateStats(flat_string, &flat_string_stats);
+ // Flatten string.
+ FlattenString(flat_string);
+ // Build unflattened version of cons string to test.
+ Handle<String> cons_string = build(i, &data);
+ ConsStringStats cons_string_stats;
+ AccumulateStats(cons_string, &cons_string_stats);
+ AssertNoAllocation no_alloc;
+ PrintStats(data);
+ // Fully verify the cons string.
+ cons_string_stats.VerifyEqual(flat_string_stats);
+ cons_string_stats.VerifyEqual(data.stats_);
+ VerifyConsString(cons_string, &data);
+ String* flat_string_ptr =
+ flat_string->IsConsString() ?
+ ConsString::cast(*flat_string)->first() :
+ *flat_string;
+ VerifyCharacterStream(flat_string_ptr, *cons_string);
+ }
+}
+
+
+static const int kCharacterStreamNonRandomCases = 8;
+
+
+static Handle<String> BuildEdgeCaseConsString(
+ int test_case, ConsStringGenerationData* data) {
+ data->Reset();
+ switch (test_case) {
+ case 0:
+ return ConstructBalanced(data, 71);
+ case 1:
+ return ConstructLeft(data, 71);
+ case 2:
+ return ConstructRight(data, 71);
+ case 3:
+ return ConstructLeft(data, 10);
+ case 4:
+ return ConstructRight(data, 10);
+ case 5:
+ // Two-element balanced tree.
+ data->stats_.chars_ += data->block(0)->length();
+ data->stats_.chars_ += data->block(1)->length();
+ data->stats_.leaves_ += 2;
+ return FACTORY->NewConsString(data->block(0), data->block(1));
+ case 6:
+ // Simple flattened tree.
+ data->stats_.chars_ += data->block(0)->length();
+ data->stats_.chars_ += data->block(1)->length();
+ data->stats_.leaves_ += 2;
+ data->stats_.empty_leaves_ += 1;
+ {
+ Handle<String> string =
+ FACTORY->NewConsString(data->block(0), data->block(1));
+ FlattenString(string);
+ return string;
+ }
+ case 7:
+ // Left node flattened.
+ data->stats_.chars_ += data->block(0)->length();
+ data->stats_.chars_ += data->block(1)->length();
+ data->stats_.chars_ += data->block(2)->length();
+ data->stats_.leaves_ += 3;
+ data->stats_.empty_leaves_ += 1;
+ data->stats_.left_traversals_ += 1;
+ {
+ Handle<String> left =
+ FACTORY->NewConsString(data->block(0), data->block(1));
+ FlattenString(left);
+ return FACTORY->NewConsString(left, data->block(2));
+ }
+ case 8:
+ // Left node and right node flattened.
+ data->stats_.chars_ += data->block(0)->length();
+ data->stats_.chars_ += data->block(1)->length();
+ data->stats_.chars_ += data->block(2)->length();
+ data->stats_.chars_ += data->block(3)->length();
+ data->stats_.leaves_ += 4;
+ data->stats_.empty_leaves_ += 2;
+ data->stats_.left_traversals_ += 1;
+ data->stats_.right_traversals_ += 1;
+ {
+ Handle<String> left =
+ FACTORY->NewConsString(data->block(0), data->block(1));
+ FlattenString(left);
+ Handle<String> right =
+ FACTORY->NewConsString(data->block(2), data->block(3));
+ FlattenString(right);
+ return FACTORY->NewConsString(left, right);
+ }
+ }
+ UNREACHABLE();
+ return Handle<String>();
+}
+
+
+TEST(StringCharacterStreamEdgeCases) {
+ printf("TestStringCharacterStreamEdgeCases\n");
+ TestStringCharacterStream(
+ BuildEdgeCaseConsString, kCharacterStreamNonRandomCases);
+}
+
+
+static const int kBalances = 3;
+static const int kTreeLengths = 4;
+static const int kEmptyLeaves = 4;
+static const int kUniqueRandomParameters =
+ kBalances*kTreeLengths*kEmptyLeaves;
+
+
+static void InitializeGenerationData(
+ int test_case, ConsStringGenerationData* data) {
+ // Clear the settings and reinit the rng.
+ data->Reset();
+ // Spin up the rng to a known location that is unique per test.
+ static const int kPerTestJump = 501;
+ for (int j = 0; j < test_case*kPerTestJump; j++) {
+ data->rng_.next();
+ }
+ // Choose balanced, left or right heavy trees.
+ switch (test_case % kBalances) {
+ case 0:
+ // Nothing to do. Already balanced.
+ break;
+ case 1:
+ // Left-heavy.
+ data->leftness_ = 0.90;
+ data->rightness_ = 0.15;
+ break;
+ case 2:
+ // Right-heavy.
+ data->leftness_ = 0.15;
+ data->rightness_ = 0.90;
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ // Must remove the influence of the above decision.
+ test_case /= kBalances;
+ // Choose tree length.
+ switch (test_case % kTreeLengths) {
+ case 0:
+ data->max_leaves_ = 16;
+ data->early_termination_threshold_ = 0.2;
+ break;
+ case 1:
+ data->max_leaves_ = 50;
+ data->early_termination_threshold_ = 0.05;
+ break;
+ case 2:
+ data->max_leaves_ = 500;
+ data->early_termination_threshold_ = 0.03;
+ break;
+ case 3:
+ data->max_leaves_ = 5000;
+ data->early_termination_threshold_ = 0.001;
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ // Must remove the influence of the above decision.
+ test_case /= kTreeLengths;
+ // Choose how often empty nodes are allowed, including never.
+ data->empty_leaf_threshold_ =
+ 0.03 * static_cast<double>(test_case % kEmptyLeaves);
+}
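
The three switches above decode test_case as a mixed-radix number over
kBalances x kTreeLengths x kEmptyLeaves = 3 * 4 * 4 = 48 unique
configurations. For example, test_case = 25 yields 25 % 3 = 1 (left-heavy:
leftness_ 0.90, rightness_ 0.15), then 25 / 3 = 8 and 8 % 4 = 0
(max_leaves_ 16, early_termination_threshold_ 0.2), then 8 / 4 = 2, so
empty_leaf_threshold_ = 0.03 * 2 = 0.06.
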
+
+
+static Handle<String> BuildRandomConsString(
+ int test_case, ConsStringGenerationData* data) {
+ InitializeGenerationData(test_case, data);
+ return ConstructRandomString(data, 200);
+}
+
+
+TEST(StringCharacterStreamRandom) {
+ printf("StringCharacterStreamRandom\n");
+ TestStringCharacterStream(BuildRandomConsString, kUniqueRandomParameters*7);
+}
+
+
static const int DEEP_ASCII_DEPTH = 100000;
@@ -441,7 +1000,8 @@ TEST(CachedHashOverflow) {
// We incorrectly allowed strings to be tagged as array indices even if their
// values didn't fit in the hash field.
// See http://code.google.com/p/v8/issues/detail?id=728
- ZoneScope zone(Isolate::Current()->runtime_zone(), DELETE_ON_EXIT);
+ Isolate* isolate = Isolate::Current();
+ ZoneScope zone(isolate->runtime_zone(), DELETE_ON_EXIT);
InitializeVM();
v8::HandleScope handle_scope;
@@ -458,16 +1018,15 @@ TEST(CachedHashOverflow) {
NULL
};
- Handle<Smi> fortytwo(Smi::FromInt(42));
- Handle<Smi> thirtyseven(Smi::FromInt(37));
- Handle<Object> results[] = {
- FACTORY->undefined_value(),
- fortytwo,
- FACTORY->undefined_value(),
- FACTORY->undefined_value(),
- thirtyseven,
- fortytwo,
- thirtyseven // Bug yielded 42 here.
+ Handle<Smi> fortytwo(Smi::FromInt(42), isolate);
+ Handle<Smi> thirtyseven(Smi::FromInt(37), isolate);
+ Handle<Object> results[] = { isolate->factory()->undefined_value(),
+ fortytwo,
+ isolate->factory()->undefined_value(),
+ isolate->factory()->undefined_value(),
+ thirtyseven,
+ fortytwo,
+ thirtyseven // Bug yielded 42 here.
};
const char* line;
@@ -539,7 +1098,7 @@ TEST(TrivialSlice) {
// actually creates a new string (it should not).
FLAG_string_slices = true;
InitializeVM();
- HandleScope scope;
+ v8::HandleScope scope;
v8::Local<v8::Value> result;
Handle<String> string;
const char* init = "var str = 'abcdefghijklmnopqrstuvwxyz';";
@@ -568,7 +1127,7 @@ TEST(SliceFromSlice) {
// actually creates a new string (it should not).
FLAG_string_slices = true;
InitializeVM();
- HandleScope scope;
+ v8::HandleScope scope;
v8::Local<v8::Value> result;
Handle<String> string;
const char* init = "var str = 'abcdefghijklmnopqrstuvwxyz';";
@@ -636,7 +1195,7 @@ TEST(RobustSubStringStub) {
// If not recognized, those unsafe arguments lead to out-of-bounds reads.
FLAG_allow_natives_syntax = true;
InitializeVM();
- HandleScope scope;
+ v8::HandleScope scope;
v8::Local<v8::Value> result;
Handle<String> string;
CompileRun("var short = 'abcdef';");
@@ -680,7 +1239,7 @@ TEST(RobustSubStringStub) {
TEST(RegExpOverflow) {
// Result string has the length 2^32, causing a 32-bit integer overflow.
InitializeVM();
- HandleScope scope;
+ v8::HandleScope scope;
LocalContext context;
v8::V8::IgnoreOutOfMemoryException();
v8::Local<v8::Value> result = CompileRun(
@@ -696,7 +1255,7 @@ TEST(RegExpOverflow) {
TEST(StringReplaceAtomTwoByteResult) {
InitializeVM();
- HandleScope scope;
+ v8::HandleScope scope;
LocalContext context;
v8::Local<v8::Value> result = CompileRun(
"var subject = 'ascii~only~string~'; "
@@ -713,5 +1272,64 @@ TEST(StringReplaceAtomTwoByteResult) {
TEST(IsAscii) {
CHECK(String::IsAscii(static_cast<char*>(NULL), 0));
- CHECK(String::IsAscii(static_cast<uc16*>(NULL), 0));
+ CHECK(String::IsOneByte(static_cast<uc16*>(NULL), 0));
+}
+
+
+#ifdef ENABLE_LATIN_1
+template<typename Op, bool return_first>
+static uint16_t ConvertLatin1(uint16_t c) {
+ uint32_t result[Op::kMaxWidth];
+ int chars;
+ chars = Op::Convert(c, 0, result, NULL);
+ if (chars == 0) return 0;
+ CHECK_LE(chars, static_cast<int>(sizeof(result)));
+ if (!return_first && chars > 1) {
+ return 0;
+ }
+ return result[0];
+}
+
+
+static void CheckCanonicalEquivalence(uint16_t c, uint16_t test) {
+ uint16_t expect = ConvertLatin1<unibrow::Ecma262UnCanonicalize, true>(c);
+ if (expect > unibrow::Latin1::kMaxChar) expect = 0;
+ CHECK_EQ(expect, test);
+}
+
+
+TEST(Latin1IgnoreCase) {
+ if (true) return;
+ using namespace unibrow;
+ for (uint16_t c = Latin1::kMaxChar + 1; c != 0; c++) {
+ uint16_t lower = ConvertLatin1<ToLowercase, false>(c);
+ uint16_t upper = ConvertLatin1<ToUppercase, false>(c);
+ uint16_t test = Latin1::ConvertNonLatin1ToLatin1(c);
+ // Filter out all characters whose upper is not their lower or vice versa.
+ if (lower == 0 && upper == 0) {
+ CheckCanonicalEquivalence(c, test);
+ continue;
+ }
+ if (lower > Latin1::kMaxChar && upper > Latin1::kMaxChar) {
+ CheckCanonicalEquivalence(c, test);
+ continue;
+ }
+ if (lower == 0 && upper != 0) {
+ lower = ConvertLatin1<ToLowercase, false>(upper);
+ }
+ if (upper == 0 && lower != c) {
+ upper = ConvertLatin1<ToUppercase, false>(lower);
+ }
+ if (lower > Latin1::kMaxChar && upper > Latin1::kMaxChar) {
+ CheckCanonicalEquivalence(c, test);
+ continue;
+ }
+ if (upper != c && lower != c) {
+ CheckCanonicalEquivalence(c, test);
+ continue;
+ }
+ CHECK_EQ(Min(upper, lower), test);
+ }
}
+#endif // ENABLE_LATIN_1
diff --git a/src/3rdparty/v8/test/cctest/test-symbols.cc b/src/3rdparty/v8/test/cctest/test-symbols.cc
new file mode 100644
index 0000000..adc100d
--- /dev/null
+++ b/src/3rdparty/v8/test/cctest/test-symbols.cc
@@ -0,0 +1,63 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+
+// Check that we can traverse very deep stacks of ConsStrings using
+// StringCharacterStream. Check that Get(int) works on very deep stacks
+// of ConsStrings. These operations may not be very fast, but they
+// should be possible without getting errors due to too deep recursion.
+
+#include "v8.h"
+
+#include "cctest.h"
+#include "objects.h"
+
+using namespace v8::internal;
+
+static v8::Persistent<v8::Context> env;
+
+static void InitializeVM() {
+ if (env.IsEmpty()) {
+ v8::HandleScope scope;
+ const char* extensions[] = { "v8/print" };
+ v8::ExtensionConfiguration config(1, extensions);
+ env = v8::Context::New(&config);
+ }
+ v8::HandleScope scope;
+ env->Enter();
+}
+
+
+TEST(Create) {
+ InitializeVM();
+ Isolate* isolate = Isolate::Current();
+ HandleScope scope(isolate);
+
+ const int kNumSymbols = 30;
+ Handle<Symbol> symbols[kNumSymbols];
+
+ for (int i = 0; i < kNumSymbols; ++i) {
+ symbols[i] = isolate->factory()->NewSymbol();
+ CHECK(symbols[i]->IsName());
+ CHECK(symbols[i]->IsSymbol());
+ CHECK(symbols[i]->HasHashCode());
+ CHECK_GT(symbols[i]->Hash(), 0);
+ symbols[i]->ShortPrint();
+ PrintF("\n");
+#ifdef OBJECT_PRINT
+ symbols[i]->Print();
+#endif
+#ifdef VERIFY_HEAP
+ symbols[i]->Verify();
+#endif
+ }
+
+ HEAP->PerformScavenge();
+ HEAP->CollectAllGarbage(Heap::kNoGCFlags);
+
+ // All symbols should be distinct.
+ for (int i = 0; i < kNumSymbols; ++i) {
+ CHECK(symbols[i]->SameValue(*symbols[i]));
+ for (int j = i + 1; j < kNumSymbols; ++j) {
+ CHECK(!symbols[i]->SameValue(*symbols[j]));
+ }
+ }
+}
diff --git a/src/3rdparty/v8/test/cctest/test-thread-termination.cc b/src/3rdparty/v8/test/cctest/test-thread-termination.cc
index cebabaa..b249c7a 100644
--- a/src/3rdparty/v8/test/cctest/test-thread-termination.cc
+++ b/src/3rdparty/v8/test/cctest/test-thread-termination.cc
@@ -134,7 +134,7 @@ TEST(TerminateOnlyV8ThreadFromThreadItself) {
// Test that we can run the code again after thread termination.
CHECK(!v8::V8::IsExecutionTerminating());
v8::Script::Compile(source)->Run();
- context.Dispose();
+ context.Dispose(context->GetIsolate());
}
@@ -154,7 +154,7 @@ TEST(TerminateOnlyV8ThreadFromThreadItselfNoLoop) {
CHECK(!v8::V8::IsExecutionTerminating());
// Test that we can run the code again after thread termination.
v8::Script::Compile(source)->Run();
- context.Dispose();
+ context.Dispose(context->GetIsolate());
}
@@ -194,7 +194,7 @@ TEST(TerminateOnlyV8ThreadFromOtherThread) {
thread.Join();
delete semaphore;
semaphore = NULL;
- context.Dispose();
+ context.Dispose(context->GetIsolate());
}
@@ -202,7 +202,7 @@ class LoopingThread : public v8::internal::Thread {
public:
LoopingThread() : Thread("LoopingThread") { }
void Run() {
- v8::Locker locker;
+ v8::Locker locker(CcTest::default_isolate());
v8::HandleScope scope;
v8_thread_id_ = v8::V8::GetCurrentThreadId();
v8::Handle<v8::ObjectTemplate> global =
@@ -214,7 +214,7 @@ class LoopingThread : public v8::internal::Thread {
v8::Handle<v8::String> source =
v8::String::New("try { loop(); fail(); } catch(e) { fail(); }");
v8::Script::Compile(source)->Run();
- context.Dispose();
+ context.Dispose(context->GetIsolate());
}
int GetV8ThreadId() { return v8_thread_id_; }
@@ -228,7 +228,7 @@ class LoopingThread : public v8::internal::Thread {
// from another thread when using Lockers and preemption.
TEST(TerminateMultipleV8ThreadsDefaultIsolate) {
{
- v8::Locker locker;
+ v8::Locker locker(CcTest::default_isolate());
v8::V8::Initialize();
v8::Locker::StartPreemption(1);
semaphore = v8::internal::OS::CreateSemaphore(0);
@@ -246,7 +246,7 @@ TEST(TerminateMultipleV8ThreadsDefaultIsolate) {
semaphore->Wait();
}
{
- v8::Locker locker;
+ v8::Locker locker(CcTest::default_isolate());
for (int i = 0; i < kThreads; i++) {
v8::V8::TerminateExecution(threads[i]->GetV8ThreadId());
}
@@ -256,7 +256,7 @@ TEST(TerminateMultipleV8ThreadsDefaultIsolate) {
delete threads[i];
}
{
- v8::Locker locker;
+ v8::Locker locker(CcTest::default_isolate());
v8::Locker::StopPreemption();
}
@@ -326,7 +326,7 @@ TEST(TerminateLoadICException) {
CHECK(!v8::V8::IsExecutionTerminating());
call_count = 0;
v8::Script::Compile(source)->Run();
- context.Dispose();
+ context.Dispose(context->GetIsolate());
}
v8::Handle<v8::Value> ReenterAfterTermination(const v8::Arguments& args) {
@@ -370,6 +370,5 @@ TEST(TerminateAndReenterFromThreadItself) {
// Check we can run JS again after termination.
CHECK(v8::Script::Compile(v8::String::New("function f() { return true; }"
"f()"))->Run()->IsTrue());
- context.Dispose();
+ context.Dispose(context->GetIsolate());
}
-
diff --git a/src/3rdparty/v8/test/cctest/test-threads.cc b/src/3rdparty/v8/test/cctest/test-threads.cc
index 713d1e8..ffb29cd 100644
--- a/src/3rdparty/v8/test/cctest/test-threads.cc
+++ b/src/3rdparty/v8/test/cctest/test-threads.cc
@@ -34,7 +34,7 @@
TEST(Preemption) {
- v8::Locker locker;
+ v8::Locker locker(CcTest::default_isolate());
v8::V8::Initialize();
v8::HandleScope scope;
v8::Context::Scope context_scope(v8::Context::New());
@@ -67,7 +67,7 @@ class ThreadA : public v8::internal::Thread {
public:
ThreadA() : Thread("ThreadA") { }
void Run() {
- v8::Locker locker;
+ v8::Locker locker(CcTest::default_isolate());
v8::HandleScope scope;
v8::Context::Scope context_scope(v8::Context::New());
@@ -86,7 +86,7 @@ class ThreadA : public v8::internal::Thread {
turn = CLEAN_CACHE;
do {
{
- v8::Unlocker unlocker;
+ v8::Unlocker unlocker(CcTest::default_isolate());
Thread::YieldCPU();
}
} while (turn != SECOND_TIME_FILL_CACHE);
@@ -105,7 +105,7 @@ class ThreadB : public v8::internal::Thread {
void Run() {
do {
{
- v8::Locker locker;
+ v8::Locker locker(CcTest::default_isolate());
if (turn == CLEAN_CACHE) {
v8::HandleScope scope;
v8::Context::Scope context_scope(v8::Context::New());
diff --git a/src/3rdparty/v8/test/cctest/test-weakmaps.cc b/src/3rdparty/v8/test/cctest/test-weakmaps.cc
index 7c98c57..cd333d3 100644
--- a/src/3rdparty/v8/test/cctest/test-weakmaps.cc
+++ b/src/3rdparty/v8/test/cctest/test-weakmaps.cc
@@ -34,6 +34,11 @@
using namespace v8::internal;
+static Isolate* GetIsolateFrom(LocalContext* context) {
+ return reinterpret_cast<Isolate*>((*context)->GetIsolate());
+}
+
+
static Handle<JSWeakMap> AllocateJSWeakMap() {
Handle<Map> map = FACTORY->NewMap(JS_WEAK_MAP_TYPE, JSWeakMap::kSize);
Handle<JSObject> weakmap_obj = FACTORY->NewJSObjectFromMap(map);
@@ -57,10 +62,12 @@ static void PutIntoWeakMap(Handle<JSWeakMap> weakmap,
}
static int NumberOfWeakCalls = 0;
-static void WeakPointerCallback(v8::Persistent<v8::Value> handle, void* id) {
+static void WeakPointerCallback(v8::Isolate* isolate,
+ v8::Persistent<v8::Value> handle,
+ void* id) {
ASSERT(id == reinterpret_cast<void*>(1234));
NumberOfWeakCalls++;
- handle.Dispose();
+ handle.Dispose(isolate);
}
@@ -69,7 +76,7 @@ TEST(Weakness) {
LocalContext context;
v8::HandleScope scope;
Handle<JSWeakMap> weakmap = AllocateJSWeakMap();
- GlobalHandles* global_handles = Isolate::Current()->global_handles();
+ GlobalHandles* global_handles = GetIsolateFrom(&context)->global_handles();
// Keep global reference to the key.
Handle<Object> key;
@@ -86,7 +93,7 @@ TEST(Weakness) {
v8::HandleScope scope;
PutIntoWeakMap(weakmap,
Handle<JSObject>(JSObject::cast(*key)),
- Handle<Smi>(Smi::FromInt(23)));
+ Handle<Smi>(Smi::FromInt(23), GetIsolateFrom(&context)));
}
CHECK_EQ(1, ObjectHashTable::cast(weakmap->table())->NumberOfElements());
@@ -102,6 +109,7 @@ TEST(Weakness) {
v8::HandleScope scope;
global_handles->MakeWeak(key.location(),
reinterpret_cast<void*>(1234),
+ NULL,
&WeakPointerCallback);
}
CHECK(global_handles->IsWeak(key.location()));
@@ -136,7 +144,8 @@ TEST(Shrinking) {
Handle<Map> map = FACTORY->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
for (int i = 0; i < 32; i++) {
Handle<JSObject> object = FACTORY->NewJSObjectFromMap(map);
- PutIntoWeakMap(weakmap, object, Handle<Smi>(Smi::FromInt(i)));
+ PutIntoWeakMap(weakmap, object,
+ Handle<Smi>(Smi::FromInt(i), GetIsolateFrom(&context)));
}
}
@@ -164,7 +173,7 @@ TEST(Regress2060a) {
LocalContext context;
v8::HandleScope scope;
Handle<JSFunction> function =
- FACTORY->NewFunction(FACTORY->function_symbol(), FACTORY->null_value());
+ FACTORY->NewFunction(FACTORY->function_string(), FACTORY->null_value());
Handle<JSObject> key = FACTORY->NewJSObject(function);
Handle<JSWeakMap> weakmap = AllocateJSWeakMap();
@@ -200,7 +209,7 @@ TEST(Regress2060b) {
LocalContext context;
v8::HandleScope scope;
Handle<JSFunction> function =
- FACTORY->NewFunction(FACTORY->function_symbol(), FACTORY->null_value());
+ FACTORY->NewFunction(FACTORY->function_string(), FACTORY->null_value());
// Start second old-space page so that keys land on evacuation candidate.
Page* first_page = HEAP->old_pointer_space()->anchor()->next_page();
@@ -215,7 +224,9 @@ TEST(Regress2060b) {
}
Handle<JSWeakMap> weakmap = AllocateJSWeakMap();
for (int i = 0; i < 32; i++) {
- PutIntoWeakMap(weakmap, keys[i], Handle<Smi>(Smi::FromInt(i)));
+ PutIntoWeakMap(weakmap,
+ keys[i],
+ Handle<Smi>(Smi::FromInt(i), GetIsolateFrom(&context)));
}
// Force compacting garbage collection. The subsequent collections are used
diff --git a/src/3rdparty/v8/test/cctest/testcfg.py b/src/3rdparty/v8/test/cctest/testcfg.py
index 69a5db2..86dc740 100644
--- a/src/3rdparty/v8/test/cctest/testcfg.py
+++ b/src/3rdparty/v8/test/cctest/testcfg.py
@@ -45,13 +45,12 @@ class CcTestSuite(testsuite.TestSuite):
os.makedirs(self.serdes_dir)
def ListTests(self, context):
- if utils.IsWindows():
- shell += '.exe'
shell = os.path.abspath(os.path.join(context.shell_dir, self.shell()))
- output = commands.Execute([context.command_prefix,
- shell,
- '--list',
- context.extra_flags])
+ if utils.IsWindows():
+ shell += ".exe"
+ output = commands.Execute(context.command_prefix +
+ [shell, "--list"] +
+ context.extra_flags)
if output.exit_code != 0:
print output.stdout
print output.stderr
diff --git a/src/3rdparty/v8/test/message/overwritten-builtins.out b/src/3rdparty/v8/test/message/overwritten-builtins.out
index ccf2924..db31bbf 100644
--- a/src/3rdparty/v8/test/message/overwritten-builtins.out
+++ b/src/3rdparty/v8/test/message/overwritten-builtins.out
@@ -28,3 +28,6 @@
*%(basename)s:31: TypeError: Cannot read property 'x' of undefined
undefined.x
^
+TypeError: Cannot read property 'x' of undefined
+ at *%(basename)s:31:10
+
diff --git a/src/3rdparty/v8/test/mjsunit/allocation-site-info.js b/src/3rdparty/v8/test/mjsunit/allocation-site-info.js
new file mode 100644
index 0000000..d57fd32
--- /dev/null
+++ b/src/3rdparty/v8/test/mjsunit/allocation-site-info.js
@@ -0,0 +1,272 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax --smi-only-arrays --expose-gc
+// Flags: --track-allocation-sites --nooptimize-constructed-arrays
+
+// TODO(mvstanton): remove --nooptimize-constructed-arrays and enable
+// the constructed array code below when the feature is turned on
+// by default.
+
+// Test element kind of objects.
+// Since --smi-only-arrays affects builtins, its default setting at compile
+// time sticks if built with snapshot. If --smi-only-arrays is deactivated
+// by default, only a no-snapshot build actually has smi-only arrays enabled
+// in this test case. Depending on whether smi-only arrays are actually
+// enabled, this test takes the appropriate code path to check smi-only arrays.
+
+support_smi_only_arrays = %HasFastSmiElements(new Array(1,2,3,4,5,6,7,8));
+optimize_constructed_arrays = false;
+
+if (support_smi_only_arrays) {
+ print("Tests include smi-only arrays.");
+} else {
+ print("Tests do NOT include smi-only arrays.");
+}
+
+if (optimize_constructed_arrays) {
+ print("Tests include constructed array optimizations.");
+} else {
+ print("Tests do NOT include constructed array optimizations.");
+}
+
+var elements_kind = {
+ fast_smi_only : 'fast smi only elements',
+ fast : 'fast elements',
+ fast_double : 'fast double elements',
+ dictionary : 'dictionary elements',
+ external_byte : 'external byte elements',
+ external_unsigned_byte : 'external unsigned byte elements',
+ external_short : 'external short elements',
+ external_unsigned_short : 'external unsigned short elements',
+ external_int : 'external int elements',
+ external_unsigned_int : 'external unsigned int elements',
+ external_float : 'external float elements',
+ external_double : 'external double elements',
+ external_pixel : 'external pixel elements'
+}
+
+function getKind(obj) {
+ if (%HasFastSmiElements(obj)) return elements_kind.fast_smi_only;
+ if (%HasFastObjectElements(obj)) return elements_kind.fast;
+ if (%HasFastDoubleElements(obj)) return elements_kind.fast_double;
+ if (%HasDictionaryElements(obj)) return elements_kind.dictionary;
+}
+
+function isHoley(obj) {
+ if (%HasFastHoleyElements(obj)) return true;
+ return false;
+}
+
+function assertKind(expected, obj, name_opt) {
+ if (!support_smi_only_arrays &&
+ expected == elements_kind.fast_smi_only) {
+ expected = elements_kind.fast;
+ }
+ assertEquals(expected, getKind(obj), name_opt);
+}
+
+function assertHoley(obj, name_opt) {
+ assertEquals(true, isHoley(obj), name_opt);
+}
+
+function assertNotHoley(obj, name_opt) {
+ assertEquals(false, isHoley(obj), name_opt);
+}
+
+if (support_smi_only_arrays) {
+
+ obj = [];
+ assertNotHoley(obj);
+ assertKind(elements_kind.fast_smi_only, obj);
+
+ obj = [1, 2, 3];
+ assertNotHoley(obj);
+ assertKind(elements_kind.fast_smi_only, obj);
+
+ obj = new Array();
+ assertNotHoley(obj);
+ assertKind(elements_kind.fast_smi_only, obj);
+
+ obj = new Array(0);
+ assertNotHoley(obj);
+ assertKind(elements_kind.fast_smi_only, obj);
+
+ obj = new Array(2);
+ assertHoley(obj);
+ assertKind(elements_kind.fast_smi_only, obj);
+
+ obj = new Array(1,2,3);
+ assertNotHoley(obj);
+ assertKind(elements_kind.fast_smi_only, obj);
+
+ obj = new Array(1, "hi", 2, undefined);
+ assertNotHoley(obj);
+ assertKind(elements_kind.fast, obj);
+
+ function fastliteralcase(literal, value) {
+ literal[0] = value;
+ return literal;
+ }
+
+ function get_standard_literal() {
+ var literal = [1, 2, 3];
+ return literal;
+ }
+
+ // Case: [1,2,3] as allocation site
+ obj = fastliteralcase(get_standard_literal(), 1);
+ assertKind(elements_kind.fast_smi_only, obj);
+ obj = fastliteralcase(get_standard_literal(), 1.5);
+ assertKind(elements_kind.fast_double, obj);
+ obj = fastliteralcase(get_standard_literal(), 2);
+ assertKind(elements_kind.fast_double, obj);
+
+ obj = fastliteralcase([5, 3, 2], 1.5);
+ assertKind(elements_kind.fast_double, obj);
+ obj = fastliteralcase([3, 6, 2], 1.5);
+ assertKind(elements_kind.fast_double, obj);
+ obj = fastliteralcase([2, 6, 3], 2);
+ assertKind(elements_kind.fast_smi_only, obj);
+
+ // Verify that we will not pretransition the double->fast path.
+ obj = fastliteralcase(get_standard_literal(), "elliot");
+ assertKind(elements_kind.fast, obj);
+ // This fails until we turn off optimistic transitions to the
+ // most general elements kind seen on keyed stores. It's a goal
+ // to turn it off, but for now we need it.
+ // obj = fastliteralcase(3);
+ // assertKind(elements_kind.fast_double, obj);
+
+ function fastliteralcase_smifast(value) {
+ var literal = [1, 2, 3, 4];
+ literal[0] = value;
+ return literal;
+ }
+
+ obj = fastliteralcase_smifast(1);
+ assertKind(elements_kind.fast_smi_only, obj);
+ obj = fastliteralcase_smifast("carter");
+ assertKind(elements_kind.fast, obj);
+ obj = fastliteralcase_smifast(2);
+ assertKind(elements_kind.fast, obj);
+
+ if (optimize_constructed_arrays) {
+ function newarraycase_smidouble(value) {
+ var a = new Array();
+ a[0] = value;
+ return a;
+ }
+
+ // Case: new Array() as allocation site, smi->double
+ obj = newarraycase_smidouble(1);
+ assertKind(elements_kind.fast_smi_only, obj);
+ obj = newarraycase_smidouble(1.5);
+ assertKind(elements_kind.fast_double, obj);
+ obj = newarraycase_smidouble(2);
+ assertKind(elements_kind.fast_double, obj);
+
+ function newarraycase_smiobj(value) {
+ var a = new Array();
+ a[0] = value;
+ return a;
+ }
+
+ // Case: new Array() as allocation site, smi->fast
+ obj = newarraycase_smiobj(1);
+ assertKind(elements_kind.fast_smi_only, obj);
+ obj = newarraycase_smiobj("gloria");
+ assertKind(elements_kind.fast, obj);
+ obj = newarraycase_smiobj(2);
+ assertKind(elements_kind.fast, obj);
+
+ function newarraycase_length_smidouble(value) {
+ var a = new Array(3);
+ a[0] = value;
+ return a;
+ }
+
+ // Case: new Array(length) as allocation site
+ obj = newarraycase_length_smidouble(1);
+ assertKind(elements_kind.fast_smi_only, obj);
+ obj = newarraycase_length_smidouble(1.5);
+ assertKind(elements_kind.fast_double, obj);
+ obj = newarraycase_length_smidouble(2);
+ assertKind(elements_kind.fast_double, obj);
+
+ // Try to continue the transition to fast object, but
+ // we will not pretransition from double->fast, because
+ // it may hurt performance ("poisoning").
+ obj = newarraycase_length_smidouble("coates");
+ assertKind(elements_kind.fast, obj);
+ obj = newarraycase_length_smidouble(2.5);
+ // However, because of optimistic transitions, we will
+ // transition to the most general kind of elements kind found,
+ // therefore I can't count on this assert yet.
+ // assertKind(elements_kind.fast_double, obj);
+
+ function newarraycase_length_smiobj(value) {
+ var a = new Array(3);
+ a[0] = value;
+ return a;
+ }
+
+ // Case: new Array(<length>) as allocation site, smi->fast
+ obj = newarraycase_length_smiobj(1);
+ assertKind(elements_kind.fast_smi_only, obj);
+ obj = newarraycase_length_smiobj("gloria");
+ assertKind(elements_kind.fast, obj);
+ obj = newarraycase_length_smiobj(2);
+ assertKind(elements_kind.fast, obj);
+
+ function newarraycase_list_smidouble(value) {
+ var a = new Array(1, 2, 3);
+ a[0] = value;
+ return a;
+ }
+
+ obj = newarraycase_list_smidouble(1);
+ assertKind(elements_kind.fast_smi_only, obj);
+ obj = newarraycase_list_smidouble(1.5);
+ assertKind(elements_kind.fast_double, obj);
+ obj = newarraycase_list_smidouble(2);
+ assertKind(elements_kind.fast_double, obj);
+
+ function newarraycase_list_smiobj(value) {
+ var a = new Array(4, 5, 6);
+ a[0] = value;
+ return a;
+ }
+
+ obj = newarraycase_list_smiobj(1);
+ assertKind(elements_kind.fast_smi_only, obj);
+ obj = newarraycase_list_smiobj("coates");
+ assertKind(elements_kind.fast, obj);
+ obj = newarraycase_list_smiobj(2);
+ assertKind(elements_kind.fast, obj);
+ }
+}
diff --git a/src/3rdparty/v8/test/mjsunit/array-bounds-check-removal.js b/src/3rdparty/v8/test/mjsunit/array-bounds-check-removal.js
index df7988b..10e11f0 100644
--- a/src/3rdparty/v8/test/mjsunit/array-bounds-check-removal.js
+++ b/src/3rdparty/v8/test/mjsunit/array-bounds-check-removal.js
@@ -52,13 +52,13 @@ test_do_not_assert_on_non_int32(v,"a");
%OptimizeFunctionOnNextCall(test_do_not_assert_on_non_int32);
test_do_not_assert_on_non_int32(v,0);
-function test_base(base,cond) {
+function test_base(a, base, condition) {
a[base + 1] = 1;
a[base + 4] = 2;
a[base + 3] = 3;
a[base + 2] = 4;
a[base + 4] = base + 4;
- if (cond) {
+ if (condition) {
a[base + 1] = 1;
a[base + 2] = 2;
a[base + 2] = 3;
@@ -73,8 +73,8 @@ function test_base(base,cond) {
}
}
-function check_test_base(base,cond) {
- if (cond) {
+function check_test_base(a, base, condition) {
+ if (condition) {
assertEquals(1, a[base + 1]);
assertEquals(4, a[base + 2]);
assertEquals(base + 4, a[base + 4]);
@@ -87,6 +87,37 @@ function check_test_base(base,cond) {
}
+test_base(a, 1, true);
+test_base(a, 2, true);
+test_base(a, 1, false);
+test_base(a, 2, false);
+%OptimizeFunctionOnNextCall(test_base);
+test_base(a, 3, true);
+check_test_base(a, 3, true);
+test_base(a, 3, false);
+check_test_base(a, 3, false);
+
+// Test that we deopt on failed bounds checks.
+var dictionary_map_array = new Int32Array(128);
+test_base(dictionary_map_array, 5, true);
+test_base(dictionary_map_array, 6, true);
+test_base(dictionary_map_array, 5, false);
+test_base(dictionary_map_array, 6, false);
+%OptimizeFunctionOnNextCall(test_base);
+test_base(dictionary_map_array, -2, true);
+assertTrue(%GetOptimizationStatus(test_base) != 1);
+
+// Forget about the dictionary_map_array's map.
+%ClearFunctionTypeFeedback(test_base);
+
+test_base(a, 5, true);
+test_base(a, 6, true);
+test_base(a, 5, false);
+test_base(a, 6, false);
+%OptimizeFunctionOnNextCall(test_base);
+test_base(a, 2048, true);
+assertTrue(%GetOptimizationStatus(test_base) != 1);
+
function test_minus(base,cond) {
a[base - 1] = 1;
a[base - 2] = 2;
@@ -122,16 +153,6 @@ function check_test_minus(base,cond) {
}
}
-test_base(1,true);
-test_base(2,true);
-test_base(1,false);
-test_base(2,false);
-%OptimizeFunctionOnNextCall(test_base);
-test_base(3,true);
-check_test_base(3,true);
-test_base(3,false);
-check_test_base(3,false);
-
test_minus(5,true);
test_minus(6,true);
%OptimizeFunctionOnNextCall(test_minus);
@@ -140,30 +161,7 @@ check_test_minus(7,true);
test_minus(7,false);
check_test_minus(7,false);
-// Optimization status:
-// YES: 1
-// NO: 2
-// ALWAYS: 3
-// NEVER: 4
-
-// Test that we still deopt on failed bound checks
-test_base(5,true);
-test_base(6,true);
-test_base(5,false);
-test_base(6,false);
-%OptimizeFunctionOnNextCall(test_base);
-test_base(-2,true);
-assertTrue(%GetOptimizationStatus(test_base) != 1);
-
-test_base(5,true);
-test_base(6,true);
-test_base(5,false);
-test_base(6,false);
-%OptimizeFunctionOnNextCall(test_base);
-test_base(2048,true);
-assertTrue(%GetOptimizationStatus(test_base) != 1);
-
-// Specific test on negative offsets
+// Specific test on negative offsets.
var short_a = new Array(100);
for (var i = 0; i < short_a.length; i++) short_a[i] = 0;
function short_test(a, i) {
@@ -174,9 +172,32 @@ short_test(short_a, 50);
short_test(short_a, 50);
%OptimizeFunctionOnNextCall(short_test);
short_a.length = 10;
-short_test(a, 0);
+short_test(short_a, 0);
assertTrue(%GetOptimizationStatus(short_test) != 1);
-gc();
+// A test for when we would modify a phi index.
+var data_phi = [0, 1, 2, 3, 4, 5, 6, 7, 8];
+function test_phi(a, base, check) {
+ var index;
+ if (check) {
+ index = base + 1;
+ } else {
+ index = base + 2;
+ }
+ var result = a[index];
+ result += a[index + 1];
+ result += a[index - 1];
+ return result;
+}
+var result_phi = 0;
+result_phi = test_phi(data_phi, 3, true);
+assertEquals(12, result_phi);
+result_phi = test_phi(data_phi, 3, true);
+assertEquals(12, result_phi);
+%OptimizeFunctionOnNextCall(test_phi);
+result_phi = test_phi(data_phi, 3, true);
+assertEquals(12, result_phi);
+
+gc();
diff --git a/src/3rdparty/v8/test/mjsunit/array-natives-elements.js b/src/3rdparty/v8/test/mjsunit/array-natives-elements.js
new file mode 100644
index 0000000..b3a7141
--- /dev/null
+++ b/src/3rdparty/v8/test/mjsunit/array-natives-elements.js
@@ -0,0 +1,318 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax --smi-only-arrays
+// Flags: --noparallel-recompilation
+// Flags: --notrack-allocation-sites
+
+// Test element kind of objects.
+// Since --smi-only-arrays affects builtins, its default setting at compile time
+// sticks if built with snapshot. If --smi-only-arrays is deactivated by
+// default, only a no-snapshot build actually has smi-only arrays enabled in
+// this test case. Depending on whether smi-only arrays are actually enabled,
+// this test takes the appropriate code path to check smi-only arrays.
+
+support_smi_only_arrays = %HasFastSmiElements([1,2,3,4,5,6,7,8,9,10]);
+
+if (support_smi_only_arrays) {
+ print("Tests include smi-only arrays.");
+} else {
+ print("Tests do NOT include smi-only arrays.");
+}
+
+// IC and Crankshaft support for smi-only elements in dynamic array literals.
+function get(foo) { return foo; } // Used to generate dynamic values.
+
+function array_natives_test() {
+
+ // Ensure small array literals start in specific element kind mode.
+ assertTrue(%HasFastSmiElements([]));
+ assertTrue(%HasFastSmiElements([1]));
+ assertTrue(%HasFastSmiElements([1,2]));
+ assertTrue(%HasFastDoubleElements([1.1]));
+ assertTrue(%HasFastDoubleElements([1.1,2]));
+
+ // Push
+ var a0 = [1, 2, 3];
+ if (%HasFastSmiElements(a0)) {
+ assertTrue(%HasFastSmiElements(a0));
+ a0.push(4);
+ assertTrue(%HasFastSmiElements(a0));
+ a0.push(1.3);
+ assertTrue(%HasFastDoubleElements(a0));
+ a0.push(1.5);
+ assertTrue(%HasFastDoubleElements(a0));
+ a0.push({});
+ assertTrue(%HasFastObjectElements(a0));
+ a0.push({});
+ assertTrue(%HasFastObjectElements(a0));
+ } else {
+ assertTrue(%HasFastObjectElements(a0));
+ a0.push(4);
+ a0.push(1.3);
+ a0.push(1.5);
+ a0.push({});
+ a0.push({});
+ assertTrue(%HasFastObjectElements(a0));
+ }
+ assertEquals([1,2,3,4,1.3,1.5,{},{}], a0);
+
+ // Concat
+ var a1;
+ a1 = [1,2,3].concat([]);
+ assertTrue(%HasFastSmiElements(a1));
+ assertEquals([1,2,3], a1);
+ a1 = [1,2,3].concat([4,5,6]);
+ assertTrue(%HasFastSmiElements(a1));
+ assertEquals([1,2,3,4,5,6], a1);
+ a1 = [1,2,3].concat([4,5,6], [7,8,9]);
+ assertTrue(%HasFastSmiElements(a1));
+ assertEquals([1,2,3,4,5,6,7,8,9], a1);
+ a1 = [1.1,2,3].concat([]);
+ assertTrue(%HasFastDoubleElements(a1));
+ assertEquals([1.1,2,3], a1);
+ a1 = [1,2,3].concat([1.1, 2]);
+ assertTrue(%HasFastDoubleElements(a1));
+ assertEquals([1,2,3,1.1,2], a1);
+ a1 = [1.1,2,3].concat([1, 2]);
+ assertTrue(%HasFastDoubleElements(a1));
+ assertEquals([1.1,2,3,1,2], a1);
+ a1 = [1.1,2,3].concat([1.2, 2]);
+ assertTrue(%HasFastDoubleElements(a1));
+ assertEquals([1.1,2,3,1.2,2], a1);
+
+ a1 = [1,2,3].concat([{}]);
+ assertTrue(%HasFastObjectElements(a1));
+ assertEquals([1,2,3,{}], a1);
+ a1 = [1.1,2,3].concat([{}]);
+ assertTrue(%HasFastObjectElements(a1));
+ assertEquals([1.1,2,3,{}], a1);
+ a1 = [{}].concat([1,2,3]);
+ assertTrue(%HasFastObjectElements(a1));
+ assertEquals([{},1,2,3], a1);
+ a1 = [{}].concat([1.1,2,3]);
+ assertTrue(%HasFastObjectElements(a1));
+ assertEquals([{},1.1,2,3], a1);
+
+ // Slice
+ var a2 = [1,2,3];
+ assertTrue(%HasFastSmiElements(a2.slice()));
+ assertTrue(%HasFastSmiElements(a2.slice(1)));
+ assertTrue(%HasFastSmiElements(a2.slice(1, 2)));
+ assertEquals([1,2,3], a2.slice());
+ assertEquals([2,3], a2.slice(1));
+ assertEquals([2], a2.slice(1,2));
+ a2 = [1.1,2,3];
+ assertTrue(%HasFastDoubleElements(a2.slice()));
+ assertTrue(%HasFastDoubleElements(a2.slice(1)));
+ assertTrue(%HasFastDoubleElements(a2.slice(1, 2)));
+ assertEquals([1.1,2,3], a2.slice());
+ assertEquals([2,3], a2.slice(1));
+ assertEquals([2], a2.slice(1,2));
+ a2 = [{},2,3];
+ assertTrue(%HasFastObjectElements(a2.slice()));
+ assertTrue(%HasFastObjectElements(a2.slice(1)));
+ assertTrue(%HasFastObjectElements(a2.slice(1, 2)));
+ assertEquals([{},2,3], a2.slice());
+ assertEquals([2,3], a2.slice(1));
+ assertEquals([2], a2.slice(1,2));
+
+ // Splice
+ var a3 = [1,2,3];
+ var a3r;
+ a3r = a3.splice(0, 0);
+ assertTrue(%HasFastSmiElements(a3r));
+ assertTrue(%HasFastSmiElements(a3));
+ assertEquals([], a3r);
+ assertEquals([1, 2, 3], a3);
+ a3 = [1,2,3];
+ a3r = a3.splice(0, 1);
+ assertTrue(%HasFastSmiElements(a3r));
+ assertTrue(%HasFastSmiElements(a3));
+ assertEquals([1], a3r);
+ assertEquals([2, 3], a3);
+ a3 = [1,2,3];
+ a3r = a3.splice(0, 0, 2);
+ assertTrue(%HasFastSmiElements(a3r));
+ assertTrue(%HasFastSmiElements(a3));
+ assertEquals([], a3r);
+ assertEquals([2, 1, 2, 3], a3);
+ a3 = [1,2,3];
+ a3r = a3.splice(0, 1, 2);
+ assertTrue(%HasFastSmiElements(a3r));
+ assertTrue(%HasFastSmiElements(a3));
+ assertEquals([1], a3r);
+ assertEquals([2, 2, 3], a3);
+
+ a3 = [1.1,2,3];
+ a3r = a3.splice(0, 0);
+ assertTrue(%HasFastDoubleElements(a3r));
+ assertTrue(%HasFastDoubleElements(a3));
+ assertEquals([], a3r);
+ assertEquals([1.1, 2, 3], a3);
+ a3 = [1.1,2,3];
+ a3r = a3.splice(0, 1);
+ assertTrue(%HasFastDoubleElements(a3r));
+ assertTrue(%HasFastDoubleElements(a3));
+ assertEquals([1.1], a3r);
+ assertEquals([2, 3], a3);
+ a3 = [1.1,2,3];
+ a3r = a3.splice(0, 0, 2);
+ // Commented out since splice is implemented in JS, which picks the
+ // best-fitting elements kind for the result.
+ // assertTrue(%HasFastDoubleElements(a3r));
+ assertTrue(%HasFastSmiElements(a3r));
+ assertTrue(%HasFastDoubleElements(a3));
+ assertEquals([], a3r);
+ assertEquals([2, 1.1, 2, 3], a3);
+ a3 = [1.1,2,3];
+ a3r = a3.splice(0, 1, 2);
+ assertTrue(%HasFastDoubleElements(a3r));
+ assertTrue(%HasFastDoubleElements(a3));
+ assertEquals([1.1], a3r);
+ assertEquals([2, 2, 3], a3);
+ a3 = [1.1,2,3];
+ a3r = a3.splice(0, 0, 2.1);
+ // Commented out since splice is implemented in JS, which picks the
+ // best-fitting elements kind for the result.
+ // assertTrue(%HasFastDoubleElements(a3r));
+ assertTrue(%HasFastSmiElements(a3r));
+ assertTrue(%HasFastDoubleElements(a3));
+ assertEquals([], a3r);
+ assertEquals([2.1, 1.1, 2, 3], a3);
+ a3 = [1.1,2,3];
+ a3r = a3.splice(0, 1, 2.2);
+ assertTrue(%HasFastDoubleElements(a3r));
+ assertTrue(%HasFastDoubleElements(a3));
+ assertEquals([1.1], a3r);
+ assertEquals([2.2, 2, 3], a3);
+ a3 = [1,2,3];
+ a3r = a3.splice(0, 0, 2.1);
+ // Commented out since splice is implemented in JS, which picks the
+ // best-fitting elements kind for the result.
+ // assertTrue(%HasFastDoubleElements(a3r));
+ assertTrue(%HasFastSmiElements(a3r));
+ assertTrue(%HasFastDoubleElements(a3));
+ assertEquals([], a3r);
+ assertEquals([2.1, 1, 2, 3], a3);
+ a3 = [1,2,3];
+ a3r = a3.splice(0, 1, 2.2);
+ assertTrue(%HasFastDoubleElements(a3r));
+ assertTrue(%HasFastDoubleElements(a3));
+ assertEquals([1], a3r);
+ assertEquals([2.2, 2, 3], a3);
+
+ a3 = [{},2,3];
+ a3r = a3.splice(0, 0);
+ assertTrue(%HasFastObjectElements(a3r));
+ assertTrue(%HasFastObjectElements(a3));
+ assertEquals([], a3r);
+ assertEquals([{}, 2, 3], a3);
+ a3 = [1,2,{}];
+ a3r = a3.splice(0, 1);
+ assertTrue(%HasFastObjectElements(a3r));
+ assertTrue(%HasFastObjectElements(a3));
+ assertEquals([1], a3r);
+ assertEquals([2, {}], a3);
+ a3 = [1,2,3];
+ a3r = a3.splice(0, 0, {});
+ assertTrue(%HasFastObjectElements(a3r));
+ assertTrue(%HasFastObjectElements(a3));
+ assertEquals([], a3r);
+ assertEquals([{}, 1, 2, 3], a3);
+ a3 = [1,2,3];
+ a3r = a3.splice(0, 1, {});
+ assertTrue(%HasFastObjectElements(a3r));
+ assertTrue(%HasFastObjectElements(a3));
+ assertEquals([1], a3r);
+ assertEquals([{}, 2, 3], a3);
+
+ a3 = [1.1,2,3];
+ a3r = a3.splice(0, 0, {});
+ assertTrue(%HasFastObjectElements(a3r));
+ assertTrue(%HasFastObjectElements(a3));
+ assertEquals([], a3r);
+ assertEquals([{}, 1.1, 2, 3], a3);
+ a3 = [1.1,2,3];
+ a3r = a3.splice(0, 1, {});
+ assertTrue(%HasFastObjectElements(a3r));
+ assertTrue(%HasFastObjectElements(a3));
+ assertEquals([1.1], a3r);
+ assertEquals([{}, 2, 3], a3);
+
+ // Pop
+ var a4 = [1,2,3];
+ assertEquals(3, a4.pop());
+ assertTrue(%HasFastSmiElements(a4));
+ a4 = [1.1,2,3];
+ assertEquals(3, a4.pop());
+ assertTrue(%HasFastDoubleElements(a4));
+ a4 = [{},2,3];
+ assertEquals(3, a4.pop());
+ assertTrue(%HasFastObjectElements(a4));
+
+ // Shift
+ var a4 = [1,2,3];
+ assertEquals(1, a4.shift());
+ assertTrue(%HasFastSmiElements(a4));
+ a4 = [1.1,2,3];
+ assertEquals(1.1, a4.shift());
+ assertTrue(%HasFastDoubleElements(a4));
+ a4 = [{},2,3];
+ assertEquals({}, a4.shift());
+ assertTrue(%HasFastObjectElements(a4));
+
+ // Unshift
+ var a4 = [1,2,3];
+ a4.unshift(1);
+ assertTrue(%HasFastSmiElements(a4));
+ assertEquals([1,1,2,3], a4);
+ a4 = [1,2,3];
+ a4.unshift(1.1);
+ // TODO(verwaest): We'll want to support double unshifting as well.
+ // assertTrue(%HasFastDoubleElements(a4));
+ assertTrue(%HasFastObjectElements(a4));
+ assertEquals([1.1,1,2,3], a4);
+ a4 = [1.1,2,3];
+ a4.unshift(1);
+ // assertTrue(%HasFastDoubleElements(a4));
+ assertTrue(%HasFastObjectElements(a4));
+ assertEquals([1,1.1,2,3], a4);
+ a4 = [{},2,3];
+ a4.unshift(1);
+ assertTrue(%HasFastObjectElements(a4));
+ assertEquals([1,{},2,3], a4);
+ a4 = [{},2,3];
+ a4.unshift(1.1);
+ assertTrue(%HasFastObjectElements(a4));
+ assertEquals([1.1,{},2,3], a4);
+}
+
+if (support_smi_only_arrays) {
+ for (var i = 0; i < 3; i++) {
+ array_natives_test();
+ }
+ %OptimizeFunctionOnNextCall(array_natives_test);
+ array_natives_test();
+}
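+
+// The harness above is the standard mjsunit warm-up pattern: run the function
+// a few times to gather type feedback, request optimization, then run it
+// again and require the same results. A minimal sketch on a trivial helper
+// (the name warmup_pattern_demo is illustrative only):
+function warmup_pattern_demo(x) { return x + 1; }
+warmup_pattern_demo(1);                          // gather type feedback
+warmup_pattern_demo(2);
+%OptimizeFunctionOnNextCall(warmup_pattern_demo);
+assertEquals(4, warmup_pattern_demo(3));         // now runs optimized code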
diff --git a/src/3rdparty/v8/test/mjsunit/array-reduce.js b/src/3rdparty/v8/test/mjsunit/array-reduce.js
index 1e96188..429f348 100755
--- a/src/3rdparty/v8/test/mjsunit/array-reduce.js
+++ b/src/3rdparty/v8/test/mjsunit/array-reduce.js
@@ -418,8 +418,8 @@ try {
exception = true;
assertTrue(e instanceof TypeError,
"reduce callback not a function not throwing TypeError");
- assertEquals("called_non_callable", e.type,
- "reduce non function TypeError type");
+ assertTrue(e.message.indexOf(" is not a function") >= 0,
+ "reduce non function TypeError type");
}
assertTrue(exception);
@@ -430,8 +430,8 @@ try {
exception = true;
assertTrue(e instanceof TypeError,
"reduceRight callback not a function not throwing TypeError");
- assertEquals("called_non_callable", e.type,
- "reduceRight non function TypeError type");
+ assertTrue(e.message.indexOf(" is not a function") >= 0,
+ "reduceRight non function TypeError type");
}
assertTrue(exception);
@@ -442,7 +442,7 @@ try {
exception = true;
assertTrue(e instanceof TypeError,
"reduce no initial value not throwing TypeError");
- assertEquals("reduce_no_initial", e.type,
+ assertEquals("Reduce of empty array with no initial value", e.message,
"reduce no initial TypeError type");
}
assertTrue(exception);
@@ -454,7 +454,7 @@ try {
exception = true;
assertTrue(e instanceof TypeError,
"reduceRight no initial value not throwing TypeError");
- assertEquals("reduce_no_initial", e.type,
+ assertEquals("Reduce of empty array with no initial value", e.message,
"reduceRight no initial TypeError type");
}
assertTrue(exception);
@@ -466,7 +466,7 @@ try {
exception = true;
assertTrue(e instanceof TypeError,
"reduce sparse no initial value not throwing TypeError");
- assertEquals("reduce_no_initial", e.type,
+ assertEquals("Reduce of empty array with no initial value", e.message,
"reduce no initial TypeError type");
}
assertTrue(exception);
@@ -478,7 +478,7 @@ try {
exception = true;
assertTrue(e instanceof TypeError,
"reduceRight sparse no initial value not throwing TypeError");
- assertEquals("reduce_no_initial", e.type,
+ assertEquals("Reduce of empty array with no initial value", e.message,
"reduceRight no initial TypeError type");
}
assertTrue(exception);
diff --git a/src/3rdparty/v8/test/mjsunit/array-slice.js b/src/3rdparty/v8/test/mjsunit/array-slice.js
index 5ae31dc..ae0e3bc 100644
--- a/src/3rdparty/v8/test/mjsunit/array-slice.js
+++ b/src/3rdparty/v8/test/mjsunit/array-slice.js
@@ -290,3 +290,15 @@
func('a', 'b', 'c');
})();
+
+// Check slicing of holey objects with elements in the prototype
+(function() {
+ function f() {
+ delete arguments[1];
+ arguments.__proto__[1] = 5;
+ var result = Array.prototype.slice.call(arguments);
+ delete arguments.__proto__[1];
+ assertEquals([1,5,3], result);
+ }
+ f(1,2,3);
+})();
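+
+// Illustrative aside, not part of the original patch: reading a hole falls
+// through to the prototype chain, and Array.prototype.slice observes that,
+// so a plain holey array behaves the same way as the arguments object above:
+(function() {
+  var holey = [1, , 3];           // index 1 is a hole
+  Array.prototype[1] = 7;         // value visible through the hole
+  assertEquals([1, 7, 3], holey.slice(0));
+  delete Array.prototype[1];      // undo the global side effect
+})();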
diff --git a/src/3rdparty/v8/test/mjsunit/array-store-and-grow.js b/src/3rdparty/v8/test/mjsunit/array-store-and-grow.js
index 131d4eb..88f3db8 100644
--- a/src/3rdparty/v8/test/mjsunit/array-store-and-grow.js
+++ b/src/3rdparty/v8/test/mjsunit/array-store-and-grow.js
@@ -99,7 +99,10 @@ array_store_5(a, 1, 0.5);
a = makeCOW();
array_store_5(a, 1, 0.5);
assertEquals(0.5, a[1]);
-assertEquals(0.5, array_store_5([], 1, 0.5));
+a = [];
+assertEquals(0.5, array_store_5(a, 1, 0.5));
+assertEquals(undefined, a[0]);
+assertEquals(0.5, a[1]);
function array_store_6(a,b,c) {
return (a[b] = c);
diff --git a/src/3rdparty/v8/test/mjsunit/big-array-literal.js b/src/3rdparty/v8/test/mjsunit/big-array-literal.js
index 8e0ff87..9f06179 100644
--- a/src/3rdparty/v8/test/mjsunit/big-array-literal.js
+++ b/src/3rdparty/v8/test/mjsunit/big-array-literal.js
@@ -25,8 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// On MacOS, this test needs a stack size of at least 538 kBytes.
-// Flags: --stack-size=600
+// On MacOS X 10.7.5, this test needs a stack size of at least 788 kBytes.
+// Flags: --stack-size=800
// Test that we can make large object literals that work.
// Also test that we can attempt to make even larger object literals without
diff --git a/src/3rdparty/v8/test/mjsunit/builtins.js b/src/3rdparty/v8/test/mjsunit/builtins.js
index e43b589..062cfd5 100644
--- a/src/3rdparty/v8/test/mjsunit/builtins.js
+++ b/src/3rdparty/v8/test/mjsunit/builtins.js
@@ -54,7 +54,7 @@ function checkConstructor(func, name) {
assertFalse(proto_desc.writable, name);
assertFalse(proto_desc.configurable, name);
var prototype = proto_desc.value;
- assertEquals(null, prototype.__proto__, name);
+ assertEquals(null, Object.getPrototypeOf(prototype), name);
for (var i = 0; i < propNames.length; i++) {
var propName = propNames[i];
if (propName == "constructor") continue;
diff --git a/src/3rdparty/v8/test/mjsunit/compiler/inline-closures.js b/src/3rdparty/v8/test/mjsunit/compiler/inline-closures.js
new file mode 100644
index 0000000..69161e5
--- /dev/null
+++ b/src/3rdparty/v8/test/mjsunit/compiler/inline-closures.js
@@ -0,0 +1,49 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+// Test inlining of multiple closures derived from one shared function.
+
+function mkClosure(continuation) {
+ return function(value) {
+ if (continuation == 'g') return this.g(value);
+ if (continuation == 'h') return this.h(value);
+ return value.value;
+ }
+}
+
+var object = {};
+object.f = mkClosure('g');
+object.g = mkClosure('h');
+object.h = mkClosure('x');
+
+assertSame(1, object.f({value:1}));
+assertSame(2, object.f({value:2}));
+%OptimizeFunctionOnNextCall(object.f);
+assertSame(3, object.f({value:3}));
+assertSame(undefined, object.f({}));
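+
+// A minimal sketch of the situation being exercised (make_adder is an
+// illustrative name): each evaluation of the same function literal yields a
+// distinct closure, even though all of them share one underlying definition.
+function make_adder(n) { return function(x) { return x + n; }; }
+var add1 = make_adder(1);
+var add2 = make_adder(2);
+assertFalse(add1 === add2);       // distinct closures...
+assertEquals(3, add1(2));         // ...each with its own captured environment
+assertEquals(4, add2(2));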
diff --git a/src/3rdparty/v8/test/mjsunit/compiler/inline-function-apply.js b/src/3rdparty/v8/test/mjsunit/compiler/inline-function-apply.js
new file mode 100644
index 0000000..6b761f4
--- /dev/null
+++ b/src/3rdparty/v8/test/mjsunit/compiler/inline-function-apply.js
@@ -0,0 +1,89 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+// Test inlining and deoptimization of function.apply(this, arguments)
+// calls for which the exact number of arguments is known.
+(function () {
+ "use strict";
+ function test(argumentsCount) {
+ var dispatcher = {};
+ var deoptimize = { deopt:false };
+ dispatcher["const" + argumentsCount] = 0;
+ dispatcher.func = C;
+
+ function A(x,y) {
+ var r = "A";
+ if (argumentsCount == 1) r += B(10);
+ if (argumentsCount == 2) r += B(10, 11);
+ if (argumentsCount == 3) r += B(10, 11, 12);
+ assertSame(1, x);
+ assertSame(2, y);
+ return r;
+ }
+
+ function B(x,y) {
+ x = 0; y = 0;
+ var r = "B" + dispatcher.func.apply(this, arguments);
+ assertSame(argumentsCount, arguments.length);
+ for (var i = 0; i < arguments.length; i++) {
+ assertSame(10 + i, arguments[i]);
+ }
+ return r;
+ }
+
+ function C(x,y) {
+ x = 0; y = 0;
+ var r = "C"
+ deoptimize.deopt;
+ assertSame(argumentsCount, arguments.length);
+ for (var i = 0; i < arguments.length; i++) {
+ assertSame(10 + i, arguments[i]);
+ }
+ return r;
+ }
+
+ assertEquals("ABC", A(1,2));
+ assertEquals("ABC", A(1,2));
+ %OptimizeFunctionOnNextCall(A);
+ assertEquals("ABC", A(1,2));
+ delete deoptimize.deopt;
+ assertEquals("ABC", A(1,2));
+
+ %DeoptimizeFunction(A);
+ %ClearFunctionTypeFeedback(A);
+ %DeoptimizeFunction(B);
+ %ClearFunctionTypeFeedback(B);
+ %DeoptimizeFunction(C);
+ %ClearFunctionTypeFeedback(C);
+ }
+
+ for (var a = 1; a <= 3; a++) {
+ test(a);
+ }
+})();
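+
+// The core pattern under test, reduced to a sketch (names illustrative):
+// f.apply(this, arguments) forwards both the receiver and the exact argument
+// list, so a wrapper stays transparent to its callee.
+(function() {
+  var target = { sum: function(a, b) { return a + b; } };
+  target.wrapped = function() { return this.sum.apply(this, arguments); };
+  assertEquals(5, target.wrapped(2, 3));
+})();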
diff --git a/src/3rdparty/v8/test/mjsunit/compiler/multiply-add.js b/src/3rdparty/v8/test/mjsunit/compiler/multiply-add.js
new file mode 100644
index 0000000..2b4304e
--- /dev/null
+++ b/src/3rdparty/v8/test/mjsunit/compiler/multiply-add.js
@@ -0,0 +1,69 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+// Test expressions that can be computed with a multiply-add instruction.
+
+function f(a, b, c) {
+ return a * b + c;
+}
+
+function g(a, b, c) {
+ return a + b * c;
+}
+
+function h(a, b, c, d) {
+ return a * b + c * d;
+}
+
+assertEquals(5, f(1, 2, 3));
+assertEquals(5, f(1, 2, 3));
+%OptimizeFunctionOnNextCall(f);
+assertEquals(5, f(1, 2, 3));
+assertEquals("2foo", f(1, 2, "foo"));
+assertEquals(5.41, f(1.1, 2.1, 3.1));
+assertEquals(5.41, f(1.1, 2.1, 3.1));
+%OptimizeFunctionOnNextCall(f);
+assertEquals(5.41, f(1.1, 2.1, 3.1));
+
+assertEquals(7, g(1, 2, 3));
+assertEquals(7, g(1, 2, 3));
+%OptimizeFunctionOnNextCall(g);
+assertEquals(7, g(1, 2, 3));
+assertEquals(8.36, g(1.1, 2.2, 3.3));
+assertEquals(8.36, g(1.1, 2.2, 3.3));
+%OptimizeFunctionOnNextCall(g);
+assertEquals(8.36, g(1.1, 2.2, 3.3));
+
+assertEquals(14, h(1, 2, 3, 4));
+assertEquals(14, h(1, 2, 3, 4));
+%OptimizeFunctionOnNextCall(h);
+assertEquals(14, h(1, 2, 3, 4));
+assertEquals(15.02, h(1.1, 2.1, 3.1, 4.1));
+assertEquals(15.02, h(1.1, 2.1, 3.1, 4.1));
+%OptimizeFunctionOnNextCall(h);
+assertEquals(15.02, h(1.1, 2.1, 3.1, 4.1));
diff --git a/src/3rdparty/v8/test/mjsunit/compiler/multiply-sub.js b/src/3rdparty/v8/test/mjsunit/compiler/multiply-sub.js
new file mode 100644
index 0000000..4793181
--- /dev/null
+++ b/src/3rdparty/v8/test/mjsunit/compiler/multiply-sub.js
@@ -0,0 +1,56 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+// Test expressions that can be computed with a multiply-subtract instruction.
+
+function f(a, b, c) {
+ return a - b * c;
+}
+
+function g(a, b, c) {
+ return a * b - c;
+}
+
+function h(a, b, c, d) {
+ return a * b - c * d;
+}
+
+assertEquals(-5.41, f(1.1, 2.1, 3.1));
+assertEquals(-5.41, f(1.1, 2.1, 3.1));
+%OptimizeFunctionOnNextCall(f);
+assertEquals(-5.41, f(1.1, 2.1, 3.1));
+
+assertEquals(8.36, g(2.2, 3.3, -1.1));
+assertEquals(8.36, g(2.2, 3.3, -1.1));
+%OptimizeFunctionOnNextCall(g);
+assertEquals(8.36, g(2.2, 3.3, -1.1));
+
+assertEquals(-1.5, h(1.5, 3.0, 12, 0.5));
+assertEquals(-1.5, h(1.5, 3.0, 12, 0.5));
+%OptimizeFunctionOnNextCall(h);
+assertEquals(-1.5, h(1.5, 3.0, 12, 0.5));
diff --git a/src/3rdparty/v8/test/mjsunit/compiler/parallel-proto-change.js b/src/3rdparty/v8/test/mjsunit/compiler/parallel-proto-change.js
new file mode 100644
index 0000000..1aa135a
--- /dev/null
+++ b/src/3rdparty/v8/test/mjsunit/compiler/parallel-proto-change.js
@@ -0,0 +1,44 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+// Flags: --parallel-recompilation --manual-parallel-recompilation
+
+function f(foo) { return foo.bar(); }
+
+var o = {};
+o.__proto__ = { __proto__: { bar: function() { return 1; } } };
+
+assertEquals(1, f(o));
+assertEquals(1, f(o));
+
+%ForceParallelRecompile(f);
+// Change the prototype chain during optimization.
+o.__proto__.__proto__ = { bar: function() { return 2; } };
+%InstallRecompiledCode(f);
+
+assertEquals(2, f(o));
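+
+// The invariant behind this test, sketched without the parallel-recompilation
+// natives (call_bar is an illustrative name): optimized code that embeds a
+// prototype-chain lookup must be discarded when the chain changes, so the
+// call keeps returning the current answer whether or not it was optimized.
+var p = { __proto__: { bar: function() { return 1; } } };
+function call_bar(x) { return x.bar(); }
+assertEquals(1, call_bar(p));
+assertEquals(1, call_bar(p));
+%OptimizeFunctionOnNextCall(call_bar);
+assertEquals(1, call_bar(p));
+p.__proto__.bar = function() { return 2; };
+assertEquals(2, call_bar(p));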
diff --git a/src/3rdparty/v8/test/mjsunit/compiler/property-static.js b/src/3rdparty/v8/test/mjsunit/compiler/property-static.js
new file mode 100644
index 0000000..0702134
--- /dev/null
+++ b/src/3rdparty/v8/test/mjsunit/compiler/property-static.js
@@ -0,0 +1,69 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+// Test usage of static type information for loads that would otherwise
+// turn into polymorphic or generic loads.
+
+// Prepare a highly polymorphic load to be used by all tests.
+Object.prototype.load = function() { return this.property; };
+Object.prototype.load.call({ A:0, property:10 });
+Object.prototype.load.call({ A:0, B:0, property:11 });
+Object.prototype.load.call({ A:0, B:0, C:0, property:12 });
+Object.prototype.load.call({ A:0, B:0, C:0, D:0, property:13 });
+Object.prototype.load.call({ A:0, B:0, C:0, D:0, E:0, property:14 });
+Object.prototype.load.call({ A:0, B:0, C:0, D:0, E:0, F:0, property:15 });
+
+// Test for object literals.
+(function() {
+ function f(x) {
+ var object = { property:x };
+ return object.load();
+ }
+
+ assertSame(1, f(1));
+ assertSame(2, f(2));
+ %OptimizeFunctionOnNextCall(f);
+ assertSame(3, f(3));
+})();
+
+// Test for inlined constructors.
+(function() {
+ function c(x) {
+ this.property = x;
+ }
+ function f(x) {
+ var object = new c(x);
+ return object.load();
+ }
+
+ assertSame(1, f(1));
+ assertSame(2, f(2));
+ %OptimizeFunctionOnNextCall(f);
+ assertSame(3, f(3));
+})();
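+
+// One more sketch of the same contrast (g is an illustrative name): the
+// literal below has a statically known shape, so even though the load site
+// inside Object.prototype.load is megamorphic, the optimizer can still emit
+// a direct field load for it when g is optimized.
+(function() {
+  function g(x) {
+    var object = { property: x, extra: 0 };
+    return object.load();
+  }
+  assertSame(4, g(4));
+  assertSame(5, g(5));
+  %OptimizeFunctionOnNextCall(g);
+  assertSame(6, g(6));
+})();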
diff --git a/src/3rdparty/v8/test/mjsunit/compiler/proto-chain-constant.js b/src/3rdparty/v8/test/mjsunit/compiler/proto-chain-constant.js
new file mode 100644
index 0000000..0d9e3b0
--- /dev/null
+++ b/src/3rdparty/v8/test/mjsunit/compiler/proto-chain-constant.js
@@ -0,0 +1,55 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+// Test loading a constant function on the prototype chain.
+
+var c = Object.create;
+var obj4 = c(null, { f4: { value: function() { return 4; }, writable: true }});
+var obj3 = c(obj4, { f3: { value: function() { return 3; }, writable: true }});
+var obj2 = c(obj3, { f2: { value: function() { return 2; }, writable: true }});
+var obj1 = c(obj2, { f1: { value: function() { return 1; }, writable: true }});
+var obj0 = c(obj1, { f0: { value: function() { return 0; }, writable: true }});
+
+function get4(obj) { return obj.f4; }
+
+assertEquals(4, get4(obj0)());
+assertEquals(4, get4(obj0)());
+%OptimizeFunctionOnNextCall(get4);
+assertEquals(4, get4(obj0)());
+obj4.f4 = function() { return 5; };
+assertEquals(5, get4(obj0)());
+
+function get3(obj) { return obj.f3; }
+
+assertEquals(3, get3(obj0)());
+assertEquals(3, get3(obj0)());
+%OptimizeFunctionOnNextCall(get3);
+assertEquals(3, get3(obj0)());
+obj2.f3 = function() { return 6; };
+assertEquals(6, get3(obj0)());
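+
+// For reference, a compact sketch of the structure built above (base/leaf are
+// illustrative names): Object.create chains prototypes explicitly, and
+// `writable: true` is what makes the overwrites of f4 and f3 above legal.
+var base = Object.create(null, {
+  id: { value: function() { return "base"; }, writable: true }
+});
+var leaf = Object.create(base);
+assertEquals("base", leaf.id());
+base.id = function() { return "patched"; };   // allowed because writable
+assertEquals("patched", leaf.id());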
diff --git a/src/3rdparty/v8/test/mjsunit/constant-folding-2.js b/src/3rdparty/v8/test/mjsunit/constant-folding-2.js
new file mode 100644
index 0000000..6dbb4ab
--- /dev/null
+++ b/src/3rdparty/v8/test/mjsunit/constant-folding-2.js
@@ -0,0 +1,258 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+// Flags: --nodead-code-elimination --fold-constants --allow-natives-syntax
+
+function test(f) {
+ f();
+ f();
+ %OptimizeFunctionOnNextCall(f);
+ f();
+ // Assert that there has been no deopt.
+ assertTrue(%GetOptimizationStatus(f) != 2);
+}
+
+test(function add() {
+ assertEquals(2, 1 + 1);
+ assertEquals(2.5, 1.25 + 1.25);
+ assertEquals("Infinity", String(Infinity + Infinity));
+ assertEquals("Infinity", String(Infinity + 3));
+ assertEquals("NaN", String(Infinity + (-Infinity)));
+ assertEquals("NaN", String(NaN + 2));
+ assertEquals("-Infinity", String(1 / (-0.0 + (-0.0))));
+ assertEquals("Infinity", String(1 / (-0.0 + 0.0)));
+});
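+
+// A note on the String(1 / x) idiom above (aside, not from the original
+// file): 0 and -0 compare equal with ===, so the tests tell them apart by
+// dividing into them, which yields Infinity for +0 and -Infinity for -0.
+assertTrue(0 === -0);
+assertEquals("Infinity", String(1 / 0));
+assertEquals("-Infinity", String(1 / -0));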
+
+test(function inc() {
+ var a = 1;
+ var b = Infinity;
+ var c = -Infinity;
+ var d = NaN;
+ assertEquals(2, ++a);
+ assertEquals("Infinity", String(++b));
+ assertEquals("-Infinity", String(++c));
+ assertEquals("NaN", String(++d));
+});
+
+test(function dec() {
+ var a = 1;
+ var b = Infinity;
+ var c = -Infinity;
+ var d = NaN;
+ assertEquals(0, --a);
+ assertEquals("Infinity", String(--b));
+ assertEquals("-Infinity", String(--c));
+ assertEquals("NaN", String(--d));
+});
+
+test(function sub() {
+ assertEquals(0, 1 - 1);
+ assertEquals(0.5, 1.5 - 1);
+ assertEquals("Infinity", String(Infinity - (-Infinity)));
+ assertEquals("Infinity", String(Infinity - 3));
+ assertEquals("NaN", String(Infinity - Infinity));
+ assertEquals("NaN", String(NaN - 2));
+ assertEquals("-Infinity", String(1 / (-0.0 - 0.0)));
+ assertEquals("Infinity", String(1 / (0.0 - 0.0)));
+});
+
+test(function mul() {
+ assertEquals(1, 1 * 1);
+ assertEquals(2.25, 1.5 * 1.5);
+ assertEquals("Infinity", String(Infinity * Infinity));
+ assertEquals("-Infinity", String(Infinity * (-Infinity)));
+ assertEquals("Infinity", String(Infinity * 3));
+ assertEquals("-Infinity", String(Infinity * (-3)));
+ assertEquals("NaN", String(NaN * 3));
+ assertEquals("-Infinity", String(1 / (-0.0 * 0.0)));
+ assertEquals("Infinity", String(1 / (0.0 * 0.0)));
+});
+
+test(function div() {
+ assertEquals(1, 1 / 1);
+ assertEquals(1.5, 2.25 / 1.5);
+ assertEquals("NaN", String(Infinity / Infinity));
+ assertEquals("Infinity", String(Infinity / 3));
+ assertEquals("-Infinity", String(Infinity / (-3)));
+ assertEquals("NaN", String(NaN / 3));
+ assertEquals("-Infinity", String(1 / (-0.0)));
+ assertEquals("Infinity", String(Infinity/0.0));
+});
+
+test(function mathMin() {
+ assertEquals(1, Math.min(1, 10));
+ assertEquals(1.5, Math.min(1.5, 2.5));
+ assertEquals(0, Math.min(Infinity, 0));
+ assertEquals("Infinity", String(Math.min(Infinity, Infinity)));
+ assertEquals("-Infinity", String(Math.min(Infinity, -Infinity)));
+ assertEquals("NaN", String(Math.min(NaN, 1)));
+ assertEquals("Infinity", String(1 / Math.min(0.0, 0.0)));
+ assertEquals("-Infinity", String(1 / Math.min(-0.0, -0.0)));
+ assertEquals("-Infinity", String(1 / Math.min(0.0, -0.0)));
+});
+
+test(function mathMax() {
+ assertEquals(10, Math.max(1, 10));
+ assertEquals(2.5, Math.max(1.5, 2.5));
+ assertEquals(Infinity, Math.max(Infinity, 0));
+ assertEquals("-Infinity", String(Math.max(-Infinity, -Infinity)));
+ assertEquals("Infinity", String(Math.max(Infinity, -Infinity)));
+ assertEquals("NaN", String(Math.max(NaN, 1)));
+ assertEquals("Infinity", String(1 / Math.max(0.0, 0.0)));
+ assertEquals("-Infinity", String(1 / Math.max(-0.0, -0.0)));
+ assertEquals("Infinity", String(1 / Math.max(0.0, -0.0)));
+});
+
+test(function mathSin() {
+ assertEquals(0.0, Math.sin(0.0));
+ assertTrue(0.8 < Math.sin(1) && Math.sin(1) < 0.9);
+ assertEquals("NaN", String(Math.sin(Infinity)));
+ assertEquals("NaN", String(Math.sin(-Infinity)));
+ assertEquals("NaN", String(Math.sin(NaN)));
+});
+
+test(function mathCos() {
+ assertEquals(1.0, Math.cos(0.0));
+ assertTrue(0.5 < Math.cos(1) && Math.cos(1) < 0.6);
+ assertEquals("NaN", String(Math.cos(Infinity)));
+ assertEquals("NaN", String(Math.cos(-Infinity)));
+ assertEquals("NaN", String(Math.cos(NaN)));
+});
+
+test(function mathTan() {
+ assertEquals(0.0, Math.tan(0.0));
+ assertTrue(1.5 < Math.tan(1) && Math.tan(1) < 1.6);
+ assertEquals("NaN", String(Math.tan(Infinity)));
+ assertEquals("NaN", String(Math.tan(-Infinity)));
+ assertEquals("NaN", String(Math.tan(NaN)));
+});
+
+test(function mathExp() {
+ assertEquals(1.0, Math.exp(0.0));
+ assertTrue(2.7 < Math.exp(1) && Math.exp(1) < 2.8);
+ assertEquals("Infinity", String(Math.exp(Infinity)));
+ assertEquals("0", String(Math.exp(-Infinity)));
+ assertEquals("NaN", String(Math.exp(NaN)));
+});
+
+test(function mathLog() {
+ assertEquals(0.0, Math.log(1.0));
+ assertTrue(1 < Math.log(3) && Math.log(3) < 1.5);
+ assertEquals("Infinity", String(Math.log(Infinity)));
+ assertEquals("NaN", String(Math.log(-Infinity)));
+ assertEquals("NaN", String(Math.exp(NaN)));
+});
+
+test(function mathSqrt() {
+ assertEquals(1.0, Math.sqrt(1.0));
+ assertEquals("NaN", String(Math.sqrt(-1.0)));
+ assertEquals("Infinity", String(Math.sqrt(Infinity)));
+ assertEquals("NaN", String(Math.sqrt(-Infinity)));
+ assertEquals("NaN", String(Math.sqrt(NaN)));
+});
+
+test(function mathPowHalf() {
+ assertEquals(1.0, Math.pow(1.0, 0.5));
+ assertEquals("NaN", String(Math.sqrt(-1.0)));
+ assertEquals("Infinity", String(Math.pow(Infinity, 0.5)));
+ assertEquals("Infinity", String(Math.pow(-Infinity, 0.5)));
+ assertEquals(0, Math.pow(Infinity, -0.5));
+ assertEquals(0, Math.pow(-Infinity, -0.5));
+ assertEquals("NaN", String(Math.pow(NaN, 0.5)));
+});
+
+test(function mathAbs() {
+ assertEquals(1.5, Math.abs(1.5));
+ assertEquals(1.5, Math.abs(-1.5));
+ assertEquals("Infinity", String(Math.abs(Infinity)));
+ assertEquals("Infinity", String(Math.abs(-Infinity)));
+ assertEquals("NaN", String(Math.abs(NaN)));
+});
+
+test(function mathRound() {
+ assertEquals(2, Math.round(1.5));
+ assertEquals(-1, Math.round(-1.5));
+ assertEquals("Infinity", String(Math.round(Infinity)));
+ assertEquals("-Infinity", String(Math.round(-Infinity)));
+ assertEquals("Infinity", String(1 / Math.round(0.0)));
+ assertEquals("-Infinity", String(1 / Math.round(-0.0)));
+ assertEquals("NaN", String(Math.round(NaN)));
+ assertEquals(Math.pow(2, 52) + 1, Math.round(Math.pow(2, 52) + 1));
+});
+
+test(function mathFloor() {
+ assertEquals(1, Math.floor(1.5));
+ assertEquals(-2, Math.floor(-1.5));
+ assertEquals("Infinity", String(Math.floor(Infinity)));
+ assertEquals("-Infinity", String(Math.floor(-Infinity)));
+ assertEquals("Infinity", String(1 / Math.floor(0.0)));
+ assertEquals("-Infinity", String(1 / Math.floor(-0.0)));
+ assertEquals("NaN", String(Math.floor(NaN)));
+ assertEquals(Math.pow(2, 52) + 1, Math.floor(Math.pow(2, 52) + 1));
+});
+
+test(function mathPow() {
+ assertEquals(2.25, Math.pow(1.5, 2));
+ assertTrue(1.8 < Math.pow(1.5, 1.5) && Math.pow(1.5, 1.5) < 1.9);
+ assertEquals("Infinity", String(Math.pow(Infinity, 0.5)));
+ assertEquals("Infinity", String(Math.pow(-Infinity, 0.5)));
+ assertEquals(0, Math.pow(Infinity, -0.5));
+ assertEquals(0, Math.pow(-Infinity, -0.5));
+ assertEquals("Infinity", String(Math.pow(Infinity, Infinity)));
+ assertEquals(0, Math.pow(Infinity, -Infinity));
+ assertEquals("NaN", String(Math.pow(Infinity, NaN)));
+ assertEquals("NaN", String(Math.pow(NaN, 2)));
+});
+
+test(function stringAdd() {
+ assertEquals("", "" + "");
+ assertEquals("folded constant", "folded " + "constant");
+ assertEquals("not folded constant1", "not folded constant" + 1);
+});
+
+test(function stringLength() {
+ assertEquals(6, "abcdef".length);
+ assertEquals(0, "".length);
+ assertEquals(-5, { length: -5 }.length);
+});
+
+test(function stringCharCodeAt() {
+ assertEquals(99, "abc".charCodeAt(2));
+ assertEquals("NaN", String("abc".charCodeAt(-1)));
+ assertEquals("NaN", String("abc".charCodeAt(4)));
+ assertEquals(98, "abc".charCodeAt(1.1));
+ assertEquals("NaN", String("abc".charCodeAt(4.1)));
+});
+
+test(function stringCharAt() {
+ assertEquals("c", "abc".charAt(2));
+ assertEquals("", "abc".charAt(-1));
+ assertEquals("", "abc".charAt(4));
+ assertEquals("b", "abc".charAt(1.1));
+ assertEquals("", "abc".charAt(4.1));
+});
diff --git a/src/3rdparty/v8/test/mjsunit/debug-liveedit-compile-error.js b/src/3rdparty/v8/test/mjsunit/debug-liveedit-compile-error.js
new file mode 100644
index 0000000..2fd6aed
--- /dev/null
+++ b/src/3rdparty/v8/test/mjsunit/debug-liveedit-compile-error.js
@@ -0,0 +1,60 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-debug-as debug
+// Get the Debug object exposed from the debug context global object.
+
+Debug = debug.Debug
+
+eval("var something1 = 25; \n"
+ + " function ChooseAnimal() { return 'Cat'; } \n"
+ + " ChooseAnimal.Helper = function() { return 'Help!'; }\n");
+
+assertEquals("Cat", ChooseAnimal());
+
+var script = Debug.findScript(ChooseAnimal);
+
+var orig_animal = "Cat";
+var patch_pos = script.source.indexOf(orig_animal);
+var new_animal_patch = "Cap' + ) + 'bara";
+
+var change_log = new Array();
+var caught_exception = null;
+try {
+ Debug.LiveEdit.TestApi.ApplySingleChunkPatch(script, patch_pos,
+ orig_animal.length, new_animal_patch, change_log);
+} catch (e) {
+ caught_exception = e;
+}
+
+assertNotNull(caught_exception);
+assertEquals("Unexpected token )",
+ caught_exception.details.syntaxErrorMessage);
+
+assertEquals(2, caught_exception.details.position.start.line);
+
+
diff --git a/src/3rdparty/v8/test/mjsunit/debug-liveedit-literals.js b/src/3rdparty/v8/test/mjsunit/debug-liveedit-literals.js
new file mode 100644
index 0000000..5f9217e
--- /dev/null
+++ b/src/3rdparty/v8/test/mjsunit/debug-liveedit-literals.js
@@ -0,0 +1,94 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-debug-as debug
+// Get the Debug object exposed from the debug context global object.
+
+Debug = debug.Debug
+
+function Test(old_expression, new_expression) {
+ // Generate several instances of the function to test that we correctly fix
+ // all functions in memory.
+ var function_instance_number = 11;
+ eval("var t1 =1;\n" +
+ "ChooseAnimalArray = [];\n" +
+ "for (var i = 0; i < function_instance_number; i++) {\n" +
+ " ChooseAnimalArray.push(\n" +
+ " function ChooseAnimal() {\n" +
+ " return " + old_expression + ";\n" +
+ " });\n" +
+ "}\n" +
+ "var t2 =1;\n");
+
+ for (var i = 0; i < ChooseAnimalArray.length; i++) {
+ assertEquals("Cat", ChooseAnimalArray[i]());
+ }
+
+ var script = Debug.findScript(ChooseAnimalArray[0]);
+
+ var patch_pos = script.source.indexOf(old_expression);
+ var new_animal_patch = new_expression;
+
+ var change_log = new Array();
+ Debug.LiveEdit.TestApi.ApplySingleChunkPatch(script, patch_pos,
+ old_expression.length, new_expression, change_log);
+
+ for (var i = 0; i < ChooseAnimalArray.length; i++) {
+ assertEquals("Capybara", ChooseAnimalArray[i]());
+ }
+}
+
+// Check that old literal boilerplate was reset.
+Test("['Cat'][0]", "['Capybara'][0]");
+Test("['Cat'][0]", "{a:'Capybara'}.a");
+
+// No literals -> 1 literal.
+Test("'Cat'", "['Capybara'][0]");
+
+// No literals -> 2 literals.
+Test("'Cat'", "['Capy'][0] + {a:'bara'}.a");
+
+// 1 literal -> no literals.
+Test("['Cat'][0]", "'Capybara'");
+
+// 2 literals -> no literals.
+Test("['Ca'][0] + {a:'t'}.a", "'Capybara'");
+
+// No literals -> regexp.
+Test("'Cat'", "(/.A.Y.A.A/i).exec('Capybara')[0]");
+
+// Array literal -> regexp.
+Test("['Cat'][0]", "(/.A.Y.A.A/i).exec('Capybara')[0]");
+
+// Regexp -> object literal.
+Test("(/.A./i).exec('Cat')[0]", "{c:'Capybara'}.c");
+
+// No literals -> regexp.
+Test("'Cat'", "(/.A.Y.A.A/i).exec('Capybara')[0]");
+
+// Regexp -> no literals.
+Test("(/.A./i).exec('Cat')[0]", "'Capybara'");
diff --git a/src/3rdparty/v8/test/mjsunit/debug-set-variable-value.js b/src/3rdparty/v8/test/mjsunit/debug-set-variable-value.js
new file mode 100644
index 0000000..4667a71
--- /dev/null
+++ b/src/3rdparty/v8/test/mjsunit/debug-set-variable-value.js
@@ -0,0 +1,308 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-debug-as debug
+
+// Get the Debug object exposed from the debug context global object.
+var Debug = debug.Debug;
+
+// Accepts a function/closure 'fun' that must have a debugger statement inside.
+// A variable 'variable_name' must be initialized before the debugger statement
+// and returned after it. The test alters the variable's value while paused on
+// the debugger statement and checks that the returned value reflects the change.
+function RunPauseTest(scope_number, expected_old_result, variable_name,
+ new_value, expected_new_result, fun) {
+ var actual_old_result = fun();
+ assertEquals(expected_old_result, actual_old_result);
+
+ var listener_called = false;
+ var exception = null;
+
+ function listener_delegate(exec_state) {
+ var scope = exec_state.frame(0).scope(scope_number);
+ scope.setVariableValue(variable_name, new_value);
+ }
+
+ function listener(event, exec_state, event_data, data) {
+ try {
+ if (event == Debug.DebugEvent.Break) {
+ listener_called = true;
+ listener_delegate(exec_state);
+ }
+ } catch (e) {
+ exception = e;
+ }
+ }
+
+ // Add the debug event listener.
+ Debug.setListener(listener);
+
+ var actual_new_result;
+ try {
+ actual_new_result = fun();
+ } finally {
+ Debug.setListener(null);
+ }
+
+ if (exception != null) {
+ assertUnreachable("Exception in listener\n" + exception.stack);
+ }
+ assertTrue(listener_called);
+
+ assertEquals(expected_new_result, actual_new_result);
+}
+
+// Accepts a closure 'fun' that returns a variable from its outer scope.
+// The test changes the value of that variable via a mirror handle to the
+// function and checks that the return value changes accordingly.
+function RunClosureTest(scope_number, expected_old_result, variable_name,
+ new_value, expected_new_result, fun) {
+ var actual_old_result = fun();
+ assertEquals(expected_old_result, actual_old_result);
+
+ var fun_mirror = Debug.MakeMirror(fun);
+
+ var scope = fun_mirror.scope(scope_number);
+ scope.setVariableValue(variable_name, new_value);
+
+ var actual_new_result = fun();
+
+ assertEquals(expected_new_result, actual_new_result);
+}
+
+
+function ClosureTestCase(scope_index, old_result, variable_name, new_value,
+ new_result, success_expected, factory) {
+ this.scope_index_ = scope_index;
+ this.old_result_ = old_result;
+ this.variable_name_ = variable_name;
+ this.new_value_ = new_value;
+ this.new_result_ = new_result;
+ this.success_expected_ = success_expected;
+ this.factory_ = factory;
+}
+
+ClosureTestCase.prototype.run_pause_test = function() {
+ var th = this;
+ var fun = this.factory_(true);
+ this.run_and_catch_(function() {
+ RunPauseTest(th.scope_index_ + 1, th.old_result_, th.variable_name_,
+ th.new_value_, th.new_result_, fun);
+ });
+}
+
+ClosureTestCase.prototype.run_closure_test = function() {
+ var th = this;
+ var fun = this.factory_(false);
+ this.run_and_catch_(function() {
+ RunClosureTest(th.scope_index_, th.old_result_, th.variable_name_,
+ th.new_value_, th.new_result_, fun);
+ });
+}
+
+ClosureTestCase.prototype.run_and_catch_ = function(runnable) {
+ if (this.success_expected_) {
+ runnable();
+ } else {
+ assertThrows(runnable);
+ }
+}
+
+
+// Test scopes visible from closures.
+
+var closure_test_cases = [
+ new ClosureTestCase(0, 'cat', 'v1', 5, 5, true,
+ function Factory(debug_stop) {
+ var v1 = 'cat';
+ return function() {
+ if (debug_stop) debugger;
+ return v1;
+ }
+ }),
+
+ new ClosureTestCase(0, 4, 't', 7, 9, true, function Factory(debug_stop) {
+ var t = 2;
+ var r = eval("t");
+ return function() {
+ if (debug_stop) debugger;
+ return r + t;
+ }
+ }),
+
+ new ClosureTestCase(0, 6, 't', 10, 13, true, function Factory(debug_stop) {
+ var t = 2;
+ var r = eval("t = 3");
+ return function() {
+ if (debug_stop) debugger;
+ return r + t;
+ }
+ }),
+
+ new ClosureTestCase(0, 17, 's', 'Bird', 'Bird', true,
+ function Factory(debug_stop) {
+ eval("var s = 17");
+ return function() {
+ if (debug_stop) debugger;
+ return s;
+ }
+ }),
+
+ new ClosureTestCase(2, 'capybara', 'foo', 77, 77, true,
+ function Factory(debug_stop) {
+ var foo = "capybara";
+ return (function() {
+ var bar = "fish";
+ try {
+ throw {name: "test exception"};
+ } catch (e) {
+ return function() {
+ if (debug_stop) debugger;
+ bar = "beast";
+ return foo;
+ }
+ }
+ })();
+ }),
+
+ new ClosureTestCase(0, 'AlphaBeta', 'eee', 5, '5Beta', true,
+ function Factory(debug_stop) {
+ var foo = "Beta";
+ return (function() {
+ var bar = "fish";
+ try {
+ throw "Alpha";
+ } catch (eee) {
+ return function() {
+ if (debug_stop) debugger;
+ return eee + foo;
+ }
+ }
+ })();
+ })
+];
+
+for (var i = 0; i < closure_test_cases.length; i++) {
+ closure_test_cases[i].run_pause_test();
+}
+
+for (var i = 0; i < closure_test_cases.length; i++) {
+ closure_test_cases[i].run_closure_test();
+}
+
+
+// Test local scope.
+
+RunPauseTest(0, 'HelloYou', 'u', 'We', 'HelloWe', (function Factory() {
+ return function() {
+ var u = "You";
+ var v = "Hello";
+ debugger;
+ return v + u;
+ }
+})());
+
+RunPauseTest(0, 'Helloworld', 'p', 'GoodBye', 'HelloGoodBye',
+ (function Factory() {
+ function H(p) {
+ var v = "Hello";
+ debugger;
+ return v + p;
+ }
+ return function() {
+ return H("world");
+ }
+})());
+
+RunPauseTest(0, 'mouse', 'v1', 'dog', 'dog', (function Factory() {
+ return function() {
+ var v1 = 'cat';
+ eval("v1 = 'mouse'");
+ debugger;
+ return v1;
+ }
+})());
+
+RunPauseTest(0, 'mouse', 'v1', 'dog', 'dog', (function Factory() {
+ return function() {
+ eval("var v1 = 'mouse'");
+ debugger;
+ return v1;
+ }
+})());
+
+
+// Check that we correctly update a local variable that is referenced from an
+// inner closure.
+RunPauseTest(0, 'Blue', 'v', 'Green', 'Green', (function Factory() {
+ return function() {
+ function A() {
+ var v = "Blue";
+ function Inner() {
+ return void v;
+ }
+ debugger;
+ return v;
+ }
+ return A();
+ }
+})());
+
+// Check that we correctly update a parameter that is known to be stored both
+// on the stack and in the heap.
+RunPauseTest(0, 5, 'p', 2012, 2012, (function Factory() {
+ return function() {
+ function A(p) {
+ function Inner() {
+ return void p;
+ }
+ debugger;
+ return p;
+ }
+ return A(5);
+ }
+})());
+
+
+// Test value description protocol JSON
+
+assertEquals(true, Debug.TestApi.CommandProcessorResolveValue({value: true}));
+
+assertSame(null, Debug.TestApi.CommandProcessorResolveValue({type: "null"}));
+assertSame(undefined,
+ Debug.TestApi.CommandProcessorResolveValue({type: "undefined"}));
+
+assertSame("123", Debug.TestApi.CommandProcessorResolveValue(
+ {type: "string", stringDescription: "123"}));
+assertSame(123, Debug.TestApi.CommandProcessorResolveValue(
+ {type: "number", stringDescription: "123"}));
+
+assertSame(Number, Debug.TestApi.CommandProcessorResolveValue(
+ {handle: Debug.MakeMirror(Number).handle()}));
+assertSame(RunClosureTest, Debug.TestApi.CommandProcessorResolveValue(
+ {handle: Debug.MakeMirror(RunClosureTest).handle()}));
diff --git a/src/3rdparty/v8/test/mjsunit/elements-kind.js b/src/3rdparty/v8/test/mjsunit/elements-kind.js
index b74a212..92bdac7 100644
--- a/src/3rdparty/v8/test/mjsunit/elements-kind.js
+++ b/src/3rdparty/v8/test/mjsunit/elements-kind.js
@@ -26,6 +26,7 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --allow-natives-syntax --smi-only-arrays --expose-gc
+// Flags: --notrack_allocation_sites
// Test element kind of objects.
// Since --smi-only-arrays affects builtins, its default setting at compile
@@ -321,8 +322,7 @@ if (support_smi_only_arrays) {
assertKind(elements_kind.fast_double, b);
var c = a.concat(b);
assertEquals([1, 2, 4.5, 5.5], c);
- // TODO(1810): Change implementation so that we get DOUBLE elements here?
- assertKind(elements_kind.fast, c);
+ assertKind(elements_kind.fast_double, c);
}
// Test that Array.push() correctly handles SMI elements.
diff --git a/src/3rdparty/v8/test/mjsunit/elements-transition.js b/src/3rdparty/v8/test/mjsunit/elements-transition.js
index 0dffd37..e28f3c3 100644
--- a/src/3rdparty/v8/test/mjsunit/elements-transition.js
+++ b/src/3rdparty/v8/test/mjsunit/elements-transition.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax --smi-only-arrays
+// Flags: --allow-natives-syntax --smi-only-arrays --notrack-allocation-sites
support_smi_only_arrays = %HasFastSmiElements(new Array(1,2,3,4,5,6,7,8));
diff --git a/src/3rdparty/v8/test/mjsunit/error-constructors.js b/src/3rdparty/v8/test/mjsunit/error-constructors.js
index 107164d..84c6bbf 100644
--- a/src/3rdparty/v8/test/mjsunit/error-constructors.js
+++ b/src/3rdparty/v8/test/mjsunit/error-constructors.js
@@ -36,10 +36,6 @@ assertFalse(desc['enumerable']);
var e = new Error("foobar");
desc = Object.getOwnPropertyDescriptor(e, 'message');
assertFalse(desc['enumerable']);
-desc = Object.getOwnPropertyDescriptor(e, 'arguments');
-assertFalse(desc['enumerable']);
-desc = Object.getOwnPropertyDescriptor(e, 'type');
-assertFalse(desc['enumerable']);
desc = Object.getOwnPropertyDescriptor(e, 'stack');
assertFalse(desc['enumerable']);
@@ -57,26 +53,17 @@ for (var v in e) {
function fail() { assertUnreachable(); };
ReferenceError.prototype.__defineSetter__('name', fail);
ReferenceError.prototype.__defineSetter__('message', fail);
-ReferenceError.prototype.__defineSetter__('type', fail);
-ReferenceError.prototype.__defineSetter__('arguments', fail);
ReferenceError.prototype.__defineSetter__('stack', fail);
var e = new ReferenceError();
assertTrue(e.hasOwnProperty('stack'));
-assertTrue(e.hasOwnProperty('type'));
-assertTrue(e.hasOwnProperty('arguments'));
var e = new ReferenceError('123');
assertTrue(e.hasOwnProperty('message'));
assertTrue(e.hasOwnProperty('stack'));
-assertTrue(e.hasOwnProperty('type'));
-assertTrue(e.hasOwnProperty('arguments'));
var e = %MakeReferenceError("my_test_error", [0, 1]);
assertTrue(e.hasOwnProperty('stack'));
-assertTrue(e.hasOwnProperty('type'));
-assertTrue(e.hasOwnProperty('arguments'));
-assertEquals("my_test_error", e.type)
// Check that intercepting property access from toString is prevented for
// compiler errors. This is not specified, but allowing interception
@@ -86,7 +73,7 @@ var errors = [SyntaxError, ReferenceError, TypeError];
for (var i in errors) {
var name = errors[i].prototype.toString();
// Monkey-patch prototype.
- var props = ["name", "message", "type", "arguments", "stack"];
+ var props = ["name", "message", "stack"];
for (var j in props) {
errors[i].prototype.__defineGetter__(props[j], fail);
}
diff --git a/src/3rdparty/v8/test/mjsunit/error-tostring.js b/src/3rdparty/v8/test/mjsunit/error-tostring.js
index a285641..8a8a969 100644
--- a/src/3rdparty/v8/test/mjsunit/error-tostring.js
+++ b/src/3rdparty/v8/test/mjsunit/error-tostring.js
@@ -83,3 +83,11 @@ assertEquals(["Error: e2",[1,3,4]], testErrorToString(undefined, "e2"));
assertEquals(["null: e2",[1,2,3,4]], testErrorToString(null, "e2"));
assertEquals(["e2",[1,2,3,4]], testErrorToString("", "e2"));
assertEquals(["e1: e2",[1,2,3,4]], testErrorToString("e1", "e2"));
+
+var obj = {
+ get constructor () {
+ assertUnreachable();
+ }
+};
+
+assertThrows(function() { obj.x(); });
diff --git a/src/3rdparty/v8/test/mjsunit/eval-stack-trace.js b/src/3rdparty/v8/test/mjsunit/eval-stack-trace.js
index 723d522..d83b84c 100644
--- a/src/3rdparty/v8/test/mjsunit/eval-stack-trace.js
+++ b/src/3rdparty/v8/test/mjsunit/eval-stack-trace.js
@@ -26,12 +26,13 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Return the stack frames of an Error object.
+
+Error.prepareStackTrace = function(error, frames) {
+ return frames;
+}
+
Error.prototype.getFrames = function() {
- Error.prepareStackTrace = function(error, frames) {
- return frames;
- }
var frames = this.stack;
- Error.prepareStackTrace = undefined;
return frames;
}
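
The hunk above hoists the Error.prepareStackTrace assignment to script top
level, so every Error created afterwards captures the structured frame array
rather than a formatted string. A minimal sketch of how this V8-specific hook
behaves; the frames passed in are CallSite objects, and the function below is
illustrative, not part of the test:

    Error.prepareStackTrace = function(error, frames) {
      // Map each CallSite to a readable summary.
      return frames.map(function(frame) {
        return frame.getFunctionName() + ":" + frame.getLineNumber();
      });
    };

    function boom() { throw new Error("fail"); }
    try { boom(); } catch (e) { print(e.stack); }  // prints the mapped array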
diff --git a/src/3rdparty/v8/test/mjsunit/fast-prototype.js b/src/3rdparty/v8/test/mjsunit/fast-prototype.js
index 7fd73a4..83bcffe 100644
--- a/src/3rdparty/v8/test/mjsunit/fast-prototype.js
+++ b/src/3rdparty/v8/test/mjsunit/fast-prototype.js
@@ -27,6 +27,10 @@
// Flags: --allow-natives-syntax
+// TODO(mstarzinger): This test does not succeed when GCs happen in
+// between prototype transitions, so we disable GC stress for now.
+// Flags: --noincremental-marking
+
// Check that objects that are used for prototypes are in the fast mode.
function Super() {
diff --git a/src/3rdparty/v8/test/mjsunit/function-call.js b/src/3rdparty/v8/test/mjsunit/function-call.js
index 26890ed..92792ac 100644
--- a/src/3rdparty/v8/test/mjsunit/function-call.js
+++ b/src/3rdparty/v8/test/mjsunit/function-call.js
@@ -67,8 +67,7 @@ var should_throw_on_null_and_undefined =
String.prototype.toLocaleLowerCase,
String.prototype.toUpperCase,
String.prototype.toLocaleUpperCase,
- String.prototype.trim,
- Number.prototype.toLocaleString];
+ String.prototype.trim];
// Non generic natives do not work on any input other than the specific
// type, but since this change will allow call to be invoked with undefined
@@ -150,6 +149,11 @@ var reducing_functions =
[Array.prototype.reduce,
Array.prototype.reduceRight];
+function checkExpectedMessage(e) {
+ assertTrue(e.message.indexOf("called on null or undefined") >= 0 ||
+ e.message.indexOf("Cannot convert null to object") >= 0);
+}
+
// Test that all natives using the ToObject call throw the right exception.
for (var i = 0; i < should_throw_on_null_and_undefined.length; i++) {
// Sanity check that all functions are correct
@@ -166,8 +170,7 @@ for (var i = 0; i < should_throw_on_null_and_undefined.length; i++) {
should_throw_on_null_and_undefined[i].call(null);
} catch (e) {
exception = true;
- assertTrue("called_on_null_or_undefined" == e.type ||
- "null_to_object" == e.type);
+ checkExpectedMessage(e);
}
assertTrue(exception);
@@ -176,8 +179,7 @@ for (var i = 0; i < should_throw_on_null_and_undefined.length; i++) {
should_throw_on_null_and_undefined[i].call(undefined);
} catch (e) {
exception = true;
- assertTrue("called_on_null_or_undefined" == e.type ||
- "null_to_object" == e.type);
+ checkExpectedMessage(e);
}
assertTrue(exception);
@@ -186,8 +188,7 @@ for (var i = 0; i < should_throw_on_null_and_undefined.length; i++) {
should_throw_on_null_and_undefined[i].apply(null);
} catch (e) {
exception = true;
- assertTrue("called_on_null_or_undefined" == e.type ||
- "null_to_object" == e.type);
+ checkExpectedMessage(e);
}
assertTrue(exception);
@@ -196,8 +197,7 @@ for (var i = 0; i < should_throw_on_null_and_undefined.length; i++) {
should_throw_on_null_and_undefined[i].apply(undefined);
} catch (e) {
exception = true;
- assertTrue("called_on_null_or_undefined" == e.type ||
- "null_to_object" == e.type);
+ checkExpectedMessage(e);
}
assertTrue(exception);
}
@@ -257,8 +257,7 @@ for (var j = 0; j < mapping_functions.length; j++) {
null);
} catch (e) {
exception = true;
- assertTrue("called_on_null_or_undefined" == e.type ||
- "null_to_object" == e.type);
+ checkExpectedMessage(e);
}
assertTrue(exception);
@@ -269,8 +268,7 @@ for (var j = 0; j < mapping_functions.length; j++) {
undefined);
} catch (e) {
exception = true;
- assertTrue("called_on_null_or_undefined" == e.type ||
- "null_to_object" == e.type);
+ checkExpectedMessage(e);
}
assertTrue(exception);
}
@@ -311,8 +309,7 @@ for (var j = 0; j < reducing_functions.length; j++) {
reducing_functions[j].call(array, should_throw_on_null_and_undefined[i]);
} catch (e) {
exception = true;
- assertTrue("called_on_null_or_undefined" == e.type ||
- "null_to_object" == e.type);
+ checkExpectedMessage(e);
}
assertTrue(exception);
@@ -321,8 +318,7 @@ for (var j = 0; j < reducing_functions.length; j++) {
reducing_functions[j].call(array, should_throw_on_null_and_undefined[i]);
} catch (e) {
exception = true;
- assertTrue("called_on_null_or_undefined" == e.type ||
- "null_to_object" == e.type);
+ checkExpectedMessage(e);
}
assertTrue(exception);
}
diff --git a/src/3rdparty/v8/test/mjsunit/fuzz-natives-part1.js b/src/3rdparty/v8/test/mjsunit/fuzz-natives-part1.js
index 6941d80..596af97 100644
--- a/src/3rdparty/v8/test/mjsunit/fuzz-natives-part1.js
+++ b/src/3rdparty/v8/test/mjsunit/fuzz-natives-part1.js
@@ -147,10 +147,12 @@ var knownProblems = {
"PushWithContext": true,
"PushCatchContext": true,
"PushBlockContext": true,
+ "PushModuleContext": true,
"LazyCompile": true,
"LazyRecompile": true,
"ParallelRecompile": true,
"NotifyDeoptimized": true,
+ "NotifyStubFailure": true,
"NotifyOSR": true,
"CreateObjectLiteralBoilerplate": true,
"CloneLiteralBoilerplate": true,
@@ -195,7 +197,13 @@ var knownProblems = {
// Only applicable to strings.
"_HasCachedArrayIndex": true,
- "_GetCachedArrayIndex": true
+ "_GetCachedArrayIndex": true,
+ "_OneByteSeqStringSetChar": true,
+ "_TwoByteSeqStringSetChar": true,
+
+ // Only for debugging parallel recompilation.
+ "InstallRecompiledCode": true,
+ "ForceParallelRecompile": true
};
var currentlyUncallable = {
diff --git a/src/3rdparty/v8/test/mjsunit/fuzz-natives-part2.js b/src/3rdparty/v8/test/mjsunit/fuzz-natives-part2.js
index ea8a2cf..2faad1d 100644
--- a/src/3rdparty/v8/test/mjsunit/fuzz-natives-part2.js
+++ b/src/3rdparty/v8/test/mjsunit/fuzz-natives-part2.js
@@ -147,6 +147,7 @@ var knownProblems = {
"PushWithContext": true,
"PushCatchContext": true,
"PushBlockContext": true,
+ "PushModuleContext": true,
"LazyCompile": true,
"LazyRecompile": true,
"ParallelRecompile": true,
@@ -195,7 +196,13 @@ var knownProblems = {
// Only applicable to strings.
"_HasCachedArrayIndex": true,
- "_GetCachedArrayIndex": true
+ "_GetCachedArrayIndex": true,
+ "_OneByteSeqStringSetChar": true,
+ "_TwoByteSeqStringSetChar": true,
+
+ // Only for debugging parallel recompilation.
+ "InstallRecompiledCode": true,
+ "ForceParallelRecompile": true
};
var currentlyUncallable = {
diff --git a/src/3rdparty/v8/test/mjsunit/fuzz-natives-part3.js b/src/3rdparty/v8/test/mjsunit/fuzz-natives-part3.js
index ecfdf97..ed71d33 100644
--- a/src/3rdparty/v8/test/mjsunit/fuzz-natives-part3.js
+++ b/src/3rdparty/v8/test/mjsunit/fuzz-natives-part3.js
@@ -147,6 +147,7 @@ var knownProblems = {
"PushWithContext": true,
"PushCatchContext": true,
"PushBlockContext": true,
+ "PushModuleContext": true,
"LazyCompile": true,
"LazyRecompile": true,
"ParallelRecompile": true,
@@ -195,7 +196,13 @@ var knownProblems = {
// Only applicable to strings.
"_HasCachedArrayIndex": true,
- "_GetCachedArrayIndex": true
+ "_GetCachedArrayIndex": true,
+ "_OneByteSeqStringSetChar": true,
+ "_TwoByteSeqStringSetChar": true,
+
+ // Only for debugging parallel recompilation.
+ "InstallRecompiledCode": true,
+ "ForceParallelRecompile": true
};
var currentlyUncallable = {
diff --git a/src/3rdparty/v8/test/mjsunit/fuzz-natives-part4.js b/src/3rdparty/v8/test/mjsunit/fuzz-natives-part4.js
index da04596..1b128d5 100644
--- a/src/3rdparty/v8/test/mjsunit/fuzz-natives-part4.js
+++ b/src/3rdparty/v8/test/mjsunit/fuzz-natives-part4.js
@@ -147,6 +147,7 @@ var knownProblems = {
"PushWithContext": true,
"PushCatchContext": true,
"PushBlockContext": true,
+ "PushModuleContext": true,
"LazyCompile": true,
"LazyRecompile": true,
"ParallelRecompile": true,
@@ -195,7 +196,13 @@ var knownProblems = {
// Only applicable to strings.
"_HasCachedArrayIndex": true,
- "_GetCachedArrayIndex": true
+ "_GetCachedArrayIndex": true,
+ "_OneByteSeqStringSetChar": true,
+ "_TwoByteSeqStringSetChar": true,
+
+ // Only for debugging parallel recompilation.
+ "InstallRecompiledCode": true,
+ "ForceParallelRecompile": true
};
var currentlyUncallable = {
diff --git a/src/3rdparty/v8/test/mjsunit/generated-transition-stub.js b/src/3rdparty/v8/test/mjsunit/generated-transition-stub.js
new file mode 100644
index 0000000..dd1043b
--- /dev/null
+++ b/src/3rdparty/v8/test/mjsunit/generated-transition-stub.js
@@ -0,0 +1,218 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax --compiled_transitions
+
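+// The empty try/catch that follows is presumably meant to keep Crankshaft,
+// which bailed out on try/catch, from optimizing this script's top-level
+// code (an assumption, not documented in the test).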
+try {} catch (e) {}
+
+var iteration_count = 1;
+
+function transition1(a, i, v) {
+ a[i] = v;
+}
+
+//
+// Test PACKED SMI -> PACKED DOUBLE
+//
+
+var a1 = [0, 1, 2, 3, 4];
+transition1(a1, 0, 2.5);
+var a2 = [0, 1, 2, 3, 4];
+transition1(a2, 0, 2.5);
+assertFalse(%HasFastHoleyElements(a2));
+%OptimizeFunctionOnNextCall(transition1);
+
+var a3 = [0, 1, 2, 3, 4];
+assertTrue(%HasFastSmiElements(a3));
+transition1(a3, 0, 2.5);
+assertFalse(%HasFastHoleyElements(a3));
+assertEquals(4, a3[4]);
+assertEquals(2.5, a3[0]);
+
+// Test handling of hole.
+var a4 = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9];
+a4.length = 7;
+assertTrue(%HasFastSmiElements(a4));
+transition1(a4, 0, 2.5);
+assertFalse(%HasFastHoleyElements(a4));
+assertEquals(2.5, a4[0]);
+assertEquals(undefined, a4[8]);
+
+// Large array should deopt to runtime
+for (j = 0; j < iteration_count; ++j) {
+ a5 = new Array();
+ for (i = 0; i < 0x40000; ++i) {
+ a5[i] = 0;
+ }
+ assertTrue(%HasFastSmiElements(a5));
+ transition1(a5, 0, 2.5);
+ assertEquals(2.5, a5[0]);
+}
+
+//
+// Test HOLEY SMI -> HOLEY DOUBLE
+//
+
+function transition2(a, i, v) {
+ a[i] = v;
+}
+
+var b1 = [0, 1, 2, , 4];
+transition2(b1, 0, 2.5);
+var b2 = [0, 1, 2, , 4];
+transition2(b2, 0, 2.5);
+assertTrue(%HasFastHoleyElements(b2));
+%OptimizeFunctionOnNextCall(transition2);
+
+var b3 = [0, 1, 2, , 4];
+assertTrue(%HasFastSmiElements(b3));
+assertTrue(%HasFastHoleyElements(b3));
+transition2(b3, 0, 2.5);
+assertTrue(%HasFastHoleyElements(b3));
+assertEquals(4, b3[4]);
+assertEquals(2.5, b3[0]);
+
+// Large array should deopt to runtime
+for (j = 0; j < iteration_count; ++j) {
+ b4 = [0, ,0];
+ for (i = 3; i < 0x40000; ++i) {
+ b4[i] = 0;
+ }
+ assertTrue(%HasFastSmiElements(b4));
+ transition2(b4, 0, 2.5);
+ assertEquals(2.5, b4[0]);
+}
+
+//
+// Test PACKED DOUBLE -> PACKED OBJECT
+//
+
+function transition3(a, i, v) {
+ a[i] = v;
+}
+
+var c1 = [0, 1, 2, 3.5, 4];
+transition3(c1, 0, new Object());
+var c2 = [0, 1, 2, 3.5, 4];
+transition3(c2, 0, new Object());
+assertTrue(%HasFastObjectElements(c2));
+assertTrue(!%HasFastHoleyElements(c2));
+%OptimizeFunctionOnNextCall(transition3);
+
+var c3 = [0, 1, 2, 3.5, 4];
+assertTrue(%HasFastDoubleElements(c3));
+assertTrue(!%HasFastHoleyElements(c3));
+transition3(c3, 0, new Array());
+assertTrue(!%HasFastHoleyElements(c3));
+assertTrue(%HasFastObjectElements(c3));
+assertEquals(4, c3[4]);
+assertEquals(0, c3[0].length);
+
+// Large array under the deopt threshold should be able to trigger GC without
+// causing crashes.
+for (j = 0; j < iteration_count; ++j) {
+ c4 = [0, 2.5, 0];
+ for (i = 3; i < 0xa000; ++i) {
+ c4[i] = 0;
+ }
+ assertTrue(%HasFastDoubleElements(c4));
+ assertTrue(!%HasFastHoleyElements(c4));
+ transition3(c4, 0, new Array(5));
+ assertTrue(!%HasFastHoleyElements(c4));
+ assertTrue(%HasFastObjectElements(c4));
+ assertEquals(5, c4[0].length);
+}
+
+// Large array should deopt to runtime
+for (j = 0; j < iteration_count; ++j) {
+ c5 = [0, 2.5, 0];
+ for (i = 3; i < 0x40000; ++i) {
+ c5[i] = 0;
+ }
+ assertTrue(%HasFastDoubleElements(c5));
+ assertTrue(!%HasFastHoleyElements(c5));
+ transition3(c5, 0, new Array(5));
+ assertTrue(!%HasFastHoleyElements(c5));
+ assertTrue(%HasFastObjectElements(c5));
+ assertEquals(5, c5[0].length);
+}
+
+//
+// Test HOLEY DOUBLE -> HOLEY OBJECT
+//
+
+function transition4(a, i, v) {
+ a[i] = v;
+}
+
+var d1 = [0, 1, , 3.5, 4];
+transition4(d1, 0, new Object());
+var d2 = [0, 1, , 3.5, 4];
+transition4(d2, 0, new Object());
+assertTrue(%HasFastObjectElements(d2));
+assertTrue(%HasFastHoleyElements(d2));
+%OptimizeFunctionOnNextCall(transition4);
+
+var d3 = [0, 1, , 3.5, 4];
+assertTrue(%HasFastDoubleElements(d3));
+assertTrue(%HasFastHoleyElements(d3));
+transition4(d3, 0, new Array());
+assertTrue(%HasFastHoleyElements(d3));
+assertTrue(%HasFastObjectElements(d3));
+assertEquals(4, d3[4]);
+assertEquals(0, d3[0].length);
+
+// Large array under the deopt threshold should be able to trigger GC without
+// causing crashes.
+for (j = 0; j < iteration_count; ++j) {
+ d4 = [, 2.5, ,];
+ for (i = 3; i < 0xa000; ++i) {
+ d4[i] = 0;
+ }
+ assertTrue(%HasFastDoubleElements(d4));
+ assertTrue(%HasFastHoleyElements(d4));
+ transition4(d4, 0, new Array(5));
+ assertTrue(%HasFastHoleyElements(d4));
+ assertTrue(%HasFastObjectElements(d4));
+ assertEquals(5, d4[0].length);
+ assertEquals(undefined, d4[2]);
+}
+
+// Large array should deopt to runtime
+for (j = 0; j < iteration_count; ++j) {
+ d5 = [, 2.5, ,];
+ for (i = 3; i < 0x40000; ++i) {
+ d5[i] = 0;
+ }
+ assertTrue(%HasFastDoubleElements(d5));
+ assertTrue(%HasFastHoleyElements(d5));
+ transition4(d5, 0, new Array(5));
+ assertTrue(%HasFastHoleyElements(d5));
+ assertTrue(%HasFastObjectElements(d5));
+ assertEquals(5, d5[0].length);
+ assertEquals(undefined, d5[2]);
+}
diff --git a/src/3rdparty/v8/test/mjsunit/harmony/module-linking.js b/src/3rdparty/v8/test/mjsunit/harmony/module-linking.js
index a4b272f..3c0f18c 100644
--- a/src/3rdparty/v8/test/mjsunit/harmony/module-linking.js
+++ b/src/3rdparty/v8/test/mjsunit/harmony/module-linking.js
@@ -112,7 +112,7 @@ module R {
assertThrows(function() { eval("c = -1") }, SyntaxError)
assertThrows(function() { R.c = -2 }, TypeError)
- // Initialize first bunch or variables.
+ // Initialize first bunch of variables.
export var v = 1
export let l = 2
export const c = 3
diff --git a/src/3rdparty/v8/test/mjsunit/harmony/object-observe.js b/src/3rdparty/v8/test/mjsunit/harmony/object-observe.js
index 945841b..584d9e8 100644
--- a/src/3rdparty/v8/test/mjsunit/harmony/object-observe.js
+++ b/src/3rdparty/v8/test/mjsunit/harmony/object-observe.js
@@ -25,13 +25,22 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --harmony-observation
+// Flags: --harmony-observation --harmony-proxies --harmony-collections
+// Flags: --allow-natives-syntax
var allObservers = [];
function reset() {
allObservers.forEach(function(observer) { observer.reset(); });
}
+function stringifyNoThrow(arg) {
+ try {
+ return JSON.stringify(arg);
+ } catch (e) {
+ return '{<circular reference>}';
+ }
+}
+
function createObserver() {
"use strict"; // So that |this| in callback can be undefined.
@@ -56,6 +65,8 @@ function createObserver() {
assertCallbackRecords: function(recs) {
this.assertRecordCount(recs.length);
for (var i = 0; i < recs.length; i++) {
+ if ('name' in recs[i]) recs[i].name = String(recs[i].name);
+ print(i, stringifyNoThrow(this.records[i]), stringifyNoThrow(recs[i]));
assertSame(this.records[i].object, recs[i].object);
assertEquals('string', typeof recs[i].type);
assertPropertiesEqual(this.records[i], recs[i]);
@@ -93,13 +104,19 @@ Object.defineProperty(changeRecordWithAccessor, 'name', {
enumerable: true
})
+
// Object.observe
assertThrows(function() { Object.observe("non-object", observer.callback); }, TypeError);
assertThrows(function() { Object.observe(obj, nonFunction); }, TypeError);
assertThrows(function() { Object.observe(obj, frozenFunction); }, TypeError);
+assertEquals(obj, Object.observe(obj, observer.callback));
+
// Object.unobserve
assertThrows(function() { Object.unobserve(4, observer.callback); }, TypeError);
+assertThrows(function() { Object.unobserve(obj, nonFunction); }, TypeError);
+assertEquals(obj, Object.unobserve(obj, observer.callback));
+
// Object.getNotifier
var notifier = Object.getNotifier(obj);
@@ -124,11 +141,30 @@ assertFalse(recordCreated);
notifier.notify(changeRecordWithAccessor);
assertFalse(recordCreated); // not observed yet
+
// Object.deliverChangeRecords
assertThrows(function() { Object.deliverChangeRecords(nonFunction); }, TypeError);
-// Multiple records are delivered.
Object.observe(obj, observer.callback);
+
+
+// notify uses [[CreateOwnProperty]] to create the changeRecord.
+reset();
+var protoExpandoAccessed = false;
+Object.defineProperty(Object.prototype, 'protoExpando',
+ {
+ configurable: true,
+ set: function() { protoExpandoAccessed = true; }
+ }
+);
+notifier.notify({ type: 'foo', protoExpando: 'val'});
+assertFalse(protoExpandoAccessed);
+delete Object.prototype.protoExpando;
+Object.deliverChangeRecords(observer.callback);
+
+
+// Multiple records are delivered.
+reset();
notifier.notify({
type: 'updated',
name: 'foo',
@@ -147,11 +183,13 @@ observer.assertCallbackRecords([
{ object: obj, name: 'bar', type: 'deleted', expando2: 'str' }
]);
+
// No delivery takes place if no records are pending
reset();
Object.deliverChangeRecords(observer.callback);
observer.assertNotCalled();
+
// Multiple observation has no effect.
reset();
Object.observe(obj, observer.callback);
@@ -162,6 +200,7 @@ Object.getNotifier(obj).notify({
Object.deliverChangeRecords(observer.callback);
observer.assertCalled();
+
// Observation can be stopped.
reset();
Object.unobserve(obj, observer.callback);
@@ -171,6 +210,7 @@ Object.getNotifier(obj).notify({
Object.deliverChangeRecords(observer.callback);
observer.assertNotCalled();
+
// Multiple unobservation has no effect
reset();
Object.unobserve(obj, observer.callback);
@@ -181,6 +221,7 @@ Object.getNotifier(obj).notify({
Object.deliverChangeRecords(observer.callback);
observer.assertNotCalled();
+
// Re-observation works and only includes changeRecords created after the call.
reset();
Object.getNotifier(obj).notify({
@@ -194,6 +235,7 @@ records = undefined;
Object.deliverChangeRecords(observer.callback);
observer.assertRecordCount(1);
+
// Observing a continuous stream of changes, while intermittently unobserving.
reset();
Object.observe(obj, observer.callback);
@@ -234,6 +276,7 @@ observer.assertCallbackRecords([
{ object: obj, type: 'foo', val: 5 }
]);
+
// Observing multiple objects; records appear in order.
reset();
var obj2 = {};
@@ -258,6 +301,37 @@ observer.assertCallbackRecords([
{ object: obj3, type: 'foo3' }
]);
+
+// Recursive observation.
+var obj = {a: 1};
+var callbackCount = 0;
+function recursiveObserver(r) {
+ assertEquals(1, r.length);
+ ++callbackCount;
+ if (r[0].oldValue < 100) ++obj[r[0].name];
+}
+Object.observe(obj, recursiveObserver);
+++obj.a;
+Object.deliverChangeRecords(recursiveObserver);
+assertEquals(100, callbackCount);
+
+var obj1 = {a: 1};
+var obj2 = {a: 1};
+var recordCount = 0;
+function recursiveObserver2(r) {
+ recordCount += r.length;
+ if (r[0].oldValue < 100) {
+ ++obj1.a;
+ ++obj2.a;
+ }
+}
+Object.observe(obj1, recursiveObserver2);
+Object.observe(obj2, recursiveObserver2);
+++obj1.a;
+Object.deliverChangeRecords(recursiveObserver2);
+assertEquals(199, recordCount);
+
+
// Observing named properties.
reset();
var obj = {a: 1}
@@ -274,12 +348,19 @@ obj.a = 7; // ignored
Object.defineProperty(obj, "a", {value: 8});
Object.defineProperty(obj, "a", {value: 7, writable: true});
Object.defineProperty(obj, "a", {get: function() {}});
-Object.defineProperty(obj, "a", {get: function() {}});
+Object.defineProperty(obj, "a", {get: frozenFunction});
+Object.defineProperty(obj, "a", {get: frozenFunction}); // ignored
+Object.defineProperty(obj, "a", {get: frozenFunction, set: frozenFunction});
+Object.defineProperty(obj, "a", {set: frozenFunction}); // ignored
+Object.defineProperty(obj, "a", {get: undefined, set: frozenFunction});
delete obj.a;
delete obj.a;
Object.defineProperty(obj, "a", {get: function() {}, configurable: true});
Object.defineProperty(obj, "a", {value: 9, writable: true});
obj.a = 10;
+++obj.a;
+obj.a++;
+obj.a *= 3;
delete obj.a;
Object.defineProperty(obj, "a", {value: 11, configurable: true});
Object.deliverChangeRecords(observer.callback);
@@ -290,19 +371,25 @@ observer.assertCallbackRecords([
{ object: obj, name: "a", type: "new" },
{ object: obj, name: "a", type: "updated", oldValue: 4 },
{ object: obj, name: "a", type: "updated", oldValue: 5 },
- { object: obj, name: "a", type: "reconfigured", oldValue: 6 },
+ { object: obj, name: "a", type: "reconfigured" },
{ object: obj, name: "a", type: "updated", oldValue: 6 },
{ object: obj, name: "a", type: "reconfigured", oldValue: 8 },
{ object: obj, name: "a", type: "reconfigured", oldValue: 7 },
{ object: obj, name: "a", type: "reconfigured" },
+ { object: obj, name: "a", type: "reconfigured" },
+ { object: obj, name: "a", type: "reconfigured" },
{ object: obj, name: "a", type: "deleted" },
{ object: obj, name: "a", type: "new" },
{ object: obj, name: "a", type: "reconfigured" },
{ object: obj, name: "a", type: "updated", oldValue: 9 },
- { object: obj, name: "a", type: "deleted", oldValue: 10 },
+ { object: obj, name: "a", type: "updated", oldValue: 10 },
+ { object: obj, name: "a", type: "updated", oldValue: 11 },
+ { object: obj, name: "a", type: "updated", oldValue: 12 },
+ { object: obj, name: "a", type: "deleted", oldValue: 36 },
{ object: obj, name: "a", type: "new" },
]);
+
// Observing indexed properties.
reset();
var obj = {'1': 1}
@@ -319,11 +406,19 @@ obj[1] = 7; // ignored
Object.defineProperty(obj, "1", {value: 8});
Object.defineProperty(obj, "1", {value: 7, writable: true});
Object.defineProperty(obj, "1", {get: function() {}});
+Object.defineProperty(obj, "1", {get: frozenFunction});
+Object.defineProperty(obj, "1", {get: frozenFunction}); // ignored
+Object.defineProperty(obj, "1", {get: frozenFunction, set: frozenFunction});
+Object.defineProperty(obj, "1", {set: frozenFunction}); // ignored
+Object.defineProperty(obj, "1", {get: undefined, set: frozenFunction});
delete obj[1];
delete obj[1];
Object.defineProperty(obj, "1", {get: function() {}, configurable: true});
Object.defineProperty(obj, "1", {value: 9, writable: true});
obj[1] = 10;
+++obj[1];
+obj[1]++;
+obj[1] *= 3;
delete obj[1];
Object.defineProperty(obj, "1", {value: 11, configurable: true});
Object.deliverChangeRecords(observer.callback);
@@ -334,20 +429,196 @@ observer.assertCallbackRecords([
{ object: obj, name: "1", type: "new" },
{ object: obj, name: "1", type: "updated", oldValue: 4 },
{ object: obj, name: "1", type: "updated", oldValue: 5 },
- { object: obj, name: "1", type: "reconfigured", oldValue: 6 },
+ { object: obj, name: "1", type: "reconfigured" },
{ object: obj, name: "1", type: "updated", oldValue: 6 },
{ object: obj, name: "1", type: "reconfigured", oldValue: 8 },
{ object: obj, name: "1", type: "reconfigured", oldValue: 7 },
- // TODO(observe): oldValue should not be present below.
- { object: obj, name: "1", type: "deleted", oldValue: undefined },
+ { object: obj, name: "1", type: "reconfigured" },
+ { object: obj, name: "1", type: "reconfigured" },
+ { object: obj, name: "1", type: "reconfigured" },
+ { object: obj, name: "1", type: "deleted" },
{ object: obj, name: "1", type: "new" },
- // TODO(observe): oldValue should be absent below, and type = "reconfigured".
- { object: obj, name: "1", type: "updated", oldValue: undefined },
+ { object: obj, name: "1", type: "reconfigured" },
{ object: obj, name: "1", type: "updated", oldValue: 9 },
- { object: obj, name: "1", type: "deleted", oldValue: 10 },
+ { object: obj, name: "1", type: "updated", oldValue: 10 },
+ { object: obj, name: "1", type: "updated", oldValue: 11 },
+ { object: obj, name: "1", type: "updated", oldValue: 12 },
+ { object: obj, name: "1", type: "deleted", oldValue: 36 },
{ object: obj, name: "1", type: "new" },
]);
+
+// Test all kinds of objects generically.
+function TestObserveConfigurable(obj, prop) {
+ reset();
+ obj[prop] = 1;
+ Object.observe(obj, observer.callback);
+ obj[prop] = 2;
+ obj[prop] = 3;
+ delete obj[prop];
+ obj[prop] = 4;
+ obj[prop] = 4; // ignored
+ obj[prop] = 5;
+ Object.defineProperty(obj, prop, {value: 6});
+ Object.defineProperty(obj, prop, {writable: false});
+ obj[prop] = 7; // ignored
+ Object.defineProperty(obj, prop, {value: 8});
+ Object.defineProperty(obj, prop, {value: 7, writable: true});
+ Object.defineProperty(obj, prop, {get: function() {}});
+ Object.defineProperty(obj, prop, {get: frozenFunction});
+ Object.defineProperty(obj, prop, {get: frozenFunction}); // ignored
+ Object.defineProperty(obj, prop, {get: frozenFunction, set: frozenFunction});
+ Object.defineProperty(obj, prop, {set: frozenFunction}); // ignored
+ Object.defineProperty(obj, prop, {get: undefined, set: frozenFunction});
+ obj.__defineSetter__(prop, frozenFunction); // ignored
+ obj.__defineSetter__(prop, function() {});
+ obj.__defineGetter__(prop, function() {});
+ delete obj[prop];
+ delete obj[prop]; // ignored
+ obj.__defineGetter__(prop, function() {});
+ delete obj[prop];
+ Object.defineProperty(obj, prop, {get: function() {}, configurable: true});
+ Object.defineProperty(obj, prop, {value: 9, writable: true});
+ obj[prop] = 10;
+ ++obj[prop];
+ obj[prop]++;
+ obj[prop] *= 3;
+ delete obj[prop];
+ Object.defineProperty(obj, prop, {value: 11, configurable: true});
+ Object.deliverChangeRecords(observer.callback);
+ observer.assertCallbackRecords([
+ { object: obj, name: prop, type: "updated", oldValue: 1 },
+ { object: obj, name: prop, type: "updated", oldValue: 2 },
+ { object: obj, name: prop, type: "deleted", oldValue: 3 },
+ { object: obj, name: prop, type: "new" },
+ { object: obj, name: prop, type: "updated", oldValue: 4 },
+ { object: obj, name: prop, type: "updated", oldValue: 5 },
+ { object: obj, name: prop, type: "reconfigured" },
+ { object: obj, name: prop, type: "updated", oldValue: 6 },
+ { object: obj, name: prop, type: "reconfigured", oldValue: 8 },
+ { object: obj, name: prop, type: "reconfigured", oldValue: 7 },
+ { object: obj, name: prop, type: "reconfigured" },
+ { object: obj, name: prop, type: "reconfigured" },
+ { object: obj, name: prop, type: "reconfigured" },
+ { object: obj, name: prop, type: "reconfigured" },
+ { object: obj, name: prop, type: "reconfigured" },
+ { object: obj, name: prop, type: "deleted" },
+ { object: obj, name: prop, type: "new" },
+ { object: obj, name: prop, type: "deleted" },
+ { object: obj, name: prop, type: "new" },
+ { object: obj, name: prop, type: "reconfigured" },
+ { object: obj, name: prop, type: "updated", oldValue: 9 },
+ { object: obj, name: prop, type: "updated", oldValue: 10 },
+ { object: obj, name: prop, type: "updated", oldValue: 11 },
+ { object: obj, name: prop, type: "updated", oldValue: 12 },
+ { object: obj, name: prop, type: "deleted", oldValue: 36 },
+ { object: obj, name: prop, type: "new" },
+ ]);
+ Object.unobserve(obj, observer.callback);
+ delete obj[prop];
+}
+
+function TestObserveNonConfigurable(obj, prop, desc) {
+ reset();
+ obj[prop] = 1;
+ Object.observe(obj, observer.callback);
+ obj[prop] = 4;
+ obj[prop] = 4; // ignored
+ obj[prop] = 5;
+ Object.defineProperty(obj, prop, {value: 6});
+ Object.defineProperty(obj, prop, {value: 6}); // ignored
+ Object.defineProperty(obj, prop, {value: 7});
+ Object.defineProperty(obj, prop, {enumerable: desc.enumerable}); // ignored
+ Object.defineProperty(obj, prop, {writable: false});
+ obj[prop] = 7; // ignored
+ Object.deliverChangeRecords(observer.callback);
+ observer.assertCallbackRecords([
+ { object: obj, name: prop, type: "updated", oldValue: 1 },
+ { object: obj, name: prop, type: "updated", oldValue: 4 },
+ { object: obj, name: prop, type: "updated", oldValue: 5 },
+ { object: obj, name: prop, type: "updated", oldValue: 6 },
+ { object: obj, name: prop, type: "reconfigured" },
+ ]);
+ Object.unobserve(obj, observer.callback);
+}
+
+function createProxy(create, x) {
+ var handler = {
+ getPropertyDescriptor: function(k) {
+ for (var o = this.target; o; o = Object.getPrototypeOf(o)) {
+ var desc = Object.getOwnPropertyDescriptor(o, k);
+ if (desc) return desc;
+ }
+ return undefined;
+ },
+ getOwnPropertyDescriptor: function(k) {
+ return Object.getOwnPropertyDescriptor(this.target, k);
+ },
+ defineProperty: function(k, desc) {
+ var x = Object.defineProperty(this.target, k, desc);
+ Object.deliverChangeRecords(this.callback);
+ return x;
+ },
+ delete: function(k) {
+ var x = delete this.target[k];
+ Object.deliverChangeRecords(this.callback);
+ return x;
+ },
+ getPropertyNames: function() {
+ return Object.getOwnPropertyNames(this.target);
+ },
+ target: {isProxy: true},
+ callback: function(changeRecords) {
+ print("callback", stringifyNoThrow(handler.proxy), stringifyNoThrow(got));
+ for (var i in changeRecords) {
+ var got = changeRecords[i];
+ var change = {object: handler.proxy, name: got.name, type: got.type};
+ if ("oldValue" in got) change.oldValue = got.oldValue;
+ Object.getNotifier(handler.proxy).notify(change);
+ }
+ },
+ };
+ Object.observe(handler.target, handler.callback);
+ return handler.proxy = create(handler, x);
+}
+
+var objects = [
+ {},
+ [],
+ this, // global object
+ function(){},
+ (function(){ return arguments })(),
+ (function(){ "use strict"; return arguments })(),
+ Object(1), Object(true), Object("bla"),
+ new Date(),
+ Object, Function, Date, RegExp,
+ new Set, new Map, new WeakMap,
+ new ArrayBuffer(10), new Int32Array(5),
+ createProxy(Proxy.create, null),
+ createProxy(Proxy.createFunction, function(){}),
+];
+var properties = ["a", "1", 1, "length", "prototype", "name", "caller"];
+
+// Cases that yield non-standard results.
+function blacklisted(obj, prop) {
+ return (obj instanceof Int32Array && prop == 1) ||
+ (obj instanceof Int32Array && prop === "length") ||
+ (obj instanceof ArrayBuffer && prop == 1)
+}
+
+for (var i in objects) for (var j in properties) {
+ var obj = objects[i];
+ var prop = properties[j];
+ if (blacklisted(obj, prop)) continue;
+ var desc = Object.getOwnPropertyDescriptor(obj, prop);
+ print("***", typeof obj, stringifyNoThrow(obj), prop);
+ if (!desc || desc.configurable)
+ TestObserveConfigurable(obj, prop);
+ else if (desc.writable)
+ TestObserveNonConfigurable(obj, prop, desc);
+}
+
+
// Observing array length (including truncation)
reset();
var arr = ['a', 'b', 'c', 'd'];
@@ -355,44 +626,54 @@ var arr2 = ['alpha', 'beta'];
var arr3 = ['hello'];
arr3[2] = 'goodbye';
arr3.length = 6;
-// TODO(adamk): Enable this test case when it can run in a reasonable
-// amount of time.
-//var slow_arr = new Array(1000000000);
-//slow_arr[500000000] = 'hello';
+var slow_arr = new Array(1000000000);
+slow_arr[500000000] = 'hello';
Object.defineProperty(arr, '0', {configurable: false});
Object.defineProperty(arr, '2', {get: function(){}});
Object.defineProperty(arr2, '0', {get: function(){}, configurable: false});
Object.observe(arr, observer.callback);
Object.observe(arr2, observer.callback);
Object.observe(arr3, observer.callback);
+Object.observe(slow_arr, observer.callback);
arr.length = 2;
arr.length = 0;
arr.length = 10;
+Object.defineProperty(arr, 'length', {writable: false});
arr2.length = 0;
arr2.length = 1; // no change expected
+Object.defineProperty(arr2, 'length', {value: 1, writable: false});
arr3.length = 0;
+++arr3.length;
+arr3.length++;
+arr3.length /= 2;
Object.defineProperty(arr3, 'length', {value: 5});
Object.defineProperty(arr3, 'length', {value: 10, writable: false});
+slow_arr.length = 100;
Object.deliverChangeRecords(observer.callback);
observer.assertCallbackRecords([
{ object: arr, name: '3', type: 'deleted', oldValue: 'd' },
- // TODO(adamk): oldValue should not be present below
- { object: arr, name: '2', type: 'deleted', oldValue: undefined },
+ { object: arr, name: '2', type: 'deleted' },
{ object: arr, name: 'length', type: 'updated', oldValue: 4 },
{ object: arr, name: '1', type: 'deleted', oldValue: 'b' },
{ object: arr, name: 'length', type: 'updated', oldValue: 2 },
{ object: arr, name: 'length', type: 'updated', oldValue: 1 },
+ { object: arr, name: 'length', type: 'reconfigured' },
{ object: arr2, name: '1', type: 'deleted', oldValue: 'beta' },
{ object: arr2, name: 'length', type: 'updated', oldValue: 2 },
+ { object: arr2, name: 'length', type: 'reconfigured', oldValue: 1 },
{ object: arr3, name: '2', type: 'deleted', oldValue: 'goodbye' },
{ object: arr3, name: '0', type: 'deleted', oldValue: 'hello' },
{ object: arr3, name: 'length', type: 'updated', oldValue: 6 },
{ object: arr3, name: 'length', type: 'updated', oldValue: 0 },
- { object: arr3, name: 'length', type: 'updated', oldValue: 5 },
- // TODO(adamk): This record should be merged with the above
- { object: arr3, name: 'length', type: 'reconfigured' },
+ { object: arr3, name: 'length', type: 'updated', oldValue: 1 },
+ { object: arr3, name: 'length', type: 'updated', oldValue: 2 },
+ { object: arr3, name: 'length', type: 'updated', oldValue: 1 },
+ { object: arr3, name: 'length', type: 'reconfigured', oldValue: 5 },
+ { object: slow_arr, name: '500000000', type: 'deleted', oldValue: 'hello' },
+ { object: slow_arr, name: 'length', type: 'updated', oldValue: 1000000000 },
]);
+
// Assignments in loops (checking different IC states).
reset();
var obj = {};
@@ -424,6 +705,7 @@ observer.assertCallbackRecords([
{ object: obj, name: "4", type: "new" },
]);
+
// Adding elements past the end of an array should notify on length
reset();
var arr = [1, 2, 3];
@@ -446,6 +728,7 @@ observer.assertCallbackRecords([
{ object: arr, name: '50', type: 'new' },
]);
+
// Tests for array methods, first on arrays and then on plain objects
//
// === ARRAYS ===
@@ -519,6 +802,7 @@ observer.assertCallbackRecords([
{ object: array, name: '2', type: 'updated', oldValue: 3 },
]);
+
//
// === PLAIN OBJECTS ===
//
@@ -589,3 +873,184 @@ observer.assertCallbackRecords([
{ object: array, name: '2', type: 'updated', oldValue: 3 },
{ object: array, name: 'length', type: 'updated', oldValue: 3 },
]);
+
+// Exercise StoreIC_ArrayLength
+reset();
+var dummy = {};
+Object.observe(dummy, observer.callback);
+Object.unobserve(dummy, observer.callback);
+var array = [0];
+Object.observe(array, observer.callback);
+array.splice(0, 1);
+Object.deliverChangeRecords(observer.callback);
+observer.assertCallbackRecords([
+ { object: array, name: '0', type: 'deleted', oldValue: 0 },
+ { object: array, name: 'length', type: 'updated', oldValue: 1},
+]);
+
+
+// __proto__
+reset();
+var obj = {};
+Object.observe(obj, observer.callback);
+var p = {foo: 'yes'};
+var q = {bar: 'no'};
+obj.__proto__ = p;
+obj.__proto__ = p; // ignored
+obj.__proto__ = null;
+obj.__proto__ = q; // the __proto__ accessor is gone
+// TODO(adamk): Add tests for objects with hidden prototypes
+// once we support observing the global object.
+Object.deliverChangeRecords(observer.callback);
+observer.assertCallbackRecords([
+ { object: obj, name: '__proto__', type: 'prototype',
+ oldValue: Object.prototype },
+ { object: obj, name: '__proto__', type: 'prototype', oldValue: p },
+ { object: obj, name: '__proto__', type: 'new' },
+]);
+
+
+// Function.prototype
+reset();
+var fun = function(){};
+Object.observe(fun, observer.callback);
+var myproto = {foo: 'bar'};
+fun.prototype = myproto;
+fun.prototype = 7;
+fun.prototype = 7; // ignored
+Object.defineProperty(fun, 'prototype', {value: 8});
+Object.deliverChangeRecords(observer.callback);
+observer.assertRecordCount(3);
+// Manually examine the first record in order to test
+// lazy creation of oldValue
+assertSame(fun, observer.records[0].object);
+assertEquals('prototype', observer.records[0].name);
+assertEquals('updated', observer.records[0].type);
+// The only existing reference to the oldValue object is in this
+// record, so to test that lazy creation happened correctly
+// we compare its constructor to our function (one of the invariants
+// ensured when creating an object via AllocateFunctionPrototype).
+assertSame(fun, observer.records[0].oldValue.constructor);
+observer.records.splice(0, 1);
+observer.assertCallbackRecords([
+ { object: fun, name: 'prototype', type: 'updated', oldValue: myproto },
+ { object: fun, name: 'prototype', type: 'updated', oldValue: 7 },
+]);
+
+// Function.prototype should not be observable except on the object itself
+reset();
+var fun = function(){};
+var obj = { __proto__: fun };
+Object.observe(obj, observer.callback);
+obj.prototype = 7;
+Object.deliverChangeRecords(observer.callback);
+observer.assertNotCalled();
+
+
+// Check that changes in observation status are detected in all IC states and
+// in optimized code, especially in cases usually using fast elements.
+var mutation = [
+ "a[i] = v",
+ "a[i] ? ++a[i] : a[i] = v",
+ "a[i] ? a[i]++ : a[i] = v",
+ "a[i] ? a[i] += 1 : a[i] = v",
+ "a[i] ? a[i] -= -1 : a[i] = v",
+];
+
+var props = [1, "1", "a"];
+
+function TestFastElements(prop, mutation, prepopulate, polymorphic, optimize) {
+ var setElement = eval(
+ "(function setElement(a, i, v) { " + mutation + "; " +
+ "/* " + [].join.call(arguments, " ") + " */" +
+ "})"
+ );
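+  // Splicing the arguments into a comment makes each eval'd source string
+  // unique, presumably so the compilation cache cannot share code (and thus
+  // type feedback) between the many variants generated below.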
+ print("TestFastElements:", setElement);
+
+ var arr = prepopulate ? [1, 2, 3, 4, 5] : [0];
+ if (prepopulate) arr[prop] = 2; // for non-element case
+ setElement(arr, prop, 3);
+ setElement(arr, prop, 4);
+ if (polymorphic) setElement(["M", "i", "l", "n", "e", "r"], 0, "m");
+ if (optimize) %OptimizeFunctionOnNextCall(setElement);
+ setElement(arr, prop, 5);
+
+ reset();
+ Object.observe(arr, observer.callback);
+ setElement(arr, prop, 989898);
+ Object.deliverChangeRecords(observer.callback);
+ observer.assertCallbackRecords([
+ { object: arr, name: "" + prop, type: 'updated', oldValue: 5 }
+ ]);
+}
+
+for (var b1 = 0; b1 < 2; ++b1)
+ for (var b2 = 0; b2 < 2; ++b2)
+ for (var b3 = 0; b3 < 2; ++b3)
+ for (var i in props)
+ for (var j in mutation)
+ TestFastElements(props[i], mutation[j], b1 != 0, b2 != 0, b3 != 0);
+
+
+var mutation = [
+ "a.length = v",
+ "a.length += newSize - oldSize",
+ "a.length -= oldSize - newSize",
+];
+
+var mutationByIncr = [
+ "++a.length",
+ "a.length++",
+];
+
+function TestFastElementsLength(
+ mutation, polymorphic, optimize, oldSize, newSize) {
+ var setLength = eval(
+ "(function setLength(a, v) { " + mutation + "; " +
+ "/* " + [].join.call(arguments, " ") + " */"
+ + "})"
+ );
+ print("TestFastElementsLength:", setLength);
+
+ function array(n) {
+ var arr = new Array(n);
+ for (var i = 0; i < n; ++i) arr[i] = i;
+ return arr;
+ }
+
+ setLength(array(oldSize), newSize);
+ setLength(array(oldSize), newSize);
+ if (polymorphic) setLength(array(oldSize).map(isNaN), newSize);
+ if (optimize) %OptimizeFunctionOnNextCall(setLength);
+ setLength(array(oldSize), newSize);
+
+ reset();
+ var arr = array(oldSize);
+ Object.observe(arr, observer.callback);
+ setLength(arr, newSize);
+ Object.deliverChangeRecords(observer.callback);
+ if (oldSize === newSize) {
+ observer.assertNotCalled();
+ } else {
+ var count = oldSize > newSize ? oldSize - newSize : 0;
+ observer.assertRecordCount(count + 1);
+ var lengthRecord = observer.records[count];
+ assertSame(arr, lengthRecord.object);
+ assertEquals('length', lengthRecord.name);
+ assertEquals('updated', lengthRecord.type);
+ assertSame(oldSize, lengthRecord.oldValue);
+ }
+}
+
+for (var b1 = 0; b1 < 2; ++b1)
+ for (var b2 = 0; b2 < 2; ++b2)
+ for (var n1 = 0; n1 < 3; ++n1)
+ for (var n2 = 0; n2 < 3; ++n2)
+ for (var i in mutation)
+ TestFastElementsLength(mutation[i], b1 != 0, b2 != 0, 20*n1, 20*n2);
+
+for (var b1 = 0; b1 < 2; ++b1)
+ for (var b2 = 0; b2 < 2; ++b2)
+ for (var n = 0; n < 3; ++n)
+ for (var i in mutationByIncr)
+ TestFastElementsLength(mutationByIncr[i], b1 != 0, b2 != 0, 7*n, 7*n+1);
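
Taken together, the additions above exercise the full (since-withdrawn)
Object.observe pipeline: observe, mutate, flush synchronously, unobserve. A
minimal sketch of that flow, using only calls that already appear in this
test file (the record fields shown are the ones the assertions check):

    var subject = {a: 1};
    function callback(records) {
      // Each record carries object, name, type, and sometimes oldValue.
      for (var i = 0; i < records.length; i++) {
        print(records[i].type, records[i].name, records[i].oldValue);
      }
    }
    Object.observe(subject, callback);
    subject.a = 2;  // queues { type: 'updated', name: 'a', oldValue: 1 }
    Object.getNotifier(subject).notify({ type: 'custom', detail: 42 });
    Object.deliverChangeRecords(callback);  // flushes pending records now
    Object.unobserve(subject, callback);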
diff --git a/src/3rdparty/v8/test/mjsunit/harmony/proxies.js b/src/3rdparty/v8/test/mjsunit/harmony/proxies.js
index 04fc769..f68e3bd 100644
--- a/src/3rdparty/v8/test/mjsunit/harmony/proxies.js
+++ b/src/3rdparty/v8/test/mjsunit/harmony/proxies.js
@@ -2294,7 +2294,6 @@ function TestConstructorWithProxyPrototype2(create, handler) {
C.prototype = create(handler);
var o = new C;
- assertSame(C.prototype, o.__proto__);
assertSame(C.prototype, Object.getPrototypeOf(o));
}
diff --git a/src/3rdparty/v8/test/mjsunit/harmony/symbols.js b/src/3rdparty/v8/test/mjsunit/harmony/symbols.js
new file mode 100644
index 0000000..baec514
--- /dev/null
+++ b/src/3rdparty/v8/test/mjsunit/harmony/symbols.js
@@ -0,0 +1,127 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --harmony-symbols --harmony-collections
+// Flags: --expose-gc --allow-natives-syntax
+
+var symbols = []
+
+// Test different forms of constructor calls, all equivalent.
+function TestNew() {
+ function IndirectSymbol() { return new Symbol }
+ function indirect() { return new IndirectSymbol() }
+ for (var i = 0; i < 10; ++i) {
+ symbols.push(new Symbol)
+ symbols.push(new Symbol())
+ symbols.push(Symbol())
+ symbols.push(indirect())
+ }
+ %OptimizeFunctionOnNextCall(indirect)
+ indirect() // Call once before GC throws away type feedback.
+ gc() // Promote existing symbols and then allocate some more.
+ for (var i = 0; i < 10; ++i) {
+ symbols.push(new Symbol)
+ symbols.push(new Symbol())
+ symbols.push(Symbol())
+ symbols.push(indirect())
+ }
+}
+TestNew()
+
+
+function TestType() {
+ for (var i in symbols) {
+ assertTrue(%_IsSymbol(symbols[i]))
+ assertEquals("object", typeof symbols[i])
+ assertTrue(typeof symbols[i] === "object")
+ assertEquals("[object Symbol]", Object.prototype.toString.call(symbols[i]))
+ }
+}
+TestType()
+
+
+function TestEquality() {
+ // Every symbol should equal itself.
+ for (var i in symbols) {
+ assertSame(symbols[i], symbols[i])
+ assertEquals(symbols[i], symbols[i])
+ assertTrue(Object.is(symbols[i], symbols[i]))
+ assertTrue(symbols[i] === symbols[i])
+ assertTrue(symbols[i] == symbols[i])
+ }
+
+ // All symbols should be distinct.
+ for (var i = 0; i < symbols.length; ++i) {
+ for (var j = i + 1; j < symbols.length; ++j) {
+ assertFalse(Object.is(symbols[i], symbols[j]))
+ assertFalse(symbols[i] === symbols[j])
+ assertFalse(symbols[i] == symbols[j])
+ }
+ }
+}
+TestEquality()
+
+
+function TestGet() {
+ for (var i in symbols) {
+ assertEquals("[object Symbol]", symbols[i].toString())
+ assertEquals(undefined, symbols[i].valueOf)
+ assertEquals(undefined, symbols[i].a)
+ assertEquals(undefined, symbols[i]["a" + "b"])
+ assertEquals(undefined, symbols[i]["" + "1"])
+ assertEquals(undefined, symbols[i][62])
+ }
+}
+TestGet()
+
+
+function TestSet() {
+ for (var i in symbols) {
+ symbols[i].toString = 0
+ assertEquals("[object Symbol]", symbols[i].toString())
+ symbols[i].a = 0
+ assertEquals(undefined, symbols[i].a)
+ symbols[i]["a" + "b"] = 0
+ assertEquals(undefined, symbols[i]["a" + "b"])
+ symbols[i][62] = 0
+ assertEquals(undefined, symbols[i][62])
+ }
+}
+TestSet()
+
+
+function TestMap() {
+ var map = new Map;
+ for (var i in symbols) {
+ map.set(symbols[i], i)
+ }
+ for (var i in symbols) {
+ assertTrue(map.has(symbols[i]))
+ assertEquals(i, map.get(symbols[i]))
+ }
+}
+TestMap()
diff --git a/src/3rdparty/v8/test/mjsunit/json-parser-recursive.js b/src/3rdparty/v8/test/mjsunit/json-parser-recursive.js
new file mode 100644
index 0000000..1e00c83
--- /dev/null
+++ b/src/3rdparty/v8/test/mjsunit/json-parser-recursive.js
@@ -0,0 +1,33 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+var str = "[1]";
+for (var i = 0; i < 100000; i++) {
+ str = "[1," + str + "]";
+}
+
+assertThrows(function() { JSON.parse(str); }, RangeError);
diff --git a/src/3rdparty/v8/test/mjsunit/json-recursive.js b/src/3rdparty/v8/test/mjsunit/json-stringify-recursive.js
index 7a8c547..31aa002 100644
--- a/src/3rdparty/v8/test/mjsunit/json-recursive.js
+++ b/src/3rdparty/v8/test/mjsunit/json-stringify-recursive.js
@@ -25,7 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
var a = {};
for (i = 0; i < 10000; i++) {
var current = {};
@@ -51,11 +50,3 @@ assertThrows(function() { JSON.stringify(deepArray); }, RangeError);
var deepObject = {};
for (var i = 0; i < depth; i++) deepObject = { next: deepObject };
assertThrows(function() { JSON.stringify(deepObject); }, RangeError);
-
-
-var str = "[1]";
-for (var i = 0; i < 100000; i++) {
- str = "[1," + str + "]";
-}
-
-assertThrows(function() { JSON.parse(str); }, RangeError);
diff --git a/src/3rdparty/v8/test/mjsunit/json.js b/src/3rdparty/v8/test/mjsunit/json.js
index 6e91725..79826db 100644
--- a/src/3rdparty/v8/test/mjsunit/json.js
+++ b/src/3rdparty/v8/test/mjsunit/json.js
@@ -453,16 +453,23 @@ falseNum.__proto__ = Number.prototype;
falseNum.toString = function() { return 42; };
assertEquals('"42"', JSON.stringify(falseNum));
-// We don't currently allow plain properties called __proto__ in JSON
-// objects in JSON.parse. Instead we read them as we would JS object
-// literals. If we change that, this test should change with it.
-//
-// Parse a non-object value as __proto__. This must not create a
-// __proto__ property different from the original, and should not
-// change the original.
-var o = JSON.parse('{"__proto__":5}');
-assertEquals(Object.prototype, o.__proto__); // __proto__ isn't changed.
-assertEquals(0, Object.keys(o).length); // __proto__ isn't added as enumerable.
+// Parse an object value as __proto__.
+var o1 = JSON.parse('{"__proto__":[]}');
+assertEquals([], o1.__proto__);
+assertEquals(["__proto__"], Object.keys(o1));
+assertEquals([], Object.getOwnPropertyDescriptor(o1, "__proto__").value);
+assertEquals(["__proto__"], Object.getOwnPropertyNames(o1));
+assertTrue(o1.hasOwnProperty("__proto__"));
+assertTrue(Object.prototype.isPrototypeOf(o1));
+
+// Parse a non-object value as __proto__.
+var o2 = JSON.parse('{"__proto__":5}');
+assertEquals(5, o2.__proto__);
+assertEquals(["__proto__"], Object.keys(o2));
+assertEquals(5, Object.getOwnPropertyDescriptor(o2, "__proto__").value);
+assertEquals(["__proto__"], Object.getOwnPropertyNames(o2));
+assertTrue(o2.hasOwnProperty("__proto__"));
+assertTrue(Object.prototype.isPrototypeOf(o2));
var json = '{"stuff before slash\\\\stuff after slash":"whatever"}';
assertEquals(json, JSON.stringify(JSON.parse(json)));
diff --git a/src/3rdparty/v8/test/mjsunit/manual-parallel-recompile.js b/src/3rdparty/v8/test/mjsunit/manual-parallel-recompile.js
new file mode 100644
index 0000000..26b1605
--- /dev/null
+++ b/src/3rdparty/v8/test/mjsunit/manual-parallel-recompile.js
@@ -0,0 +1,79 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax --expose-gc
+// Flags: --parallel-recompilation --manual-parallel-recompilation
+
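+// %GetOptimizationStatus apparently returns 1 for optimized and 2 for
+// unoptimized code (other values cover --always-opt and --nocrankshaft),
+// which is why each assert below checks against the opposite value.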
+function assertOptimized(fun) {
+ // This assertion takes --always-opt and --nocrankshaft flags into account.
+ assertTrue(%GetOptimizationStatus(fun) != 2);
+}
+
+function assertUnoptimized(fun) {
+ assertTrue(%GetOptimizationStatus(fun) != 1);
+}
+
+function f(x) {
+ var xx = x * x;
+ var xxstr = xx.toString();
+ return xxstr.length;
+}
+
+function g(x) {
+ var xxx = Math.sqrt(x) | 0;
+ var xxxstr = xxx.toString();
+ return xxxstr.length;
+}
+
+function k(x) {
+ return x * x;
+}
+
+f(g(1));
+f(g(2));
+assertUnoptimized(f);
+assertUnoptimized(g);
+
+%ForceParallelRecompile(f);
+%ForceParallelRecompile(g);
+assertUnoptimized(f);
+assertUnoptimized(g);
+
+var sum = 0;
+for (var i = 0; i < 10000; i++) sum += f(i) + g(i);
+gc();
+
+assertEquals(95274, sum);
+assertUnoptimized(f);
+assertUnoptimized(g);
+
+%InstallRecompiledCode(f);
+assertOptimized(f);
+assertUnoptimized(g);
+
+%InstallRecompiledCode(g);
+assertOptimized(g);
diff --git a/src/3rdparty/v8/test/mjsunit/math-exp-precision.js b/src/3rdparty/v8/test/mjsunit/math-exp-precision.js
new file mode 100644
index 0000000..ace7edc
--- /dev/null
+++ b/src/3rdparty/v8/test/mjsunit/math-exp-precision.js
@@ -0,0 +1,64 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Tests that the --fast-math implementation of Math.exp() has
+// reasonable precision.
+
+function exp(x) {
+ return Math.exp(x);
+}
+
+var first_call_result = exp(Math.PI);
+var second_call_result = exp(Math.PI);
+
+function assertAlmostEquals(expected, actual, x) {
+ if (expected == 0 && actual == 0) return; // OK
+ if (expected == Number.POSITIVE_INFINITY &&
+ actual == Number.POSITIVE_INFINITY) {
+ return; // OK
+ }
+ var relative_diff = Math.abs(expected/actual - 1);
+ assertTrue(relative_diff < 1e-12, "relative difference of " + relative_diff +
+ " for input " + x);
+}
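+
+// A relative error below 1e-12 means expected and actual agree to roughly
+// twelve significant decimal digits, i.e. the --fast-math code may differ
+// from the reference only in the low-order mantissa bits.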
+
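+// The interval [-708, 710) below roughly brackets the useful domain of
+// exp(): results fall into the denormal range somewhere below -708 and
+// overflow to Infinity just above 709.7, which is what the isFinite()
+// guard on the round-trip check accounts for.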
+var increment = Math.PI / 35; // Roughly 0.1, but we want to try many
+ // different mantissae.
+for (var x = -708; x < 710; x += increment) {
+ var ex = exp(x);
+ var reference = Math.pow(Math.E, x);
+ assertAlmostEquals(reference, ex, x);
+ if (ex > 0 && isFinite(ex)) {
+ var back = Math.log(ex);
+ assertAlmostEquals(x, back, x + " (backwards)");
+ }
+}
+
+// Make sure optimizing the function does not alter the result.
+var last_call_result = exp(Math.PI);
+assertEquals(first_call_result, second_call_result);
+assertEquals(first_call_result, last_call_result);
diff --git a/src/3rdparty/v8/test/mjsunit/math-floor-of-div-minus-zero.js b/src/3rdparty/v8/test/mjsunit/math-floor-of-div-minus-zero.js
index 2743490..7349165 100644
--- a/src/3rdparty/v8/test/mjsunit/math-floor-of-div-minus-zero.js
+++ b/src/3rdparty/v8/test/mjsunit/math-floor-of-div-minus-zero.js
@@ -35,6 +35,7 @@ function test_div_no_deopt_minus_zero() {
}
test_div_no_deopt_minus_zero();
+test_div_no_deopt_minus_zero();
%OptimizeFunctionOnNextCall(test_div_no_deopt_minus_zero);
test_div_no_deopt_minus_zero();
assertTrue(2 != %GetOptimizationStatus(test_div_no_deopt_minus_zero));
diff --git a/src/3rdparty/v8/test/mjsunit/math-floor-of-div-nosudiv.js b/src/3rdparty/v8/test/mjsunit/math-floor-of-div-nosudiv.js
new file mode 100644
index 0000000..5baed2d
--- /dev/null
+++ b/src/3rdparty/v8/test/mjsunit/math-floor-of-div-nosudiv.js
@@ -0,0 +1,288 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax --nouse_inlining --noenable_sudiv
+
+// Use this function as reference. Make sure it is not inlined.
+function div(a, b) {
+ return a / b;
+}
+
+var limit = 0x1000000;
+var exhaustive_limit = 100;
+var step = 10;
+var values = [0x10000001,
+ 0x12345678,
+ -0x789abcdf, // 0x87654321
+ 0x01234567,
+ 0x76543210,
+ -0x80000000, // 0x80000000
+ 0x7fffffff,
+ -0x0fffffff, // 0xf0000001
+ 0x00000010,
+ -0x01000000 // 0xff000000
+ ];
+
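+// test_div checks the first 100 dividends exhaustively, then lets k advance
+// by a stride that grows by 10 on every iteration, sampling the remaining
+// range up to 0x1000000 progressively more sparsely.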
+function test_div() {
+ var c = 0;
+ for (var k = 0; k <= limit; k++) {
+ if (k > exhaustive_limit) { c += step; k += c; }
+ assertEquals(Math.floor(div(k, 1)), Math.floor(k / 1));
+ assertEquals(Math.floor(div(k, -1)), Math.floor(k / -1));
+ assertEquals(Math.floor(div(k, 2)), Math.floor(k / 2));
+ assertEquals(Math.floor(div(k, -2)), Math.floor(k / -2));
+ assertEquals(Math.floor(div(k, 3)), Math.floor(k / 3));
+ assertEquals(Math.floor(div(k, -3)), Math.floor(k / -3));
+ assertEquals(Math.floor(div(k, 4)), Math.floor(k / 4));
+ assertEquals(Math.floor(div(k, -4)), Math.floor(k / -4));
+ assertEquals(Math.floor(div(k, 5)), Math.floor(k / 5));
+ assertEquals(Math.floor(div(k, -5)), Math.floor(k / -5));
+ assertEquals(Math.floor(div(k, 6)), Math.floor(k / 6));
+ assertEquals(Math.floor(div(k, -6)), Math.floor(k / -6));
+ assertEquals(Math.floor(div(k, 7)), Math.floor(k / 7));
+ assertEquals(Math.floor(div(k, -7)), Math.floor(k / -7));
+ assertEquals(Math.floor(div(k, 8)), Math.floor(k / 8));
+ assertEquals(Math.floor(div(k, -8)), Math.floor(k / -8));
+ assertEquals(Math.floor(div(k, 9)), Math.floor(k / 9));
+ assertEquals(Math.floor(div(k, -9)), Math.floor(k / -9));
+ assertEquals(Math.floor(div(k, 10)), Math.floor(k / 10));
+ assertEquals(Math.floor(div(k, -10)), Math.floor(k / -10));
+ assertEquals(Math.floor(div(k, 11)), Math.floor(k / 11));
+ assertEquals(Math.floor(div(k, -11)), Math.floor(k / -11));
+ assertEquals(Math.floor(div(k, 12)), Math.floor(k / 12));
+ assertEquals(Math.floor(div(k, -12)), Math.floor(k / -12));
+ assertEquals(Math.floor(div(k, 13)), Math.floor(k / 13));
+ assertEquals(Math.floor(div(k, -13)), Math.floor(k / -13));
+ assertEquals(Math.floor(div(k, 14)), Math.floor(k / 14));
+ assertEquals(Math.floor(div(k, -14)), Math.floor(k / -14));
+ assertEquals(Math.floor(div(k, 15)), Math.floor(k / 15));
+ assertEquals(Math.floor(div(k, -15)), Math.floor(k / -15));
+ assertEquals(Math.floor(div(k, 16)), Math.floor(k / 16));
+ assertEquals(Math.floor(div(k, -16)), Math.floor(k / -16));
+ assertEquals(Math.floor(div(k, 17)), Math.floor(k / 17));
+ assertEquals(Math.floor(div(k, -17)), Math.floor(k / -17));
+ assertEquals(Math.floor(div(k, 18)), Math.floor(k / 18));
+ assertEquals(Math.floor(div(k, -18)), Math.floor(k / -18));
+ assertEquals(Math.floor(div(k, 19)), Math.floor(k / 19));
+ assertEquals(Math.floor(div(k, -19)), Math.floor(k / -19));
+ assertEquals(Math.floor(div(k, 20)), Math.floor(k / 20));
+ assertEquals(Math.floor(div(k, -20)), Math.floor(k / -20));
+ assertEquals(Math.floor(div(k, 21)), Math.floor(k / 21));
+ assertEquals(Math.floor(div(k, -21)), Math.floor(k / -21));
+ assertEquals(Math.floor(div(k, 22)), Math.floor(k / 22));
+ assertEquals(Math.floor(div(k, -22)), Math.floor(k / -22));
+ assertEquals(Math.floor(div(k, 23)), Math.floor(k / 23));
+ assertEquals(Math.floor(div(k, -23)), Math.floor(k / -23));
+ assertEquals(Math.floor(div(k, 24)), Math.floor(k / 24));
+ assertEquals(Math.floor(div(k, -24)), Math.floor(k / -24));
+ assertEquals(Math.floor(div(k, 25)), Math.floor(k / 25));
+ assertEquals(Math.floor(div(k, -25)), Math.floor(k / -25));
+ assertEquals(Math.floor(div(k, 125)), Math.floor(k / 125));
+ assertEquals(Math.floor(div(k, -125)), Math.floor(k / -125));
+ assertEquals(Math.floor(div(k, 625)), Math.floor(k / 625));
+ assertEquals(Math.floor(div(k, -625)), Math.floor(k / -625));
+ }
+ c = 0;
+ for (var k = 0; k <= limit; k++) {
+ if (k > exhaustive_limit) { c += step; k += c; }
+ assertEquals(Math.floor(div(-k, 1)), Math.floor(-k / 1));
+ assertEquals(Math.floor(div(-k, -1)), Math.floor(-k / -1));
+ assertEquals(Math.floor(div(-k, 2)), Math.floor(-k / 2));
+ assertEquals(Math.floor(div(-k, -2)), Math.floor(-k / -2));
+ assertEquals(Math.floor(div(-k, 3)), Math.floor(-k / 3));
+ assertEquals(Math.floor(div(-k, -3)), Math.floor(-k / -3));
+ assertEquals(Math.floor(div(-k, 4)), Math.floor(-k / 4));
+ assertEquals(Math.floor(div(-k, -4)), Math.floor(-k / -4));
+ assertEquals(Math.floor(div(-k, 5)), Math.floor(-k / 5));
+ assertEquals(Math.floor(div(-k, -5)), Math.floor(-k / -5));
+ assertEquals(Math.floor(div(-k, 6)), Math.floor(-k / 6));
+ assertEquals(Math.floor(div(-k, -6)), Math.floor(-k / -6));
+ assertEquals(Math.floor(div(-k, 7)), Math.floor(-k / 7));
+ assertEquals(Math.floor(div(-k, -7)), Math.floor(-k / -7));
+ assertEquals(Math.floor(div(-k, 8)), Math.floor(-k / 8));
+ assertEquals(Math.floor(div(-k, -8)), Math.floor(-k / -8));
+ assertEquals(Math.floor(div(-k, 9)), Math.floor(-k / 9));
+ assertEquals(Math.floor(div(-k, -9)), Math.floor(-k / -9));
+ assertEquals(Math.floor(div(-k, 10)), Math.floor(-k / 10));
+ assertEquals(Math.floor(div(-k, -10)), Math.floor(-k / -10));
+ assertEquals(Math.floor(div(-k, 11)), Math.floor(-k / 11));
+ assertEquals(Math.floor(div(-k, -11)), Math.floor(-k / -11));
+ assertEquals(Math.floor(div(-k, 12)), Math.floor(-k / 12));
+ assertEquals(Math.floor(div(-k, -12)), Math.floor(-k / -12));
+ assertEquals(Math.floor(div(-k, 13)), Math.floor(-k / 13));
+ assertEquals(Math.floor(div(-k, -13)), Math.floor(-k / -13));
+ assertEquals(Math.floor(div(-k, 14)), Math.floor(-k / 14));
+ assertEquals(Math.floor(div(-k, -14)), Math.floor(-k / -14));
+ assertEquals(Math.floor(div(-k, 15)), Math.floor(-k / 15));
+ assertEquals(Math.floor(div(-k, -15)), Math.floor(-k / -15));
+ assertEquals(Math.floor(div(-k, 16)), Math.floor(-k / 16));
+ assertEquals(Math.floor(div(-k, -16)), Math.floor(-k / -16));
+ assertEquals(Math.floor(div(-k, 17)), Math.floor(-k / 17));
+ assertEquals(Math.floor(div(-k, -17)), Math.floor(-k / -17));
+ assertEquals(Math.floor(div(-k, 18)), Math.floor(-k / 18));
+ assertEquals(Math.floor(div(-k, -18)), Math.floor(-k / -18));
+ assertEquals(Math.floor(div(-k, 19)), Math.floor(-k / 19));
+ assertEquals(Math.floor(div(-k, -19)), Math.floor(-k / -19));
+ assertEquals(Math.floor(div(-k, 20)), Math.floor(-k / 20));
+ assertEquals(Math.floor(div(-k, -20)), Math.floor(-k / -20));
+ assertEquals(Math.floor(div(-k, 21)), Math.floor(-k / 21));
+ assertEquals(Math.floor(div(-k, -21)), Math.floor(-k / -21));
+ assertEquals(Math.floor(div(-k, 22)), Math.floor(-k / 22));
+ assertEquals(Math.floor(div(-k, -22)), Math.floor(-k / -22));
+ assertEquals(Math.floor(div(-k, 23)), Math.floor(-k / 23));
+ assertEquals(Math.floor(div(-k, -23)), Math.floor(-k / -23));
+ assertEquals(Math.floor(div(-k, 24)), Math.floor(-k / 24));
+ assertEquals(Math.floor(div(-k, -24)), Math.floor(-k / -24));
+ assertEquals(Math.floor(div(-k, 25)), Math.floor(-k / 25));
+ assertEquals(Math.floor(div(-k, -25)), Math.floor(-k / -25));
+ assertEquals(Math.floor(div(-k, 125)), Math.floor(-k / 125));
+ assertEquals(Math.floor(div(-k, -125)), Math.floor(-k / -125));
+ assertEquals(Math.floor(div(-k, 625)), Math.floor(-k / 625));
+ assertEquals(Math.floor(div(-k, -625)), Math.floor(-k / -625));
+ }
+ // Test for edge cases.
+ // Use (values[key] | 0) to force the integer type.
+ for (var i = 0; i < values.length; i++) {
+ for (var j = 0; j < values.length; j++) {
+ assertEquals(Math.floor(div((values[i] | 0), (values[j] | 0))),
+ Math.floor((values[i] | 0) / (values[j] | 0)));
+ assertEquals(Math.floor(div(-(values[i] | 0), (values[j] | 0))),
+ Math.floor(-(values[i] | 0) / (values[j] | 0)));
+ assertEquals(Math.floor(div((values[i] | 0), -(values[j] | 0))),
+ Math.floor((values[i] | 0) / -(values[j] | 0)));
+ assertEquals(Math.floor(div(-(values[i] | 0), -(values[j] | 0))),
+ Math.floor(-(values[i] | 0) / -(values[j] | 0)));
+ }
+ }
+}
+
+test_div();
+%OptimizeFunctionOnNextCall(test_div);
+test_div();
+
+// Test for ia32/x64 flooring correctness.
+var values2 = [1, 3, 10, 99, 100, 101, 0x7fffffff];
+function test_div2() {
+ for (var i = 0; i < values2.length; i++) {
+ for (var j = 0; j < values2.length; j++) {
+ assertEquals(Math.floor(div((values2[i] | 0), (values2[j] | 0))),
+ Math.floor((values2[i] | 0) / (values2[j] | 0)));
+ assertEquals(Math.floor(div(-(values2[i] | 0), (values2[j] | 0))),
+ Math.floor(-(values2[i] | 0) / (values2[j] | 0)));
+ assertEquals(Math.floor(div((values2[i] | 0), -(values2[j] | 0))),
+ Math.floor((values2[i] | 0) / -(values2[j] | 0)));
+ assertEquals(Math.floor(div(-(values2[i] | 0), -(values2[j] | 0))),
+ Math.floor(-(values2[i] | 0) / -(values2[j] | 0)));
+ }
+ }
+}
+
+test_div2();
+%OptimizeFunctionOnNextCall(test_div2);
+test_div2();
+
+
+// Test for negative zero, overflow and division by 0.
+// Separate the tests to prevent deoptimizations from making the other optimized
+// test unreachable.
+
+// We box the value in an array to avoid constant propagation.
+var neg_one_in_array = [-1];
+var zero_in_array = [0];
+var min_int_in_array = [-2147483648];
+
+// Test for dividing by constant.
+function IsNegativeZero(x) {
+ assertTrue(x == 0); // Is 0 or -0.
+ var y = 1 / x;
+ assertFalse(isFinite(y));
+ return y < 0;
+}
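+
+// Illustrative sketch of the trick above (added for clarity): 1/0 is
+// +Infinity while 1/-0 is -Infinity, so the sign of y distinguishes the
+// two zeros even though 0 == -0.
+assertTrue(IsNegativeZero(-0));
+assertFalse(IsNegativeZero(0));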
+
+function test_div_deopt_minus_zero() {
+ for (var i = 0; i < 2; ++i) {
+ assertTrue(IsNegativeZero(Math.floor((zero_in_array[0] | 0) / -1)));
+ }
+}
+
+function test_div_deopt_overflow() {
+ for (var i = 0; i < 2; ++i) {
+ // We use '| 0' to force the representation to int32.
+ assertEquals(-min_int_in_array[0],
+ Math.floor((min_int_in_array[0] | 0) / -1));
+ }
+}
+
+function test_div_deopt_div_by_zero() {
+ for (var i = 0; i < 2; ++i) {
+ assertEquals(div(i, 0),
+ Math.floor(i / 0));
+ }
+}
+
+test_div_deopt_minus_zero();
+test_div_deopt_overflow();
+test_div_deopt_div_by_zero();
+%OptimizeFunctionOnNextCall(test_div_deopt_minus_zero);
+%OptimizeFunctionOnNextCall(test_div_deopt_overflow);
+%OptimizeFunctionOnNextCall(test_div_deopt_div_by_zero);
+test_div_deopt_minus_zero();
+test_div_deopt_overflow();
+test_div_deopt_div_by_zero();
+
+// Test for dividing by variable.
+function test_div_deopt_minus_zero_v() {
+ for (var i = 0; i < 2; ++i) {
+ assertTrue(IsNegativeZero(Math.floor((zero_in_array[0] | 0) /
+ neg_one_in_array[0])));
+ }
+}
+
+function test_div_deopt_overflow_v() {
+ for (var i = 0; i < 2; ++i) {
+ // We use '| 0' to force the representation to int32.
+ assertEquals(-min_int_in_array[0],
+ Math.floor((min_int_in_array[0] | 0) / neg_one_in_array[0]));
+ }
+}
+
+function test_div_deopt_div_by_zero_v() {
+ for (var i = 0; i < 2; ++i) {
+ assertEquals(div(i, 0),
+ Math.floor(i / zero_in_array[0]));
+ }
+}
+
+test_div_deopt_minus_zero_v();
+test_div_deopt_overflow_v();
+test_div_deopt_div_by_zero_v();
+%OptimizeFunctionOnNextCall(test_div_deopt_minus_zero_v);
+%OptimizeFunctionOnNextCall(test_div_deopt_overflow_v);
+%OptimizeFunctionOnNextCall(test_div_deopt_div_by_zero_v);
+test_div_deopt_minus_zero_v();
+test_div_deopt_overflow_v();
+test_div_deopt_div_by_zero_v();
diff --git a/src/3rdparty/v8/test/mjsunit/math-floor-of-div.js b/src/3rdparty/v8/test/mjsunit/math-floor-of-div.js
index e917182..c7ef289 100644
--- a/src/3rdparty/v8/test/mjsunit/math-floor-of-div.js
+++ b/src/3rdparty/v8/test/mjsunit/math-floor-of-div.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax --nouse_inlining
+// Flags: --allow-natives-syntax --nouse_inlining --enable_sudiv
// Use this function as reference. Make sure it is not inlined.
function div(a, b) {
@@ -184,10 +184,38 @@ test_div();
%OptimizeFunctionOnNextCall(test_div);
test_div();
-// Test for negative zero and overflow.
+// Test for ia32/x64 flooring correctness.
+var values2 = [1, 3, 10, 99, 100, 101, 0x7fffffff];
+function test_div2() {
+ for (var i = 0; i < values2.length; i++) {
+ for (var j = 0; j < values2.length; j++) {
+ assertEquals(Math.floor(div((values2[i] | 0), (values2[j] | 0))),
+ Math.floor((values2[i] | 0) / (values2[j] | 0)));
+ assertEquals(Math.floor(div(-(values2[i] | 0), (values2[j] | 0))),
+ Math.floor(-(values2[i] | 0) / (values2[j] | 0)));
+ assertEquals(Math.floor(div((values2[i] | 0), -(values2[j] | 0))),
+ Math.floor((values2[i] | 0) / -(values2[j] | 0)));
+ assertEquals(Math.floor(div(-(values2[i] | 0), -(values2[j] | 0))),
+ Math.floor(-(values2[i] | 0) / -(values2[j] | 0)));
+ }
+ }
+}
+
+test_div2();
+%OptimizeFunctionOnNextCall(test_div2);
+test_div2();
+
+
+// Test for negative zero, overflow and division by 0.
// Separate the tests to prevent deoptimizations from making the other optimized
// test unreachable.
+// We box the value in an array to avoid constant propagation.
+var neg_one_in_array = [-1];
+var zero_in_array = [0];
+var min_int_in_array = [-2147483648];
+
+// Test for dividing by constant.
function IsNegativeZero(x) {
assertTrue(x == 0); // Is 0 or -0.
var y = 1 / x;
@@ -196,21 +224,65 @@ function IsNegativeZero(x) {
}
function test_div_deopt_minus_zero() {
- var zero_in_array = [0];
- assertTrue(IsNegativeZero(Math.floor((zero_in_array[0] | 0) / -1)));
+ for (var i = 0; i < 2; ++i) {
+ assertTrue(IsNegativeZero(Math.floor((zero_in_array[0] | 0) / -1)));
+ }
}
function test_div_deopt_overflow() {
- // We box the value in an array to avoid constant propagation.
- var min_int_in_array = [-2147483648];
- // We use '| 0' to force the representation to int32.
- assertEquals(-min_int_in_array[0],
- Math.floor((min_int_in_array[0] | 0) / -1));
+ for (var i = 0; i < 2; ++i) {
+ // We use '| 0' to force the representation to int32.
+ assertEquals(-min_int_in_array[0],
+ Math.floor((min_int_in_array[0] | 0) / -1));
+ }
+}
+
+function test_div_deopt_div_by_zero() {
+ for (var i = 0; i < 2; ++i) {
+ assertEquals(div(i, 0),
+ Math.floor(i / 0));
+ }
}
test_div_deopt_minus_zero();
test_div_deopt_overflow();
+test_div_deopt_div_by_zero();
%OptimizeFunctionOnNextCall(test_div_deopt_minus_zero);
%OptimizeFunctionOnNextCall(test_div_deopt_overflow);
+%OptimizeFunctionOnNextCall(test_div_deopt_div_by_zero);
test_div_deopt_minus_zero();
test_div_deopt_overflow();
+test_div_deopt_div_by_zero();
+
+// Test for dividing by variable.
+function test_div_deopt_minus_zero_v() {
+ for (var i = 0; i < 2; ++i) {
+ assertTrue(IsNegativeZero(Math.floor((zero_in_array[0] | 0) /
+ neg_one_in_array[0])));
+ }
+}
+
+function test_div_deopt_overflow_v() {
+ for (var i = 0; i < 2; ++i) {
+ // We use '| 0' to force the representation to int32.
+ assertEquals(-min_int_in_array[0],
+ Math.floor((min_int_in_array[0] | 0) / neg_one_in_array[0]));
+ }
+}
+
+function test_div_deopt_div_by_zero_v() {
+ for (var i = 0; i < 2; ++i) {
+ assertEquals(div(i, 0),
+ Math.floor(i / zero_in_array[0]));
+ }
+}
+
+test_div_deopt_minus_zero_v();
+test_div_deopt_overflow_v();
+test_div_deopt_div_by_zero_v();
+%OptimizeFunctionOnNextCall(test_div_deopt_minus_zero_v);
+%OptimizeFunctionOnNextCall(test_div_deopt_overflow_v);
+%OptimizeFunctionOnNextCall(test_div_deopt_div_by_zero_v);
+test_div_deopt_minus_zero_v();
+test_div_deopt_overflow_v();
+test_div_deopt_div_by_zero_v();
diff --git a/src/3rdparty/v8/test/mjsunit/mjsunit.status b/src/3rdparty/v8/test/mjsunit/mjsunit.status
index 0bf378b..26c8359 100644
--- a/src/3rdparty/v8/test/mjsunit/mjsunit.status
+++ b/src/3rdparty/v8/test/mjsunit/mjsunit.status
@@ -40,6 +40,9 @@ regress/regress-524: SKIP
# Skip long running test in debug and allow it to timeout in release mode.
# regress/regress-524: (PASS || TIMEOUT), SKIP if $mode == debug
+# Deferred stack trace formatting is temporarily disabled.
+stack-traces-gc: PASS || FAIL
+
##############################################################################
# Too slow in debug mode with --stress-opt
compiler/regress-stacktrace-methods: PASS, SKIP if $mode == debug
@@ -66,8 +69,9 @@ unicode-case-overoptimization: PASS, TIMEOUT if ($arch == arm || $arch == androi
json-recursive: PASS, (PASS || FAIL) if $mode == debug
##############################################################################
-# Skip long running test that times out in debug mode.
-regress/regress-crbug-160010: PASS, SKIP if $mode == debug
+# Skip long running test that times out in debug mode or goes OOM on android.
+regress/regress-crbug-160010: PASS, SKIP if ($mode == debug || $arch == android_arm)
+generated-transition-stub: PASS, SKIP if $mode == debug
##############################################################################
# This test sets the umask on a per-process basis and hence cannot be
@@ -77,6 +81,14 @@ d8-os: PASS, SKIP if ($isolates || $arch == android_arm || $arch == android_ia32
tools/tickprocessor: PASS, SKIP if ($arch == android_arm || $arch == android_ia32)
##############################################################################
+# This test is the same as math-floor-of-div for non ARM architectures.
+math-floor-of-div-nosudiv: PASS, SKIP if ($arch != arm && $arch != android_arm)
+
+##############################################################################
+# Long running test that reproduces memory leak and should be run manually.
+regress/regress-2073: SKIP
+
+##############################################################################
[ $arch == arm || $arch == android_arm ]
# Slow tests which times out in debug mode.
diff --git a/src/3rdparty/v8/test/mjsunit/regexp-capture-3.js b/src/3rdparty/v8/test/mjsunit/regexp-capture-3.js
index b676f01..4c27ea4 100755
--- a/src/3rdparty/v8/test/mjsunit/regexp-capture-3.js
+++ b/src/3rdparty/v8/test/mjsunit/regexp-capture-3.js
@@ -165,23 +165,22 @@ function NoHang(re) {
"This is an ASCII string that could take forever".match(re);
}
-
-NoHang(/(((.*)*)*x)å/); // Continuation after loop is filtered, so is loop.
-NoHang(/(((.*)*)*å)foo/); // Body of loop filtered.
-NoHang(/å(((.*)*)*x)/); // Everything after a filtered character is filtered.
-NoHang(/(((.*)*)*x)å/); // Everything before a filtered character is filtered.
-NoHang(/[æøå](((.*)*)*x)/); // Everything after a filtered class is filtered.
-NoHang(/(((.*)*)*x)[æøå]/); // Everything before a filtered class is filtered.
-NoHang(/[^\x00-\x7f](((.*)*)*x)/); // After negated class.
-NoHang(/(((.*)*)*x)[^\x00-\x7f]/); // Before negated class.
-NoHang(/(?!(((.*)*)*x)å)foo/); // Negative lookahead is filtered.
-NoHang(/(?!(((.*)*)*x))å/); // Continuation branch of negative lookahead.
-NoHang(/(?=(((.*)*)*x)å)foo/); // Positive lookahead is filtered.
-NoHang(/(?=(((.*)*)*x))å/); // Continuation branch of positive lookahead.
-NoHang(/(?=å)(((.*)*)*x)/); // Positive lookahead also prunes continuation.
-NoHang(/(æ|ø|å)(((.*)*)*x)/); // All branches of alternation are filtered.
-NoHang(/(a|b|(((.*)*)*x))å/); // 1 out of 3 branches pruned.
-NoHang(/(a|(((.*)*)*x)ø|(((.*)*)*x)å)/); // 2 out of 3 branches pruned.
+NoHang(/(((.*)*)*x)Ā/); // Continuation after loop is filtered, so is loop.
+NoHang(/(((.*)*)*Ā)foo/); // Body of loop filtered.
+NoHang(/Ā(((.*)*)*x)/); // Everything after a filtered character is filtered.
+NoHang(/(((.*)*)*x)Ā/); // Everything before a filtered character is filtered.
+NoHang(/[ćăĀ](((.*)*)*x)/); // Everything after a filtered class is filtered.
+NoHang(/(((.*)*)*x)[ćăĀ]/); // Everything before a filtered class is filtered.
+NoHang(/[^\x00-\xff](((.*)*)*x)/); // After negated class.
+NoHang(/(((.*)*)*x)[^\x00-\xff]/); // Before negated class.
+NoHang(/(?!(((.*)*)*x)Ā)foo/); // Negative lookahead is filtered.
+NoHang(/(?!(((.*)*)*x))Ā/); // Continuation branch of negative lookahead.
+NoHang(/(?=(((.*)*)*x)Ā)foo/); // Positive lookahead is filtered.
+NoHang(/(?=(((.*)*)*x))Ā/); // Continuation branch of positive lookahead.
+NoHang(/(?=Ā)(((.*)*)*x)/); // Positive lookahead also prunes continuation.
+NoHang(/(æ|ø|Ā)(((.*)*)*x)/); // All branches of alternation are filtered.
+NoHang(/(a|b|(((.*)*)*x))Ā/); // 1 out of 3 branches pruned.
+NoHang(/(a|(((.*)*)*x)ă|(((.*)*)*x)Ā)/); // 2 out of 3 branches pruned.
var s = "Don't prune based on a repetition of length 0";
assertEquals(null, s.match(/å{1,1}prune/));
diff --git a/src/3rdparty/v8/test/mjsunit/regress/regress-121407.js b/src/3rdparty/v8/test/mjsunit/regress/regress-121407.js
index 25033fb..4403708 100644
--- a/src/3rdparty/v8/test/mjsunit/regress/regress-121407.js
+++ b/src/3rdparty/v8/test/mjsunit/regress/regress-121407.js
@@ -37,4 +37,4 @@ a[2000000] = 2000000;
a.length=2000;
for (var i = 0; i <= 256; i++) {
a[i] = new Object();
-} \ No newline at end of file
+}
diff --git a/src/3rdparty/v8/src/inspector.h b/src/3rdparty/v8/test/mjsunit/regress/regress-147497.js
index 6962e21..92e29d1 100644
--- a/src/3rdparty/v8/src/inspector.h
+++ b/src/3rdparty/v8/test/mjsunit/regress/regress-147497.js
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -25,36 +25,21 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Flags: --expose-debug-as debug
-#ifndef V8_INSPECTOR_H_
-#define V8_INSPECTOR_H_
+Debug = debug.Debug;
-// Only build this code if we're configured with the INSPECTOR.
-#ifdef INSPECTOR
-
-#include "v8.h"
-
-#include "objects.h"
-
-namespace v8 {
-namespace internal {
-
-class Inspector {
- public:
- static void DumpObjectType(FILE* out, Object* obj, bool print_more);
- static void DumpObjectType(FILE* out, Object* obj) {
- DumpObjectType(out, obj, false);
- }
- static void DumpObjectType(Object* obj, bool print_more) {
- DumpObjectType(stdout, obj, print_more);
- }
- static void DumpObjectType(Object* obj) {
- DumpObjectType(stdout, obj, false);
+function listener(event, exec_state, event_data, data) {
+ if (event == Debug.DebugEvent.Break) {
+ exec_state.prepareStep(Debug.StepAction.StepNext, 10);
}
};
-} } // namespace v8::internal
+Debug.setListener(listener);
-#endif // INSPECTOR
+var statement = "";
+for (var i = 0; i < 1024; i++) statement += "z";
+statement = 'with(0)' + statement + '=function foo(){}';
-#endif // V8_INSPECTOR_H_
+debugger;
+eval(statement);
diff --git a/src/3rdparty/v8/test/mjsunit/regress/regress-164442.js b/src/3rdparty/v8/test/mjsunit/regress/regress-164442.js
new file mode 100644
index 0000000..1160d87
--- /dev/null
+++ b/src/3rdparty/v8/test/mjsunit/regress/regress-164442.js
@@ -0,0 +1,45 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+// Should not take a very long time (n^2 algorithms are bad)
+
+
+function ensureNotNegative(x) {
+ return Math.max(0, x | 0);
+}
+
+
+ensureNotNegative(1);
+ensureNotNegative(2);
+
+%OptimizeFunctionOnNextCall(ensureNotNegative);
+
+var r = ensureNotNegative(-1);
+
+assertEquals(0, r);
diff --git a/src/3rdparty/v8/test/mjsunit/regress/regress-165637.js b/src/3rdparty/v8/test/mjsunit/regress/regress-165637.js
new file mode 100644
index 0000000..72af528
--- /dev/null
+++ b/src/3rdparty/v8/test/mjsunit/regress/regress-165637.js
@@ -0,0 +1,61 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+// Should not take a very long time (n^2 algorithms are bad)
+
+function do_slices() {
+ var data = new Array(1024 * 12); // 12kB
+
+ for (var i = 0; i < data.length; i++) {
+ data[i] = 255;
+ }
+
+ var start = Date.now();
+
+ for (i = 0; i < 20000; i++) {
+ data.slice(4, 1);
+ }
+
+ return Date.now() - start;
+}
+
+// Reset the GC stress mode to be off. Needed so that the runtime of this test
+// stays within bounds even if we run in GC stress mode.
+%SetFlags("--gc-interval=-1 --noforce-marking-deque-overflows");
+
+// Should never take more than 3 seconds (if the bug is fixed, the test takes
+// considerably less time than 3 seconds).
+assertTrue(do_slices() < (3 * 1000));
+
+// Make sure that packed and unpacked array slices are still properly handled
+var holey_array = [1, 2, 3, 4, 5,,,,,,];
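+// The trailing elisions leave indices 5 through 9 as holes: the array has
+// length 10 but no own elements there, so holey_array[6] reads as
+// undefined. The asserts below check that empty slices are not marked
+// holey and that sliced holes surface as undefined.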
+assertFalse(%HasFastHoleyElements(holey_array.slice(6, 1)));
+assertEquals(undefined, holey_array.slice(6, 7)[0])
+assertFalse(%HasFastHoleyElements(holey_array.slice(2, 1)));
+assertEquals(3, holey_array.slice(2, 3)[0])
diff --git a/src/3rdparty/v8/test/mjsunit/regress/regress-166379.js b/src/3rdparty/v8/test/mjsunit/regress/regress-166379.js
new file mode 100644
index 0000000..b19afbd
--- /dev/null
+++ b/src/3rdparty/v8/test/mjsunit/regress/regress-166379.js
@@ -0,0 +1,39 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+function mod(a, b) { return a % b; }
+
+// Feed integer type info and optimize.
+assertEquals(0, mod(4, 2));
+assertEquals(1, mod(3, 2));
+%OptimizeFunctionOnNextCall(mod);
+
+// Surprise mod with overflow.
+assertEquals(-Infinity, 1/mod(-2147483648, -1));
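+// In JavaScript -2147483648 % -1 is -0 (the result takes the dividend's
+// sign); an int32-specialized modulus that overflows here can produce +0
+// instead, and the 1/x sign test is how the two are told apart.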
+
diff --git a/src/3rdparty/v8/test/mjsunit/regress/regress-166553.js b/src/3rdparty/v8/test/mjsunit/regress/regress-166553.js
new file mode 100644
index 0000000..acaf34f
--- /dev/null
+++ b/src/3rdparty/v8/test/mjsunit/regress/regress-166553.js
@@ -0,0 +1,33 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose_gc
+
+JSON.stringify(String.fromCharCode(1, -11).toString())
+gc();
+var s = String.fromCharCode(1, -11)
+assertEquals(65525, s.charCodeAt(1))
diff --git a/src/3rdparty/v8/test/mjsunit/regress/regress-1692.js b/src/3rdparty/v8/test/mjsunit/regress/regress-1692.js
index 06bd66c..32be87f 100644
--- a/src/3rdparty/v8/test/mjsunit/regress/regress-1692.js
+++ b/src/3rdparty/v8/test/mjsunit/regress/regress-1692.js
@@ -82,7 +82,7 @@ var o = Object("string");
// Non-string property on String object.
o[10] = 42;
assertTrue(o.propertyIsEnumerable(10));
-assertFalse(o.propertyIsEnumerable(0));
+assertTrue(o.propertyIsEnumerable(0));
// Fast elements.
var o = [1,2,3,4,5];
diff --git a/src/3rdparty/v8/test/mjsunit/regress/regress-171641.js b/src/3rdparty/v8/test/mjsunit/regress/regress-171641.js
new file mode 100644
index 0000000..8db6781
--- /dev/null
+++ b/src/3rdparty/v8/test/mjsunit/regress/regress-171641.js
@@ -0,0 +1,40 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+function foo(k, p) {
+ for (var i = 0; i < 1; i++) {
+ p = Math.min(p, i);
+ }
+ m = Math.floor((k | 0) / p);
+}
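+
+// After the loop p is Math.min(1, 0) == 0, so the final statement computes
+// Math.floor((k | 0) / 0); presumably the regression was an optimized
+// floor-of-division that mishandled a zero divisor here.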
+
+foo(0, 1);
+foo(0, 1);
+%OptimizeFunctionOnNextCall(foo);
+foo(0, 1);
diff --git a/src/3rdparty/v8/test/mjsunit/regress/regress-1980.js b/src/3rdparty/v8/test/mjsunit/regress/regress-1980.js
index 49dfd06..d87ff45 100644
--- a/src/3rdparty/v8/test/mjsunit/regress/regress-1980.js
+++ b/src/3rdparty/v8/test/mjsunit/regress/regress-1980.js
@@ -34,7 +34,7 @@ for (var i = 0; i < invalid_this.length; i++) {
Error.prototype.toString.call(invalid_this[i]);
} catch (e) {
exception = true;
- assertTrue("called_on_non_object" == e.type);
+ assertEquals("Error.prototype.toString called on non-object", e.message);
}
assertTrue(exception);
}
diff --git a/src/3rdparty/v8/test/mjsunit/regress/regress-2073.js b/src/3rdparty/v8/test/mjsunit/regress/regress-2073.js
new file mode 100644
index 0000000..4e40b04
--- /dev/null
+++ b/src/3rdparty/v8/test/mjsunit/regress/regress-2073.js
@@ -0,0 +1,99 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Running this test with --trace_gc will show heap size growth due to
+// leaking objects via embedded maps in optimized code.
+
+var counter = 0;
+
+function nextid() {
+ counter += 1;
+ return counter;
+}
+
+function Scope() {
+ this.id = nextid();
+ this.parent = null;
+ this.left = null;
+ this.right = null;
+ this.head = null;
+ this.tail = null;
+ this.counter = 0;
+}
+
+Scope.prototype = {
+ new: function() {
+ var Child,
+ child;
+ Child = function() {};
+ Child.prototype = this;
+ child = new Child();
+ child.id = nextid();
+ child.parent = this;
+ child.left = this.last;
+ child.right = null;
+ child.head = null;
+ child.tail = null;
+ child.counter = 0;
+ if (this.head) {
+ this.tail.right = child;
+ this.tail = child;
+ } else {
+ this.head = this.tail = child;
+ }
+ return child;
+ },
+
+ destroy: function() {
+ if ($root == this) return;
+ var parent = this.parent;
+ if (parent.head == this) parent.head = this.right;
+ if (parent.tail == this) parent.tail = this.left;
+ if (this.left) this.left.right = this.right;
+ if (this.right) this.right.left = this.left;
+ }
+};
+
+function inc(scope) {
+ scope.counter = scope.counter + 1;
+}
+
+var $root = new Scope();
+
+n = 100000;
+m = 10;
+
+function doit() {
+ var a = $root.new();
+ var b = a.new();
+ inc(b);
+ if (i > m) $root.head.destroy();
+}
+
+for (var i = 0; i < n; i++) {
+ doit();
+}
diff --git a/src/3rdparty/v8/test/mjsunit/regress/regress-2185.js b/src/3rdparty/v8/test/mjsunit/regress/regress-2185.js
index 895f322..9b91066 100644
--- a/src/3rdparty/v8/test/mjsunit/regress/regress-2185.js
+++ b/src/3rdparty/v8/test/mjsunit/regress/regress-2185.js
@@ -25,6 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Flags: --noenable-slow-asserts
+
var a = [];
for (var i = 0; i < 2; i++) {
diff --git a/src/3rdparty/v8/test/mjsunit/regress/regress-2243.js b/src/3rdparty/v8/test/mjsunit/regress/regress-2243.js
new file mode 100644
index 0000000..31c2e55
--- /dev/null
+++ b/src/3rdparty/v8/test/mjsunit/regress/regress-2243.js
@@ -0,0 +1,31 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --harmony-scoping
+
+assertThrows("'use strict'; (function f() { f = 123; })", SyntaxError);
+assertThrows("(function f() { 'use strict'; f = 123; })", SyntaxError);
diff --git a/src/3rdparty/v8/test/mjsunit/regress/regress-2263.js b/src/3rdparty/v8/test/mjsunit/regress/regress-2263.js
new file mode 100644
index 0000000..9a9db58
--- /dev/null
+++ b/src/3rdparty/v8/test/mjsunit/regress/regress-2263.js
@@ -0,0 +1,30 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+var obj = { length: { valueOf: function(){ throw { type: "length" }}}};
+var sep = { toString: function(){ throw { type: "toString" }}};
+assertThrows("Array.prototype.join.call(obj, sep)", undefined, "length");
diff --git a/src/3rdparty/v8/test/mjsunit/regress/regress-2315.js b/src/3rdparty/v8/test/mjsunit/regress/regress-2315.js
new file mode 100644
index 0000000..a3f9182
--- /dev/null
+++ b/src/3rdparty/v8/test/mjsunit/regress/regress-2315.js
@@ -0,0 +1,40 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+var foo = (function() {
+ return eval("(function bar() { return 1; })");
+})();
+
+foo();
+foo();
+%OptimizeFunctionOnNextCall(foo);
+foo();
+
+// Function should be optimized now.
+assertTrue(%GetOptimizationStatus(foo) != 2);
diff --git a/src/3rdparty/v8/test/mjsunit/regress/regress-2410.js b/src/3rdparty/v8/test/mjsunit/regress/regress-2410.js
new file mode 100644
index 0000000..c16fd14
--- /dev/null
+++ b/src/3rdparty/v8/test/mjsunit/regress/regress-2410.js
@@ -0,0 +1,36 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Object.prototype should be ignored in Object.getOwnPropertyNames
+//
+// See http://code.google.com/p/v8/issues/detail?id=2410 for details.
+
+Object.defineProperty(Object.prototype,
+ 'thrower',
+ { get: function() { throw Error('bug') } });
+var obj = { thrower: 'local' };
+assertEquals(['thrower'], Object.getOwnPropertyNames(obj));
diff --git a/src/3rdparty/v8/test/mjsunit/regress/regress-2416.js b/src/3rdparty/v8/test/mjsunit/regress/regress-2416.js
new file mode 100644
index 0000000..02afeb9
--- /dev/null
+++ b/src/3rdparty/v8/test/mjsunit/regress/regress-2416.js
@@ -0,0 +1,75 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+assertFalse(2147483647 < -2147483648)
+assertFalse(2147483647 <= -2147483648)
+assertFalse(2147483647 == -2147483648)
+assertTrue(2147483647 >= -2147483648)
+assertTrue(2147483647 > -2147483648)
+
+assertTrue(-2147483648 < 2147483647)
+assertTrue(-2147483648 <= 2147483647)
+assertFalse(-2147483648 == 2147483647)
+assertFalse(-2147483648 >= 2147483647)
+assertFalse(-2147483648 > 2147483647)
+
+assertFalse(2147483647 < 2147483647)
+assertTrue(2147483647 <= 2147483647)
+assertTrue(2147483647 == 2147483647)
+assertTrue(2147483647 >= 2147483647)
+assertFalse(2147483647 > 2147483647)
+
+assertFalse(-2147483648 < -2147483648)
+assertTrue(-2147483648 <= -2147483648)
+assertTrue(-2147483648 == -2147483648)
+assertTrue(-2147483648 >= -2147483648)
+assertFalse(-2147483648 > -2147483648)
+
+
+assertFalse(1073741823 < -1073741824)
+assertFalse(1073741823 <= -1073741824)
+assertFalse(1073741823 == -1073741824)
+assertTrue(1073741823 >= -1073741824)
+assertTrue(1073741823 > -1073741824)
+
+assertTrue(-1073741824 < 1073741823)
+assertTrue(-1073741824 <= 1073741823)
+assertFalse(-1073741824 == 1073741823)
+assertFalse(-1073741824 >= 1073741823)
+assertFalse(-1073741824 > 1073741823)
+
+assertFalse(1073741823 < 1073741823)
+assertTrue(1073741823 <= 1073741823)
+assertTrue(1073741823 == 1073741823)
+assertTrue(1073741823 >= 1073741823)
+assertFalse(1073741823 > 1073741823)
+
+assertFalse(-1073741824 < -1073741824)
+assertTrue(-1073741824 <= -1073741824)
+assertTrue(-1073741824 == -1073741824)
+assertTrue(-1073741824 >= -1073741824)
+assertFalse(-1073741824 > -1073741824)
diff --git a/src/3rdparty/v8/test/mjsunit/regress/regress-2419.js b/src/3rdparty/v8/test/mjsunit/regress/regress-2419.js
new file mode 100644
index 0000000..9cd453a
--- /dev/null
+++ b/src/3rdparty/v8/test/mjsunit/regress/regress-2419.js
@@ -0,0 +1,37 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+var a = [5, 4, 3, 2, 1, 0];
+Object.freeze(a);
+a.sort();
+assertArrayEquals([5, 4, 3, 2, 1, 0], a);
+
+var b = {0: 5, 1: 4, 2: 3, 3: 2, 4: 1, 5: 0, length: 6};
+Object.freeze(b);
+Array.prototype.sort.call(b);
+assertPropertiesEqual({0: 5, 1: 4, 2: 3, 3: 2, 4: 1, 5: 0, length: 6}, b);
+
diff --git a/src/3rdparty/v8/test/mjsunit/regress/regress-2433.js b/src/3rdparty/v8/test/mjsunit/regress/regress-2433.js
new file mode 100644
index 0000000..dfe7131
--- /dev/null
+++ b/src/3rdparty/v8/test/mjsunit/regress/regress-2433.js
@@ -0,0 +1,38 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Transitioning from a PackedSmi to PackedDouble should fill the destination
+// with holes.
+//
+// See http://code.google.com/p/v8/issues/detail?id=2433 for details.
+
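+// After the transition arr's backing store has capacity beyond its length;
+// the unused slots must be holes, so reading past the end yields undefined.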
+arr = [];
+arr[0] = 0;
+arr[0] = 1.1;
+assertEquals(undefined, arr[1]);
diff --git a/src/3rdparty/v8/test/mjsunit/regress/regress-2437.js b/src/3rdparty/v8/test/mjsunit/regress/regress-2437.js
new file mode 100644
index 0000000..c82293a
--- /dev/null
+++ b/src/3rdparty/v8/test/mjsunit/regress/regress-2437.js
@@ -0,0 +1,163 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Summary of the spec: lastIndex is reset to 0 if
+// - a regexp fails to match, regardless of global or non-global.
+// - a global regexp is used in a function that returns multiple results,
+// such as String.prototype.replace or String.prototype.match, since it
+// repeats the regexp until it fails to match.
+// Otherwise lastIndex is only set when a global regexp matches, to the index
+// after the match.
+
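+// Illustrative sketch (not part of the original test): a global regexp
+// advances lastIndex past each successful match, e.g.
+//   var g = /a/g;
+//   g.exec("xa");              // matches at index 1
+//   assertEquals(2, g.lastIndex);
+// while any failing match resets lastIndex to 0, global or not.
+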
+// Test RegExp.prototype.exec
+r = /a/;
+r.lastIndex = 1;
+r.exec("zzzz");
+assertEquals(0, r.lastIndex);
+
+// Test RegExp.prototype.test
+r = /a/;
+r.lastIndex = 1;
+r.test("zzzz");
+assertEquals(0, r.lastIndex);
+
+// Test String.prototype.match
+r = /a/;
+r.lastIndex = 1;
+"zzzz".match(r);
+assertEquals(0, r.lastIndex);
+
+// Test String.prototype.replace with atomic regexp and empty string.
+r = /a/;
+r.lastIndex = 1;
+"zzzz".replace(r, "");
+assertEquals(0, r.lastIndex);
+
+// Test String.prototype.replace with non-atomic regexp and empty string.
+r = /\d/;
+r.lastIndex = 1;
+"zzzz".replace(r, "");
+assertEquals(0, r.lastIndex);
+
+// Test String.prototype.replace with atomic regexp and non-empty string.
+r = /a/;
+r.lastIndex = 1;
+"zzzz".replace(r, "a");
+assertEquals(0, r.lastIndex);
+
+// Test String.prototype.replace with non-atomic regexp and non-empty string.
+r = /\d/;
+r.lastIndex = 1;
+"zzzz".replace(r, "a");
+assertEquals(0, r.lastIndex);
+
+// Test String.prototype.replace with replacement function
+r = /a/;
+r.lastIndex = 1;
+"zzzz".replace(r, function() { return ""; });
+assertEquals(0, r.lastIndex);
+
+// RegExp functions that return multiple results:
+// A global regexp always resets lastIndex regardless of whether it matches.
+r = /a/g;
+r.lastIndex = -1;
+"0123abcd".replace(r, "x");
+assertEquals(0, r.lastIndex);
+
+r.lastIndex = -1;
+"01234567".replace(r, "x");
+assertEquals(0, r.lastIndex);
+
+r.lastIndex = -1;
+"0123abcd".match(r);
+assertEquals(0, r.lastIndex);
+
+r.lastIndex = -1;
+"01234567".match(r);
+assertEquals(0, r.lastIndex);
+
+// A non-global regexp resets lastIndex iff it does not match.
+r = /a/;
+r.lastIndex = -1;
+"0123abcd".replace(r, "x");
+assertEquals(-1, r.lastIndex);
+
+r.lastIndex = -1;
+"01234567".replace(r, "x");
+assertEquals(0, r.lastIndex);
+
+r.lastIndex = -1;
+"0123abcd".match(r);
+assertEquals(-1, r.lastIndex);
+
+r.lastIndex = -1;
+"01234567".match(r);
+assertEquals(0, r.lastIndex);
+
+// Also test RegExp.prototype.exec and RegExp.prototype.test
+r = /a/g;
+r.lastIndex = 1;
+r.exec("01234567");
+assertEquals(0, r.lastIndex);
+
+r.lastIndex = 1;
+r.exec("0123abcd");
+assertEquals(5, r.lastIndex);
+
+r = /a/;
+r.lastIndex = 1;
+r.exec("01234567");
+assertEquals(0, r.lastIndex);
+
+r.lastIndex = 1;
+r.exec("0123abcd");
+assertEquals(1, r.lastIndex);
+
+r = /a/g;
+r.lastIndex = 1;
+r.test("01234567");
+assertEquals(0, r.lastIndex);
+
+r.lastIndex = 1;
+r.test("0123abcd");
+assertEquals(5, r.lastIndex);
+
+r = /a/;
+r.lastIndex = 1;
+r.test("01234567");
+assertEquals(0, r.lastIndex);
+
+r.lastIndex = 1;
+r.test("0123abcd");
+assertEquals(1, r.lastIndex);
diff --git a/src/3rdparty/v8/test/mjsunit/regress/regress-2438.js b/src/3rdparty/v8/test/mjsunit/regress/regress-2438.js
new file mode 100644
index 0000000..3f4fd7d
--- /dev/null
+++ b/src/3rdparty/v8/test/mjsunit/regress/regress-2438.js
@@ -0,0 +1,54 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
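+// Each regexp operation below must convert lastIndex (via its valueOf)
+// exactly once, whether or not the regexp is global or actually matches.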
+function testSideEffects(subject, re) {
+ var counter = 0;
+ var side_effect_object = { valueOf: function() { return counter++; } };
+ re.lastIndex = side_effect_object;
+ re.exec(subject);
+ assertEquals(1, counter);
+
+ re.lastIndex = side_effect_object;
+ re.test(subject);
+ assertEquals(2, counter);
+
+ re.lastIndex = side_effect_object;
+ subject.match(re);
+ assertEquals(3, counter);
+
+ re.lastIndex = side_effect_object;
+ subject.replace(re, "");
+ assertEquals(4, counter);
+}
+
+testSideEffects("zzzz", /a/);
+testSideEffects("zzzz", /a/g);
+testSideEffects("xaxa", /a/);
+testSideEffects("xaxa", /a/g);
+
diff --git a/src/3rdparty/v8/test/mjsunit/regress/regress-2441.js b/src/3rdparty/v8/test/mjsunit/regress/regress-2441.js
new file mode 100644
index 0000000..72ce248
--- /dev/null
+++ b/src/3rdparty/v8/test/mjsunit/regress/regress-2441.js
@@ -0,0 +1,33 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+var o = {};
+Object.preventExtensions(o);
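+// Both must throw: adding any new property to a non-extensible object fails,
+// and '__proto__' must not be special-cased into silently succeeding.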
+assertThrows("Object.defineProperty(o, 'foobarloo', {value:{}});", TypeError);
+assertThrows("Object.defineProperty(o, '__proto__', {value:{}});", TypeError);
diff --git a/src/3rdparty/v8/test/mjsunit/regress/regress-2443.js b/src/3rdparty/v8/test/mjsunit/regress/regress-2443.js
new file mode 100644
index 0000000..0800c45
--- /dev/null
+++ b/src/3rdparty/v8/test/mjsunit/regress/regress-2443.js
@@ -0,0 +1,135 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Number.prototype methods on non-Numbers.
+
+assertThrows(function() { Number.prototype.toExponential.call({}) },
+ TypeError);
+
+assertThrows(function() { Number.prototype.toPrecision.call({}) },
+ TypeError);
+
+assertThrows(function() { Number.prototype.toFixed.call({}) },
+ TypeError);
+
+assertThrows(function() { Number.prototype.toString.call({}) },
+ TypeError);
+
+assertThrows(function() { Number.prototype.toLocaleString.call({}) },
+ TypeError);
+
+assertThrows(function() { Number.prototype.valueOf.call({}) },
+ TypeError);
+
+
+// Call on Number objects with custom valueOf method.
+
+var x_obj = new Number(1);
+x_obj.valueOf = function() { assertUnreachable(); };
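+// The conversions below must read the wrapped primitive value directly;
+// they only pass because valueOf is never invoked on the wrapper.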
+
+assertEquals("1.00e+0",
+ Number.prototype.toExponential.call(x_obj, 2));
+
+assertEquals("1.0",
+ Number.prototype.toPrecision.call(x_obj, 2));
+
+assertEquals("1.00",
+ Number.prototype.toFixed.call(x_obj, 2));
+
+// Call on primitive numbers.
+assertEquals("1.00e+0",
+ Number.prototype.toExponential.call(1, 2));
+
+assertEquals("1.0",
+ Number.prototype.toPrecision.call(1, 2));
+
+assertEquals("1.00",
+ Number.prototype.toFixed.call(1, 2));
+
+
+// toExponential and toPrecision perform the following steps in order:
+// 1) convert the argument using ToInteger,
+// 2) check for a non-finite receiver, in which case they return early,
+// 3) check the argument range and throw an exception if out of range.
+// Note that the last two steps are reversed for toFixed.
+// Luckily, the receiver is expected to be a number or number
+// wrapper, so that getting its value is not observable.
+
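+// A sketch of the ordering consequence (not part of the original test):
+//   (NaN).toExponential(1000)  // "NaN": the non-finite check comes first
+//   (1).toExponential(1000)    // RangeError: the argument is out of range
+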
+var f_flag = false;
+var f_obj = { valueOf: function() { f_flag = true; return 1000; } };
+
+assertEquals("NaN",
+ Number.prototype.toExponential.call(NaN, f_obj));
+assertTrue(f_flag);
+
+f_flag = false;
+assertEquals("Infinity",
+ Number.prototype.toExponential.call(1/0, f_obj));
+assertTrue(f_flag);
+
+f_flag = false;
+assertEquals("-Infinity",
+ Number.prototype.toExponential.call(-1/0, f_obj));
+assertTrue(f_flag);
+
+f_flag = false;
+assertEquals("NaN",
+ Number.prototype.toPrecision.call(NaN, f_obj));
+assertTrue(f_flag);
+
+f_flag = false;
+assertEquals("Infinity",
+ Number.prototype.toPrecision.call(1/0, f_obj));
+assertTrue(f_flag);
+
+f_flag = false;
+assertEquals("-Infinity",
+ Number.prototype.toPrecision.call(-1/0, f_obj));
+assertTrue(f_flag);
+
+// The odd man out: toFixed.
+
+f_flag = false;
+assertThrows(function() { Number.prototype.toFixed.call(NaN, f_obj) },
+ RangeError);
+assertTrue(f_flag);
+
+f_flag = false;
+assertThrows(function() { Number.prototype.toFixed.call(1/0, f_obj) },
+ RangeError);
+assertTrue(f_flag);
+
+f_flag = false;
+assertThrows(function() { Number.prototype.toFixed.call(-1/0, f_obj) },
+ RangeError);
+assertTrue(f_flag);
diff --git a/src/3rdparty/v8/test/mjsunit/regress/regress-2444.js b/src/3rdparty/v8/test/mjsunit/regress/regress-2444.js
new file mode 100644
index 0000000..8fb8d8b
--- /dev/null
+++ b/src/3rdparty/v8/test/mjsunit/regress/regress-2444.js
@@ -0,0 +1,123 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+var flags;
+
+function resetFlags(size) {
+ flags = Array(size);
+ while (size--) flags[size] = 0;
+}
+
+function assertFlags(array) {
+ assertArrayEquals(array, flags);
+}
+
+function object_factory(flag_index, value, expected_flags) {
+ var obj = {};
+ obj.valueOf = function() {
+ assertFlags(expected_flags);
+ flags[flag_index]++;
+ return value;
+ }
+ return obj;
+}
+
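+// Each factory's valueOf asserts which earlier arguments were already
+// converted, so the tests verify that Math.max/min apply ToNumber to every
+// argument, left to right, even after a NaN has been encountered.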
+
+assertEquals(-Infinity, Math.max());
+
+resetFlags(1);
+assertEquals(NaN,
+ Math.max(object_factory(0, NaN, [0])));
+assertFlags([1]);
+
+resetFlags(2);
+assertEquals(NaN,
+ Math.max(object_factory(0, NaN, [0, 0]),
+ object_factory(1, 0, [1, 0])));
+assertFlags([1, 1]);
+
+resetFlags(3);
+assertEquals(NaN,
+ Math.max(object_factory(0, NaN, [0, 0, 0]),
+ object_factory(1, 0, [1, 0, 0]),
+ object_factory(2, 1, [1, 1, 0])));
+assertFlags([1, 1, 1]);
+
+resetFlags(3);
+assertEquals(NaN,
+ Math.max(object_factory(0, 2, [0, 0, 0]),
+ object_factory(1, 0, [1, 0, 0]),
+ object_factory(2, NaN, [1, 1, 0])));
+assertFlags([1, 1, 1]);
+
+resetFlags(3);
+assertEquals(2,
+ Math.max(object_factory(0, 2, [0, 0, 0]),
+ object_factory(1, 0, [1, 0, 0]),
+ object_factory(2, 1, [1, 1, 0])));
+assertFlags([1, 1, 1]);
+
+
+assertEquals(+Infinity, Math.min());
+
+resetFlags(1);
+assertEquals(NaN,
+ Math.min(object_factory(0, NaN, [0])));
+assertFlags([1]);
+
+resetFlags(2);
+assertEquals(NaN,
+ Math.min(object_factory(0, NaN, [0, 0]),
+ object_factory(1, 0, [1, 0])));
+assertFlags([1, 1]);
+
+resetFlags(3);
+assertEquals(NaN,
+ Math.min(object_factory(0, NaN, [0, 0, 0]),
+ object_factory(1, 0, [1, 0, 0]),
+ object_factory(2, 1, [1, 1, 0])));
+assertFlags([1, 1, 1]);
+
+resetFlags(3);
+assertEquals(NaN,
+ Math.min(object_factory(0, 2, [0, 0, 0]),
+ object_factory(1, 0, [1, 0, 0]),
+ object_factory(2, NaN, [1, 1, 0])));
+assertFlags([1, 1, 1]);
+
+resetFlags(3);
+assertEquals(0,
+ Math.min(object_factory(0, 2, [0, 0, 0]),
+ object_factory(1, 0, [1, 0, 0]),
+ object_factory(2, 1, [1, 1, 0])));
+assertFlags([1, 1, 1]);
+
+
diff --git a/src/3rdparty/v8/test/mjsunit/regress/regress-2451.js b/src/3rdparty/v8/test/mjsunit/regress/regress-2451.js
new file mode 100644
index 0000000..465e4e6
--- /dev/null
+++ b/src/3rdparty/v8/test/mjsunit/regress/regress-2451.js
@@ -0,0 +1,44 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
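+// Note (added for clarity): Math.round rounds ties toward +Infinity, so
+// -1.5 and -2.5 round up to -1 and -2, while -0.5000000000000001 lies just
+// below the tie and rounds to -1. The optimized code must agree.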
+function f() {
+ assertEquals(-1.0, Math.round(-1.5));
+ assertEquals(-2.0, Math.round(-2.5));
+ assertEquals(-1.0, Math.round(-0.5000000000000001));
+}
+
+f();
+f();
+%OptimizeFunctionOnNextCall(f);
+f();
+assertTrue(%GetOptimizationStatus(f) != 2);
+
diff --git a/src/3rdparty/v8/test/mjsunit/regress/regress-2489.js b/src/3rdparty/v8/test/mjsunit/regress/regress-2489.js
new file mode 100644
index 0000000..882c4f7
--- /dev/null
+++ b/src/3rdparty/v8/test/mjsunit/regress/regress-2489.js
@@ -0,0 +1,50 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+"use strict";
+
+function f(a, b) {
+ return g("c", "d");
+}
+
+function g(a, b) {
+ g.constructor.apply(this, arguments);
+}
+
+g.constructor = function(a, b) {
+ assertEquals("c", a);
+ assertEquals("d", b);
+}
+
+f("a", "b");
+f("a", "b");
+%OptimizeFunctionOnNextCall(f);
+f("a", "b");
+g.x = "deopt";
+f("a", "b");
diff --git a/src/3rdparty/v8/test/mjsunit/regress/regress-2499.js b/src/3rdparty/v8/test/mjsunit/regress/regress-2499.js
new file mode 100644
index 0000000..52aad87
--- /dev/null
+++ b/src/3rdparty/v8/test/mjsunit/regress/regress-2499.js
@@ -0,0 +1,43 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+function foo(word, nBits) {
+ return (word[1] >>> nBits) | (word[0] << (32 - nBits));
+}
+
+word = [0x1001, 0];
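+// With nBits == 1 this computes (0 >>> 1) | (0x1001 << 31): bit 0 of word[0]
+// lands in the sign bit, so the correct int32 result is -2147483648. The
+// assertion below only requires optimized and unoptimized results to agree.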
+
+var expected = foo(word, 1);
+foo(word, 1);
+%OptimizeFunctionOnNextCall(foo);
+var optimized = foo(word, 1);
+assertEquals(expected, optimized);
diff --git a/src/3rdparty/v8/test/mjsunit/regress/regress-2537.js b/src/3rdparty/v8/test/mjsunit/regress/regress-2537.js
new file mode 100644
index 0000000..c6b5af9
--- /dev/null
+++ b/src/3rdparty/v8/test/mjsunit/regress/regress-2537.js
@@ -0,0 +1,47 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+var large_int = 0x40000000;
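+// 0x40000000 is the smallest integer that no longer fits in a 31-bit Smi on
+// 32-bit ports, so tagging it without an overflow check corrupts the value.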
+
+function foo(x, expected) {
+ assertEquals(expected, x); // This succeeds.
+ x += 0; // Force int32 representation so that CompareIDAndBranch is used.
+ if (3 != x) {
+ x += 0; // Poor man's "iDef".
+ // Fails due to Smi-tagging without overflow check.
+ assertEquals(expected, x);
+ }
+}
+
+foo(1, 1);
+foo(3, 3);
+%OptimizeFunctionOnNextCall(foo);
+foo(large_int, large_int);
diff --git a/src/3rdparty/v8/test/mjsunit/regress/regress-2539.js b/src/3rdparty/v8/test/mjsunit/regress/regress-2539.js
new file mode 100644
index 0000000..5d263f8
--- /dev/null
+++ b/src/3rdparty/v8/test/mjsunit/regress/regress-2539.js
@@ -0,0 +1,57 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+"use strict";
+var dispatcher = {};
+dispatcher.func = C;
+
+function A() {
+ B(10, 11);
+}
+
+function B(x,y) {
+ x = 0; y = 0;
+ dispatcher.func.apply(this, arguments);
+ assertSame(2, arguments.length);
+ assertSame(10, arguments[0]);
+ assertSame(11, arguments[1]);
+}
+
+function C(x,y) {
+ assertSame(2, arguments.length);
+ assertSame(10, arguments[0]);
+ assertSame(11, arguments[1]);
+}
+
+A();
+A();
+%OptimizeFunctionOnNextCall(A);
+A();
diff --git a/src/3rdparty/v8/test/mjsunit/regress/regress-492.js b/src/3rdparty/v8/test/mjsunit/regress/regress-492.js
index a8b783b..53b3195 100644
--- a/src/3rdparty/v8/test/mjsunit/regress/regress-492.js
+++ b/src/3rdparty/v8/test/mjsunit/regress/regress-492.js
@@ -29,7 +29,7 @@
// This should not hit any asserts in debug mode on ARM.
function function_with_n_args(n) {
- var source = '(function f(';
+ var source = '(function f' + n + '(';
for (var arg = 0; arg < n; arg++) {
if (arg != 0) source += ',';
source += 'arg' + arg;
@@ -50,3 +50,41 @@ for (args = 500; args < 520; args++) {
for (args = 1019; args < 1041; args++) {
function_with_n_args(args);
}
+
+
+function foo(
+ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
+ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
+ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
+ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
+ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
+ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
+ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
+ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
+ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
+ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
+ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
+ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
+ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
+ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
+ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
+ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
+ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
+ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
+ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
+ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
+ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
+ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
+ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
+ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
+ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
+ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
+ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
+ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
+ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
+ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
+ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
+ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x
+) {}
+
+for (var i = 0; i < 10000; ++i) foo();
diff --git a/src/3rdparty/v8/test/mjsunit/regress/regress-crbug-135066.js b/src/3rdparty/v8/test/mjsunit/regress/regress-crbug-135066.js
index 1aeca8b..35e9ff8 100644
--- a/src/3rdparty/v8/test/mjsunit/regress/regress-crbug-135066.js
+++ b/src/3rdparty/v8/test/mjsunit/regress/regress-crbug-135066.js
@@ -29,25 +29,27 @@
var filler = "//" + new Array(1024).join('x');
// Test strict eval in global context.
-eval(
+assertEquals(23, eval(
"'use strict';" +
"var x = 23;" +
"var f = function bozo1() {" +
" return x;" +
"};" +
"assertSame(23, f());" +
+ "f;" +
filler
-);
+)());
// Test default eval in strict context.
-(function() {
+assertEquals(42, (function() {
"use strict";
- eval(
+ return eval(
"var y = 42;" +
"var g = function bozo2() {" +
" return y;" +
"};" +
"assertSame(42, g());" +
+ "g;" +
filler
- );
-})();
+ )();
+})());
diff --git a/src/3rdparty/v8/test/mjsunit/regress/regress-crbug-146910.js b/src/3rdparty/v8/test/mjsunit/regress/regress-crbug-146910.js
index 120f809..1b2a60a 100644
--- a/src/3rdparty/v8/test/mjsunit/regress/regress-crbug-146910.js
+++ b/src/3rdparty/v8/test/mjsunit/regress/regress-crbug-146910.js
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -25,14 +25,9 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-var x = [];
-assertSame(0, x.length);
-assertSame(undefined, x[0]);
+assertEquals(String.fromCharCode(97, 220, 256), 'a' + '\u00DC' + '\u0100');
+assertEquals(String.fromCharCode(97, 220, 256), 'a\u00DC\u0100');
-Object.defineProperty(x, '0', { value: 7, configurable: false });
-assertSame(1, x.length);
-assertSame(7, x[0]);
+assertEquals(['a', 'b', '\xdc'], ['b', '\xdc', 'a'].sort());
+assertEquals(['\xfc\xdc', '\xfc'], new RegExp('(\xdc)\\1', 'i').exec('\xfc\xdc'));
-x.length = 0;
-assertSame(1, x.length);
-assertSame(7, x[0]);
diff --git a/src/3rdparty/v8/test/mjsunit/regress/regress-crbug-160010.js b/src/3rdparty/v8/test/mjsunit/regress/regress-crbug-160010.js
index 266e545..586bddd 100644
--- a/src/3rdparty/v8/test/mjsunit/regress/regress-crbug-160010.js
+++ b/src/3rdparty/v8/test/mjsunit/regress/regress-crbug-160010.js
@@ -25,9 +25,13 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Flags: --allow-natives-syntax
+
var str = "a";
for (var i = 0; i < 28; i++) {
str += str;
+ %FlattenString(str); // Evil performance hack
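+  // Presumably the hack avoids building a 28-level ConsString tree that
+  // JSON.stringify would otherwise have to flatten all at once.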
}
JSON.stringify(str);
-
diff --git a/src/3rdparty/v8/test/mjsunit/regress/regress-crbug-162085.js b/src/3rdparty/v8/test/mjsunit/regress/regress-crbug-162085.js
new file mode 100644
index 0000000..a53b2c9
--- /dev/null
+++ b/src/3rdparty/v8/test/mjsunit/regress/regress-crbug-162085.js
@@ -0,0 +1,71 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Ensure extending an empty packed smi array with a double initializes the
+// array with holes.
+var a = [1,2,3];
+a.length = 0;
+a[0] = 1.4;
+assertEquals(1.4, a[0]);
+assertEquals(undefined, a[1]);
+assertEquals(undefined, a[2]);
+assertEquals(undefined, a[3]);
+
+// Ensure the double array grow stub initializes the array with holes.
+function grow_store(a,i,v) {
+ a[i] = v;
+}
+
+var a2 = [1.3];
+grow_store(a2,1,1.4);
+a2.length = 0;
+grow_store(a2,0,1.5);
+assertEquals(1.5, a2[0]);
+assertEquals(undefined, a2[1]);
+assertEquals(undefined, a2[2]);
+assertEquals(undefined, a2[3]);
+
+// Check storing objects using the double grow stub.
+var a3 = [1.3];
+var o = {};
+grow_store(a3, 1, o);
+assertEquals(1.3, a3[0]);
+assertEquals(o, a3[1]);
+
+// Ensure the double array grow stub initializes the array with holes.
+function grow_store2(a,i,v) {
+ a[i] = v;
+}
+
+var a4 = [1.3];
+grow_store2(a4,1,1.4);
+a4.length = 0;
+grow_store2(a4,0,1);
+assertEquals(1, a4[0]);
+assertEquals(undefined, a4[1]);
+assertEquals(undefined, a4[2]);
+assertEquals(undefined, a4[3]);
diff --git a/src/3rdparty/v8/test/mjsunit/regress/regress-crbug-163530.js b/src/3rdparty/v8/test/mjsunit/regress/regress-crbug-163530.js
new file mode 100644
index 0000000..7abae14
--- /dev/null
+++ b/src/3rdparty/v8/test/mjsunit/regress/regress-crbug-163530.js
@@ -0,0 +1,80 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+// Test materialization of an arguments object with unknown argument
+// values in non-strict mode (length has to be zero).
+(function() {
+ var deoptimize = { deopt:true };
+ var object = {};
+
+ object.a = function A(x, y, z) {
+ assertSame(0, arguments.length);
+ return this.b();
+ };
+
+ object.b = function B() {
+ assertSame(0, arguments.length);
+ deoptimize.deopt;
+ return arguments.length;
+ };
+
+ assertSame(0, object.a());
+ assertSame(0, object.a());
+ %OptimizeFunctionOnNextCall(object.a);
+ assertSame(0, object.a());
+ delete deoptimize.deopt;
+ assertSame(0, object.a());
+})();
+
+
+// Test materialization of an arguments object with unknown argument
+// values in strict mode (length is allowed to exceed stack size).
+(function() {
+ 'use strict';
+ var deoptimize = { deopt:true };
+ var object = {};
+
+ object.a = function A(x, y, z) {
+ assertSame(0, arguments.length);
+ return this.b(1, 2, 3, 4, 5, 6, 7, 8);
+ };
+
+ object.b = function B(a, b, c, d, e, f, g, h) {
+ assertSame(8, arguments.length);
+ deoptimize.deopt;
+ return arguments.length;
+ };
+
+ assertSame(8, object.a());
+ assertSame(8, object.a());
+ %OptimizeFunctionOnNextCall(object.a);
+ assertSame(8, object.a());
+ delete deoptimize.deopt;
+ assertSame(8, object.a());
+})();
diff --git a/src/3rdparty/v8/test/mjsunit/regress/regress-crbug-168545.js b/src/3rdparty/v8/test/mjsunit/regress/regress-crbug-168545.js
new file mode 100644
index 0000000..acc065e
--- /dev/null
+++ b/src/3rdparty/v8/test/mjsunit/regress/regress-crbug-168545.js
@@ -0,0 +1,34 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+var o = {};
+Object.defineProperty(o, "length", { get: function() { throw "bail"; }});
+assertThrows("new Int16Array(o);");
+
+var a = [];
+Object.defineProperty(a, "0", { get: function() { throw "bail"; }});
+assertThrows("new Int16Array(a);");
diff --git a/src/3rdparty/v8/test/mjsunit/regress/regress-crbug-170856.js b/src/3rdparty/v8/test/mjsunit/regress/regress-crbug-170856.js
new file mode 100644
index 0000000..2e73b12
--- /dev/null
+++ b/src/3rdparty/v8/test/mjsunit/regress/regress-crbug-170856.js
@@ -0,0 +1,33 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+r = new RegExp("a");
+for (var i = 0; i < 100; i++) {
+ r["abc" + i] = i;
+}
+"zzzz".replace(r, "");
+assertEquals(0, r.lastIndex);
diff --git a/src/3rdparty/v8/test/mjsunit/regress/regress-crbug-172345.js b/src/3rdparty/v8/test/mjsunit/regress/regress-crbug-172345.js
new file mode 100644
index 0000000..711501c
--- /dev/null
+++ b/src/3rdparty/v8/test/mjsunit/regress/regress-crbug-172345.js
@@ -0,0 +1,34 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+function f(a,i) {
+ return a[i];
+}
+
+f([1,2,3], "length");
+f([1,2,3], "length");
+f([1,2,3], 2);
diff --git a/src/3rdparty/v8/test/mjsunit/regress/regress-crbug-173907.js b/src/3rdparty/v8/test/mjsunit/regress/regress-crbug-173907.js
new file mode 100644
index 0000000..9f92fef
--- /dev/null
+++ b/src/3rdparty/v8/test/mjsunit/regress/regress-crbug-173907.js
@@ -0,0 +1,88 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+var X = 1.1;
+var K = 0.5;
+
+var O = 0;
+var result = new Float64Array(2);
+
+function spill() {
+ try { } catch (e) { }
+}
+
+function buggy() {
+ var v = X;
+ var phi1 = v + K;
+ var phi2 = v - K;
+
+ spill(); // At this point initial values for phi1 and phi2 are spilled.
+
+ var xmm1 = v;
+ var xmm2 = v*v*v;
+ var xmm3 = v*v*v*v;
+ var xmm4 = v*v*v*v*v;
+ var xmm5 = v*v*v*v*v*v;
+ var xmm6 = v*v*v*v*v*v*v;
+ var xmm7 = v*v*v*v*v*v*v*v;
+ var xmm8 = v*v*v*v*v*v*v*v*v;
+
+ // All registers are blocked and phis for phi1 and phi2 are spilled because
+ // their left (incoming) value is spilled, there are no free registers,
+ // and phis themselves have only ANY-policy uses.
+
+ for (var x = 0; x < 2; x++) {
+ xmm1 += xmm1 * xmm6;
+ xmm2 += xmm1 * xmm5;
+ xmm3 += xmm1 * xmm4;
+ xmm4 += xmm1 * xmm3;
+ xmm5 += xmm1 * xmm2;
+
+ // Now swap values of phi1 and phi2 to create cycle between phis.
+ var t = phi1;
+ phi1 = phi2;
+ phi2 = t;
+ }
+
+ // Now we want to get values of phi1 and phi2. However we would like to
+ // do it in a way that does not produce any uses of phi1&phi2 that have
+ // a register beneficial policy. How? We just hide these uses behind phis.
+ result[0] = (O === 0) ? phi1 : phi2;
+ result[1] = (O !== 0) ? phi1 : phi2;
+}
+
+function test() {
+ buggy();
+ assertArrayEquals([X + K, X - K], result);
+}
+
+test();
+test();
+%OptimizeFunctionOnNextCall(buggy);
+test();
diff --git a/src/3rdparty/v8/test/mjsunit/regress/regress-crbug-173974.js b/src/3rdparty/v8/test/mjsunit/regress/regress-crbug-173974.js
new file mode 100644
index 0000000..905bd60
--- /dev/null
+++ b/src/3rdparty/v8/test/mjsunit/regress/regress-crbug-173974.js
@@ -0,0 +1,36 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+function f() {
+ var count = "";
+ count[0] --;
+}
+f();
+%OptimizeFunctionOnNextCall(f);
+f();
diff --git a/src/3rdparty/v8/test/mjsunit/regress/regress-crbug-178790.js b/src/3rdparty/v8/test/mjsunit/regress/regress-crbug-178790.js
new file mode 100644
index 0000000..57071ea
--- /dev/null
+++ b/src/3rdparty/v8/test/mjsunit/regress/regress-crbug-178790.js
@@ -0,0 +1,52 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Create a regexp in the form of a?a?...a? so that fully
+// traversing the entire graph would be prohibitively expensive.
+// This should not time out.
+var r1 = "";
+for (var i = 0; i < 1000; i++) {
+ r1 += "a?";
+}
+"test".match(RegExp(r1));
+
+var r2 = "";
+for (var i = 0; i < 100; i++) {
+ r2 += "(a?|b?|c?|d?|e?|f?|g?)";
+}
+"test".match(RegExp(r2));
+
+// Create a regexp in the form of ((..(a)a..)a.
+// Compiling it causes EatsAtLeast to reach the maximum
+// recursion depth possible with a given budget.
+// This should not cause a stack overflow.
+var r3 = "a";
+for (var i = 0; i < 1000; i++) {
+ r3 = "(" + r3 + ")a";
+}
+"test".match(RegExp(r3));
+
diff --git a/src/3rdparty/v8/test/mjsunit/regress/regress-crbug-18639.js b/src/3rdparty/v8/test/mjsunit/regress/regress-crbug-18639.js
index 23e225a..4f4bb7c 100644
--- a/src/3rdparty/v8/test/mjsunit/regress/regress-crbug-18639.js
+++ b/src/3rdparty/v8/test/mjsunit/regress/regress-crbug-18639.js
@@ -27,8 +27,12 @@
// See http://crbug.com/18639
-toString = toString;
-__defineGetter__("z", (0).toLocaleString);
-z;
-z;
-((0).toLocaleString)();
+try {
+ toString = toString;
+ __defineGetter__("z", (0).toLocaleString);
+ z;
+ z;
+ ((0).toLocaleString)();
+} catch (e) {
+ assertInstanceof(e, TypeError);
+}
\ No newline at end of file
diff --git a/src/3rdparty/v8/test/mjsunit/regress/regress-json-stringify-gc.js b/src/3rdparty/v8/test/mjsunit/regress/regress-json-stringify-gc.js
index d732ebc..c0a71bf 100644
--- a/src/3rdparty/v8/test/mjsunit/regress/regress-json-stringify-gc.js
+++ b/src/3rdparty/v8/test/mjsunit/regress/regress-json-stringify-gc.js
@@ -37,5 +37,5 @@ for (var i = 0; i < 10000; i++) a.push(new_space_string);
// screw up reading from the correct location.
json1 = JSON.stringify(a);
json2 = JSON.stringify(a);
-assertEquals(json1, json2, "GC caused JSON.stringify to fail.");
+assertTrue(json1 == json2, "GC caused JSON.stringify to fail.");
diff --git a/src/3rdparty/v8/test/mjsunit/regress/regress-latin-1.js b/src/3rdparty/v8/test/mjsunit/regress/regress-latin-1.js
new file mode 100644
index 0000000..a988ebd
--- /dev/null
+++ b/src/3rdparty/v8/test/mjsunit/regress/regress-latin-1.js
@@ -0,0 +1,90 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+assertEquals(String.fromCharCode(97, 220, 256), 'a' + '\u00DC' + '\u0100');
+assertEquals(String.fromCharCode(97, 220, 256), 'a\u00DC\u0100');
+
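+// JSON.stringify must emit U+0080 itself rather than an escape sequence;
+// index 1 is the first character after the opening quote.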
+assertEquals(0x80, JSON.stringify("\x80").charCodeAt(1));
+
+assertEquals(['a', 'b', '\xdc'], ['b', '\xdc', 'a'].sort());
+
+assertEquals(['\xfc\xdc', '\xfc'], new RegExp('(\xdc)\\1', 'i').exec('\xfc\xdc'));
+// Same test but for all values in Latin-1 range.
+var total_lo = 0;
+for (var i = 0; i < 0xff; i++) {
+ var base = String.fromCharCode(i);
+ var escaped = base;
+ if (base == '(' || base == ')' || base == '*' || base == '+' ||
+ base == '?' || base == '[' || base == ']' || base == '\\' ||
+ base == '$' || base == '^' || base == '|') {
+ escaped = '\\' + base;
+ }
+ var lo = String.fromCharCode(i + 0x20);
+ var base_result = new RegExp('(' + escaped + ')\\1', 'i').exec(base + base);
+ assertEquals([base + base, base], base_result);
+ var lo_result = new RegExp('(' + escaped + ')\\1', 'i').exec(base + lo);
+ if (base.toLowerCase() == lo) {
+ assertEquals([base + lo, base], lo_result);
+ total_lo++;
+ } else {
+ assertEquals(null, lo_result);
+ }
+}
+// Should have hit the branch for the following char codes:
+// [A-Z], [192-222] but not 215
+assertEquals((90-65+1)+(222-192-1+1), total_lo);
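+// That is 26 ASCII letters plus 30 Latin-1 uppercase letters; 215 (U+00D7,
+// the multiplication sign) has no lower-case counterpart.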
+
+// Latin-1 whitespace character (U+00A0, no-break space)
+assertEquals(1, +(String.fromCharCode(0xA0) + '1'));
+
+// Latin-1 \W characters
+assertEquals(["+\u00a3", "=="], "+\u00a3==".match(/\W\W/g));
+
+// Latin-1 character that uppercases out of Latin-1.
+assertTrue(/\u0178/i.test('\u00ff'));
+
+// Unicode equivalence
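+// U+00B5 (micro sign), U+03BC (Greek small mu) and U+039C (Greek capital mu)
+// all case-fold to the same character.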
+assertTrue(/\u039c/i.test('\u00b5'));
+assertTrue(/\u039c/i.test('\u03bc'));
+assertTrue(/\u00b5/i.test('\u03bc'));
+// Unicode equivalence ranges
+assertTrue(/[\u039b-\u039d]/i.test('\u00b5'));
+assertFalse(/[^\u039b-\u039d]/i.test('\u00b5'));
+assertFalse(/[\u039b-\u039d]/.test('\u00b5'));
+assertTrue(/[^\u039b-\u039d]/.test('\u00b5'));
+
+// Check a regression in QuoteJsonSlow and WriteQuoteJsonString
+for (var testNumber = 0; testNumber < 2; testNumber++) {
+ var testString = "\xdc";
+ var loopLength = testNumber == 0 ? 0 : 20;
+ for (var i = 0; i < loopLength; i++ ) {
+ testString += testString;
+ }
+ var stringified = JSON.stringify({"test" : testString}, null, 0);
+ var stringifiedExpected = '{"test":"' + testString + '"}';
+ assertEquals(stringifiedExpected, stringified);
+}
diff --git a/src/3rdparty/v8/test/mjsunit/regress/regress-observe-empty-double-array.js b/src/3rdparty/v8/test/mjsunit/regress/regress-observe-empty-double-array.js
new file mode 100644
index 0000000..4b65169
--- /dev/null
+++ b/src/3rdparty/v8/test/mjsunit/regress/regress-observe-empty-double-array.js
@@ -0,0 +1,38 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --harmony-observation --allow-natives-syntax
+//
+// Test passes if it does not crash.
+
+arr = [1.1];
+Object.observe(arr, function(){});
+arr.length = 0;
+// TODO(observe): we currently disallow fast elements for observed objects.
+// assertTrue(%HasFastDoubleElements(arr));
+// Should not crash
+arr.push(1.1);
diff --git a/src/3rdparty/v8/test/mjsunit/shift-for-integer-div.js b/src/3rdparty/v8/test/mjsunit/shift-for-integer-div.js
new file mode 100644
index 0000000..0fe1262
--- /dev/null
+++ b/src/3rdparty/v8/test/mjsunit/shift-for-integer-div.js
@@ -0,0 +1,59 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+function divp4(x) {
+ return x / 4;
+}
+
+for (var i = 0; i < 10000; i+=4) {
+ assertEquals(i >> 2, divp4(i));
+}
+
+assertEquals(0.5, divp4(2));
+
+function divn4(x) {
+ return x / (-4);
+}
+
+for (var i = 0; i < 10000; i+=4) {
+ assertEquals(-(i >> 2), divn4(i));
+}
+
+assertEquals(-0, divn4(0));
+
+
+function divn1(x) {
+ return x / (-1);
+}
+
+for (var i = 0; i < 10000; i++) {
+ assertEquals(-i, divn1(i));
+}
+
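+// Negating min_int overflows int32, so the optimized code must still
+// produce the correct double result 2147483648.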
+var min_int = -(0x7FFFFFFF)-1;
+assertEquals(-min_int, divn1(min_int));
+
diff --git a/src/3rdparty/v8/test/mjsunit/stack-traces-gc.js b/src/3rdparty/v8/test/mjsunit/stack-traces-gc.js
new file mode 100644
index 0000000..dd878f2
--- /dev/null
+++ b/src/3rdparty/v8/test/mjsunit/stack-traces-gc.js
@@ -0,0 +1,119 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-gc --allow-natives-syntax
+
+var fired = [];
+for (var i = 0; i < 100; i++) fired[i] = false;
+
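+// %MarkOneShotGetter presumably disguises the closure as the one-shot
+// "stack" getter that V8 installs on error objects, so firing it reveals
+// GC-triggered stack trace formatting.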
+function getter_function(i) {
+ return %MarkOneShotGetter( function() { fired[i] = true; } );
+}
+
+// Error objects that die young.
+for (var i = 0; i < 100; i++) {
+ var error = new Error();
+ // Replace the getter to observe whether it has been fired,
+ // and disguise it as the original getter.
+ var getter = getter_function(i);
+ error.__defineGetter__("stack", getter);
+
+ error = undefined;
+}
+
+gc();
+for (var i = 0; i < 100; i++) {
+ assertFalse(fired[i]);
+}
+
+// Error objects that are kept alive.
+var array = [];
+for (var i = 0; i < 100; i++) {
+ var error = new Error();
+ var getter = getter_function(i);
+ // Replace the getter to observe whether it has been fired,
+ // and disguise it as the original getter.
+ error.__defineGetter__("stack", getter);
+
+ array.push(error);
+ error = undefined;
+}
+
+gc();
+// We don't expect all stack traces to be formatted after only one GC,
+// but at least the first one should have been.
+assertTrue(fired[0]);
+
+for (var i = 0; i < 10; i++) gc();
+for (var i = 0; i < 100; i++) assertTrue(fired[i]);
+
+// Error objects with custom stack getter.
+var custom_error = new Error();
+var custom_getter_fired = false;
+custom_error.__defineGetter__("stack",
+ function() { custom_getter_fired = true; });
+gc();
+assertFalse(custom_getter_fired);
+
+// Check that formatting caused by GC is not somehow observable.
+var error;
+
+var obj = { foo: function foo() { throw new Error(); } };
+
+try {
+ obj.foo();
+} catch (e) {
+ delete obj.foo;
+ Object.defineProperty(obj, 'foo', {
+ get: function() { assertUnreachable(); }
+ });
+ error = e;
+}
+
+gc();
+
+Object.defineProperty(Array.prototype, '0', {
+ get: function() { assertUnreachable(); }
+});
+
+try {
+ throw new Error();
+} catch (e) {
+ error = e;
+}
+
+gc();
+
+String.prototype.indexOf = function() { assertUnreachable(); };
+String.prototype.lastIndexOf = function() { assertUnreachable(); };
+var obj = { method: function() { throw Error(); } };
+try {
+ obj.method();
+} catch (e) {
+ error = e;
+}
+
+gc();
diff --git a/src/3rdparty/v8/test/mjsunit/stack-traces-overflow.js b/src/3rdparty/v8/test/mjsunit/stack-traces-overflow.js
new file mode 100644
index 0000000..7722e93
--- /dev/null
+++ b/src/3rdparty/v8/test/mjsunit/stack-traces-overflow.js
@@ -0,0 +1,122 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+function rec1(a) { rec1(a+1); }
+function rec2(a) { rec3(a+1); }
+function rec3(a) { rec2(a+1); }
+
+// Test stack trace getter and setter.
+try {
+ rec1(0);
+} catch (e) {
+ assertTrue(e.stack.indexOf("rec1") > 0);
+ e.stack = "123";
+ assertEquals("123", e.stack);
+}
+
+// Test setter w/o calling the getter.
+try {
+ rec2(0);
+} catch (e) {
+ assertTrue(e.stack.indexOf("rec2") > 0);
+ assertTrue(e.stack.indexOf("rec3") > 0);
+ e.stack = "123";
+ assertEquals("123", e.stack);
+}
+
+// Test getter to make sure setter does not affect the boilerplate.
+try {
+ rec1(0);
+} catch (e) {
+ assertTrue(e.stack.indexOf("rec1") > 0);
+ assertInstanceof(e, RangeError);
+}
+
+
+// Check setting/getting stack property on the prototype chain.
+function testErrorPrototype(prototype) {
+ var object = {};
+ object.__proto__ = prototype;
+ object.stack = "123";
+ assertEquals("123", object.stack);
+ assertTrue("123" != prototype.stack);
+}
+
+try {
+ rec1(0);
+} catch (e) {
+ e.stack;
+ testErrorPrototype(e);
+}
+
+try {
+ rec1(0);
+} catch (e) {
+ testErrorPrototype(e);
+}
+
+try {
+ throw new Error();
+} catch (e) {
+ testErrorPrototype(e);
+}
+
+Error.stackTraceLimit = 3;
+try {
+ rec1(0);
+} catch (e) {
+ assertEquals(4, e.stack.split('\n').length);
+}
+
+Error.stackTraceLimit = 25.9;
+try {
+ rec1(0);
+} catch (e) {
+ assertEquals(26, e.stack.split('\n').length);
+}
+
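+// A NaN limit yields no stack frames; only the message line remains.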
+Error.stackTraceLimit = NaN;
+try {
+ rec1(0);
+} catch (e) {
+ assertEquals(1, e.stack.split('\n').length);
+}
+
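+// A non-numeric limit disables stack capture entirely.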
+Error.stackTraceLimit = "not a number";
+try {
+ rec1(0);
+} catch (e) {
+ assertEquals(undefined, e.stack);
+}
+
+Error.stackTraceLimit = 3;
+Error = ""; // Overwrite Error in the global object.
+try {
+ rec1(0);
+} catch (e) {
+ assertEquals(4, e.stack.split('\n').length);
+}
diff --git a/src/3rdparty/v8/test/mjsunit/stack-traces.js b/src/3rdparty/v8/test/mjsunit/stack-traces.js
index 438eec9..b5d58fa 100644
--- a/src/3rdparty/v8/test/mjsunit/stack-traces.js
+++ b/src/3rdparty/v8/test/mjsunit/stack-traces.js
@@ -288,4 +288,42 @@ testOmittedBuiltin(function(){ [thrower, 2].sort(function (a,b) {
}, "QuickSort");
// Omitted because ADD from runtime.js is non-native builtin.
-testOmittedBuiltin(function(){ thrower + 2; }, "ADD");
\ No newline at end of file
+testOmittedBuiltin(function(){ thrower + 2; }, "ADD");
+
+var error = new Error();
+error.toString = function() { assertUnreachable(); };
+error.stack;
+
+error = new Error();
+error.name = { toString: function() { assertUnreachable(); }};
+error.message = { toString: function() { assertUnreachable(); }};
+error.stack;
+
+error = new Error();
+Array.prototype.push = function(x) { assertUnreachable(); };
+Array.prototype.join = function(x) { assertUnreachable(); };
+error.stack;
+
+var fired = false;
+error = new Error({ toString: function() { fired = true; } });
+assertTrue(fired);
+error.stack;
+assertTrue(fired);
+
+// Check that throwing exception in a custom stack trace formatting function
+// does not lead to recursion.
+Error.prepareStackTrace = function() { throw new Error("abc"); };
+var message;
+try {
+ throw new Error();
+} catch (e) {
+ message = e.message;
+}
+
+assertEquals("abc", message);
+
+// Test that modifying Error.prepareStackTrace by itself works.
+Error.prepareStackTrace = function() { Error.prepareStackTrace = "custom"; };
+new Error();
+
+assertEquals("custom", Error.prepareStackTrace);
diff --git a/src/3rdparty/v8/test/mjsunit/strict-mode.js b/src/3rdparty/v8/test/mjsunit/strict-mode.js
index 9c9bdfd..5fb404a 100644
--- a/src/3rdparty/v8/test/mjsunit/strict-mode.js
+++ b/src/3rdparty/v8/test/mjsunit/strict-mode.js
@@ -1141,9 +1141,9 @@ function CheckPillDescriptor(func, name) {
function strict() {
"use strict";
- return_my_caller();
+ return return_my_caller();
}
- assertThrows(strict, TypeError);
+ assertSame(null, strict());
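+ // return_my_caller's caller is a strict function, which is now reported
+ // as null instead of throwing a TypeError.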
function non_strict() {
return return_my_caller();
@@ -1155,32 +1155,57 @@ function CheckPillDescriptor(func, name) {
(function TestNonStrictFunctionCallerPill() {
function strict(n) {
"use strict";
- non_strict(n);
+ return non_strict(n);
}
function recurse(n, then) {
if (n > 0) {
- recurse(n - 1);
+ return recurse(n - 1, then);
} else {
return then();
}
}
function non_strict(n) {
- recurse(n, function() { non_strict.caller; });
+ return recurse(n, function() { return non_strict.caller; });
}
function test(n) {
- try {
- recurse(n, function() { strict(n); });
- } catch(e) {
- return e instanceof TypeError;
+ return recurse(n, function() { return strict(n); });
+ }
+
+ for (var i = 0; i < 10; i ++) {
+ assertSame(null, test(i));
+ }
+})();
+
+
+(function TestNonStrictFunctionCallerDescriptorPill() {
+ function strict(n) {
+ "use strict";
+ return non_strict(n);
+ }
+
+ function recurse(n, then) {
+ if (n > 0) {
+ return recurse(n - 1, then);
+ } else {
+ return then();
}
- return false;
+ }
+
+ function non_strict(n) {
+ return recurse(n, function() {
+ return Object.getOwnPropertyDescriptor(non_strict, "caller").value;
+ });
+ }
+
+ function test(n) {
+ return recurse(n, function() { return strict(n); });
}
for (var i = 0; i < 10; i ++) {
- assertEquals(test(i), true);
+ assertSame(null, test(i));
}
})();
diff --git a/src/3rdparty/v8/test/mjsunit/string-natives.js b/src/3rdparty/v8/test/mjsunit/string-natives.js
new file mode 100644
index 0000000..b1ec875
--- /dev/null
+++ b/src/3rdparty/v8/test/mjsunit/string-natives.js
@@ -0,0 +1,72 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-gc --allow-natives-syntax
+
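+// Assumed semantics of the natives used below: %NewString(length, one_byte)
+// allocates an uninitialized sequential string and %TruncateString shortens
+// it in place, as the assertions suggest.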
+function test() {
+ var s1 = %NewString(26, true);
+ for (var i = 0; i < 26; i++) %_OneByteSeqStringSetChar(s1, i, i+65);
+ assertEquals("ABCDEFGHIJKLMNOPQRSTUVWXYZ", s1);
+ s1 = %TruncateString(s1, 13);
+ assertEquals("ABCDEFGHIJKLM", s1);
+
+ var s2 = %NewString(26, false);
+ for (var i = 0; i < 26; i++) %_TwoByteSeqStringSetChar(s2, i, i+65);
+ assertEquals("ABCDEFGHIJKLMNOPQRSTUVWXYZ", s2);
+ s2 = %TruncateString(s2, 13);
+ assertEquals("ABCDEFGHIJKLM", s2);
+
+ var s3 = %NewString(26, false);
+ for (var i = 0; i < 26; i++) %_TwoByteSeqStringSetChar(s3, i, i+1000);
+ for (var i = 0; i < 26; i++) assertEquals(String.fromCharCode(i+1000), s3[i]);
+
+ var a = [];
+ for (var i = 0; i < 1000; i++) {
+ var s = %NewString(10000, i % 2 == 1);
+ a.push(s);
+ }
+
+ gc();
+
+ for (var i = 0; i < 1000; i++) {
+ assertEquals(10000, a[i].length);
+ a[i] = %TruncateString(a[i], 5000);
+ }
+
+ gc();
+
+ for (var i = 0; i < 1000; i++) {
+ assertEquals(5000, a[i].length);
+ }
+}
+
+
+test();
+test();
+%OptimizeFunctionOnNextCall(test);
+test();
+
diff --git a/src/3rdparty/v8/test/mjsunit/string-replace.js b/src/3rdparty/v8/test/mjsunit/string-replace.js
index 6b022df..de92115 100644
--- a/src/3rdparty/v8/test/mjsunit/string-replace.js
+++ b/src/3rdparty/v8/test/mjsunit/string-replace.js
@@ -212,3 +212,64 @@ var str = 'She sells seashells by the seashore.';
var re = /sh/g;
assertEquals('She sells sea$schells by the sea$schore.',
str.replace(re,"$$" + 'sch'))
+
+
+var replace_obj = { length: 0, toString: function() { return "x"; }};
+assertEquals("axc", "abc".replace(/b/, replace_obj));
+assertEquals("axc", "abc".replace(/b/g, replace_obj));
+
+var search_obj = { length: 1, toString: function() { return "b"; }};
+assertEquals("axc", "abc".replace(search_obj, function() { return "x"; }));
+
+var side_effect_flag = false;
+var replace_obj_side_effects = {
+ toString: function() { side_effect_flag = true; return "x"; }
+};
+assertEquals("abc", "abc".replace(/z/g, replace_obj_side_effects));
+assertTrue(side_effect_flag); // Side effect triggers even without a match.
+
+var regexp99pattern = "";
+var subject = "";
+for (var i = 0; i < 99; i++) {
+ regexp99pattern += "(.)";
+ subject += String.fromCharCode(i + 24);
+}
+
+function testIndices99(re) {
+ // Test $1 .. $99
+ for (var i = 1; i < 100; i++) {
+ assertEquals(String.fromCharCode(i + 23),
+ subject.replace(re, "$" + i));
+ }
+
+ // Test $01 .. $09
+ for (var i = 1; i < 10; i++) {
+ assertEquals(String.fromCharCode(i + 23),
+ subject.replace(re, "$0" + i));
+ }
+
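+ // "$0" and "$00" are not capture references; "$100" is read as $10
+ // followed by a literal "0".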
+ assertEquals("$0", subject.replace(re, "$0"));
+ assertEquals("$00", subject.replace(re, "$00"));
+ assertEquals(String.fromCharCode(10 + 23) + "0",
+ subject.replace(re, "$100"));
+}
+
+testIndices99(new RegExp(regexp99pattern));
+testIndices99(new RegExp(regexp99pattern, "g"));
+
+var regexp59pattern = "";
+for (var i = 0; i < 59; i++) regexp59pattern += "(.)";
+
+function testIndices59(re) {
+ // Test $60 .. $99. Captures only reach up to 59. The spec leaves the
+ // behavior implementation-defined. We interpret $60 as $6 followed by
+ // "0", $61 as $6 followed by "1", and so on.
+ var tail = subject.substr(59);
+ for (var i = 60; i < 100; i++) {
+ assertEquals(String.fromCharCode(i / 10 + 23) + (i % 10) + tail,
+ subject.replace(re, "$" + i));
+ }
+}
+
+testIndices59(new RegExp(regexp59pattern));
+testIndices59(new RegExp(regexp59pattern, "g"));
diff --git a/src/3rdparty/v8/test/mjsunit/string-split.js b/src/3rdparty/v8/test/mjsunit/string-split.js
index d8412f0..1308244 100644
--- a/src/3rdparty/v8/test/mjsunit/string-split.js
+++ b/src/3rdparty/v8/test/mjsunit/string-split.js
@@ -66,6 +66,23 @@ assertArrayEquals(["div", "#i", "d", ".class"], "div#id.class".split(/(?=[d#.])/
assertArrayEquals(["a", "b", "c"], "abc".split(/(?=.)/));
+assertArrayEquals(["Wenige", "sind", "auserwählt."],
+ "Wenige sind auserwählt.".split(" "));
+
+assertArrayEquals([], "Wenige sind auserwählt.".split(" ", 0));
+
+assertArrayEquals(["Wenige"], "Wenige sind auserwählt.".split(" ", 1));
+
+assertArrayEquals(["Wenige", "sind"], "Wenige sind auserwählt.".split(" ", 2));
+
+assertArrayEquals(["Wenige", "sind", "auserwählt."],
+ "Wenige sind auserwählt.".split(" ", 3));
+
+assertArrayEquals(["Wenige sind auserw", "hlt."],
+ "Wenige sind auserwählt.".split("ä"));
+
+assertArrayEquals(["Wenige sind ", "."],
+ "Wenige sind auserwählt.".split("auserwählt"));
/* "ab".split(/((?=.))/)
*
diff --git a/src/3rdparty/v8/test/mjsunit/testcfg.py b/src/3rdparty/v8/test/mjsunit/testcfg.py
index c8b972c..00d4500 100644
--- a/src/3rdparty/v8/test/mjsunit/testcfg.py
+++ b/src/3rdparty/v8/test/mjsunit/testcfg.py
@@ -75,7 +75,7 @@ class MjsunitTestSuite(testsuite.TestSuite):
for f in files_list ]
testfilename = os.path.join(self.root, testcase.path + self.suffix())
if SELF_SCRIPT_PATTERN.search(source):
- env = ["-e", "TEST_FILE_NAME=\"%s\"" % testfilename]
+ env = ["-e", "TEST_FILE_NAME=\"%s\"" % testfilename.replace("\\", "\\\\")]
files = env + files
files.append(os.path.join(self.root, "mjsunit.js"))
files.append(testfilename)
diff --git a/src/3rdparty/v8/test/mjsunit/tools/tickprocessor-test-func-info.log b/src/3rdparty/v8/test/mjsunit/tools/tickprocessor-test-func-info.log
index e4015d4..5e64dc0 100644
--- a/src/3rdparty/v8/test/mjsunit/tools/tickprocessor-test-func-info.log
+++ b/src/3rdparty/v8/test/mjsunit/tools/tickprocessor-test-func-info.log
@@ -2,10 +2,10 @@ shared-library,"shell",0x08048000,0x081ee000
shared-library,"/lib32/libm-2.7.so",0xf7db6000,0xf7dd9000
shared-library,"ffffe000-fffff000",0xffffe000,0xfffff000
profiler,"begin",1
-code-creation,Stub,0x424260,348,"CompareStub_GE"
-code-creation,LazyCompile,0x2a8100,18535,"DrawQube 3d-cube.js:188",0xf43abcac,
-code-creation,LazyCompile,0x480100,3908,"DrawLine 3d-cube.js:17",0xf43abc50,
-tick,0x424284,0xbfffeea0,0,0x480600,0,0x2aaaa5
-tick,0x42429f,0xbfffed88,0,0x480600,0,0x2aacb4
-tick,0x48063d,0xbfffec7c,0,0x2d0f7c,0,0x2aaec6
+code-creation,Stub,0,0x424260,348,"CompareStub_GE"
+code-creation,LazyCompile,0,0x2a8100,18535,"DrawQube 3d-cube.js:188",0xf43abcac,
+code-creation,LazyCompile,0,0x480100,3908,"DrawLine 3d-cube.js:17",0xf43abc50,
+tick,0x424284,0xbfffeea0,0,0,0x480600,0,0x2aaaa5
+tick,0x42429f,0xbfffed88,0,0,0x480600,0,0x2aacb4
+tick,0x48063d,0xbfffec7c,0,0,0x2d0f7c,0,0x2aaec6
profiler,"end"
diff --git a/src/3rdparty/v8/test/mjsunit/tools/tickprocessor-test.log b/src/3rdparty/v8/test/mjsunit/tools/tickprocessor-test.log
index db8be79..5ddad89 100644
--- a/src/3rdparty/v8/test/mjsunit/tools/tickprocessor-test.log
+++ b/src/3rdparty/v8/test/mjsunit/tools/tickprocessor-test.log
@@ -2,24 +2,24 @@ shared-library,"shell",0x08048000,0x081ee000
shared-library,"/lib32/libm-2.7.so",0xf7db6000,0xf7dd9000
shared-library,"ffffe000-fffff000",0xffffe000,0xfffff000
profiler,"begin",1
-code-creation,Stub,0xf540a100,474,"CEntryStub"
-code-creation,Script,0xf541cd80,736,"exp.js"
-code-creation,Stub,0xf541d0e0,47,"RuntimeStub_Math_exp"
-code-creation,LazyCompile,0xf541d120,145,"exp native math.js:41"
+code-creation,Stub,0,0xf540a100,474,"CEntryStub"
+code-creation,Script,0,0xf541cd80,736,"exp.js"
+code-creation,Stub,0,0xf541d0e0,47,"RuntimeStub_Math_exp"
+code-creation,LazyCompile,0,0xf541d120,145,"exp native math.js:41"
function-creation,0xf441d280,0xf541d120
-code-creation,LoadIC,0xf541d280,117,"j"
-code-creation,LoadIC,0xf541d360,63,"i"
-tick,0x80f82d1,0xffdfe880,0,0,0,0xf541ce5c
-tick,0x80f89a1,0xffdfecf0,0,0,0,0xf541ce5c
-tick,0x8123b5c,0xffdff1a0,0,0,0,0xf541d1a1,0xf541ceea
-tick,0x8123b65,0xffdff1a0,0,0,0,0xf541d1a1,0xf541ceea
-tick,0xf541d2be,0xffdff1e4,0,0,0
-tick,0xf541d320,0xffdff1dc,0,0,0
-tick,0xf541d384,0xffdff1d8,0,0,0
-tick,0xf7db94da,0xffdff0ec,0,0,0,0xf541d1a1,0xf541ceea
-tick,0xf7db951c,0xffdff0f0,0,0,0,0xf541d1a1,0xf541ceea
-tick,0xf7dbc508,0xffdff14c,0,0,0,0xf541d1a1,0xf541ceea
-tick,0xf7dbff21,0xffdff198,0,0,0,0xf541d1a1,0xf541ceea
-tick,0xf7edec90,0xffdff0ec,0,0,0,0xf541d1a1,0xf541ceea
-tick,0xffffe402,0xffdff488,0,0,0
+code-creation,LoadIC,0,0xf541d280,117,"j"
+code-creation,LoadIC,0,0xf541d360,63,"i"
+tick,0x80f82d1,0xffdfe880,0,0,0,0,0xf541ce5c
+tick,0x80f89a1,0xffdfecf0,0,0,0,0,0xf541ce5c
+tick,0x8123b5c,0xffdff1a0,0,0,0,0,0xf541d1a1,0xf541ceea
+tick,0x8123b65,0xffdff1a0,0,0,0,0,0xf541d1a1,0xf541ceea
+tick,0xf541d2be,0xffdff1e4,0,0,0,0
+tick,0xf541d320,0xffdff1dc,0,0,0,0
+tick,0xf541d384,0xffdff1d8,0,0,0,0
+tick,0xf7db94da,0xffdff0ec,0,0,0,0,0xf541d1a1,0xf541ceea
+tick,0xf7db951c,0xffdff0f0,0,0,0,0,0xf541d1a1,0xf541ceea
+tick,0xf7dbc508,0xffdff14c,0,0,0,0,0xf541d1a1,0xf541ceea
+tick,0xf7dbff21,0xffdff198,0,0,0,0,0xf541d1a1,0xf541ceea
+tick,0xf7edec90,0xffdff0ec,0,0,0,0,0xf541d1a1,0xf541ceea
+tick,0xffffe402,0xffdff488,0,0,0,0
profiler,"end"
diff --git a/src/3rdparty/v8/test/mjsunit/tools/tickprocessor.js b/src/3rdparty/v8/test/mjsunit/tools/tickprocessor.js
index c48d9f3..00e8f6e 100644
--- a/src/3rdparty/v8/test/mjsunit/tools/tickprocessor.js
+++ b/src/3rdparty/v8/test/mjsunit/tools/tickprocessor.js
@@ -380,7 +380,10 @@ function driveTickProcessorTest(
separateIc,
TickProcessor.CALL_GRAPH_SIZE,
ignoreUnknown,
- stateFilter);
+ stateFilter,
+ undefined,
+ "0",
+ "auto,auto");
var pm = new PrintMonitor(testsPath + refOutput);
tp.processLogFileInTest(testsPath + logInput);
tp.printStatistics();
diff --git a/src/3rdparty/v8/test/mjsunit/uri.js b/src/3rdparty/v8/test/mjsunit/uri.js
index 178ff1f..fae349f 100644
--- a/src/3rdparty/v8/test/mjsunit/uri.js
+++ b/src/3rdparty/v8/test/mjsunit/uri.js
@@ -76,3 +76,15 @@ assertEquals(cc8_2, decodeURI(encodeURI(s8)).charCodeAt(1));
assertEquals(cc9_1, decodeURI(encodeURI(s9)).charCodeAt(0));
assertEquals(cc9_2, decodeURI(encodeURI(s9)).charCodeAt(1));
assertEquals(cc10, decodeURI(encodeURI(s10)).charCodeAt(0));
+
+assertEquals("", decodeURI(""));
+assertEquals("", encodeURI(""));
+
+function test(string) {
+ assertEquals(string, decodeURI(encodeURI(string)));
+}
+
+test("\u1234\u0123\uabcd");
+test("abcd");
+test("ab<\u1234\u0123");
+test("ab\u1234<\u0123");
diff --git a/src/3rdparty/v8/test/mozilla/mozilla.status b/src/3rdparty/v8/test/mozilla/mozilla.status
index 4f2fbde..9878730 100644
--- a/src/3rdparty/v8/test/mozilla/mozilla.status
+++ b/src/3rdparty/v8/test/mozilla/mozilla.status
@@ -70,7 +70,7 @@ ecma_3/Date/15.9.3.2-1: SKIP
js1_2/function/Number: SKIP
# TODO(2018): Temporarily allow timeout in debug mode.
-js1_5/GC/regress-203278-2: PASS || TIMEOUT if $mode == debug
+js1_5/GC/regress-203278-2: PASS || (TIMEOUT || FAIL) if $mode == debug
##################### SLOW TESTS #####################
@@ -106,7 +106,6 @@ js1_5/extensions/regress-365527: PASS || TIMEOUT if $mode == debug
js1_5/Regress/regress-280769-3: PASS || FAIL if $mode == debug
js1_5/Regress/regress-203278-1: PASS || FAIL if $mode == debug
-js1_5/GC/regress-203278-2: PASS || FAIL if $mode == debug
js1_5/Regress/regress-244470: PASS || FAIL if $mode == debug
ecma_3/RegExp/regress-209067: PASS || FAIL if $mode == debug
js1_5/GC/regress-278725: PASS || FAIL if $mode == debug
@@ -127,7 +126,6 @@ ecma/Date/15.9.2.2-6: PASS || FAIL
# 1026139: These date tests fail on arm and mips
ecma/Date/15.9.5.29-1: PASS || FAIL if ($arch == arm || $arch == mipsel)
-ecma/Date/15.9.5.34-1: PASS || FAIL if ($arch == arm || $arch == mipsel)
ecma/Date/15.9.5.28-1: PASS || FAIL if ($arch == arm || $arch == mipsel)
# 1050186: Arm/MIPS vm is broken; probably unrelated to dates
@@ -179,7 +177,6 @@ ecma/Date/15.9.5.34-1: PASS || FAIL
# These tests sometimes pass (in particular on Windows). They build up
# a lot of stuff on the stack, which normally causes a stack overflow,
# but sometimes it makes it through?
-js1_5/Regress/regress-290575: PASS || FAIL
js1_5/Regress/regress-98901: PASS || FAIL
@@ -212,6 +209,9 @@ js1_5/Array/regress-101964: PASS || FAIL if $mode == debug
# This section is for tests that fail in both V8 and JSC. Thus they
# have been determined to be incompatible between Mozilla and V8/JSC.
+# toPrecision argument restricted to range 1..21 in JSC/V8 and ECMA-262
+js1_5/Regress/regress-452346: FAIL_OK
+
# Fails because it calls builtins as functions and does not expect the
# builtin to have undefined as the receiver.
ecma/String/15.5.4.6-2: FAIL_OK
@@ -245,13 +245,6 @@ js1_5/Function/regress-338121-03: FAIL_OK
# Expects the 'prototype' property of functions to be enumerable.
js1_5/Function/10.1.6-01: FAIL_OK
-# toPrecision argument restricted to range 1..21 in JSC/V8
-js1_5/Regress/regress-452346: FAIL_OK
-ecma_3/Number/15.7.4.7-1: FAIL_OK
-
-# toExponential argument restricted to range 0..20 in JSC/V8
-ecma_3/Number/15.7.4.6-1: FAIL_OK
-
#:=== RegExp:===
# We don't match the syntax error message of Mozilla for invalid
# RegExp flags.
@@ -402,15 +395,6 @@ js1_5/decompilation/regress-460116-02: FAIL_OK
js1_5/decompilation/regress-460501: FAIL_OK
js1_5/decompilation/regress-460116-03: FAIL_OK
js1_5/decompilation/regress-461110: FAIL_OK
-js1_5/decompilation/regress-456964-01: FAIL_OK
-js1_5/decompilation/regress-437288-02: FAIL_OK
-js1_5/decompilation/regress-457824: FAIL_OK
-js1_5/decompilation/regress-460116-01: FAIL_OK
-js1_5/decompilation/regress-460116-02: FAIL_OK
-js1_5/decompilation/regress-460116-03: FAIL_OK
-js1_5/decompilation/regress-460501: FAIL_OK
-js1_5/decompilation/regress-461110: FAIL_OK
-
# Tests that use uneval. Non-ECMA.
@@ -567,7 +551,7 @@ ecma_3/RegExp/15.10.2-1: PASS || FAIL_OK
# This test requires a failure if we try to compile a function with more
# than 65536 arguments. This seems to be a Mozilla restriction.
-js1_5/Regress/regress-290575: FAIL_OK
+js1_5/Regress/regress-290575: PASS || FAIL_OK
# Fails because of the way function declarations are
@@ -761,7 +745,6 @@ js1_5/extensions/toLocaleFormat-02: FAIL_OK
js1_5/extensions/regress-330569: TIMEOUT
js1_5/extensions/regress-351448: TIMEOUT
-js1_5/extensions/regress-342960: FAIL_OK || TIMEOUT if $mode == debug
# In the 64-bit version, this test takes longer to run out of memory
# than it does in the 32-bit version when attempting to generate a huge
# error message in debug mode.
@@ -829,10 +812,6 @@ js1_5/decompilation/regress-460870: PASS || FAIL
[ $arch == arm ]
-# Times out and print so much output that we need to skip it to not
-# hang the builder.
-js1_5/extensions/regress-342960: SKIP
-
# BUG(3251229): Times out when running new crankshaft test script.
ecma_3/RegExp/regress-311414: SKIP
ecma/Date/15.9.5.8: SKIP
@@ -852,10 +831,6 @@ js1_5/GC/regress-203278-2: PASS || TIMEOUT
[ $arch == mipsel ]
-# Times out and print so much output that we need to skip it to not
-# hang the builder.
-js1_5/extensions/regress-342960: SKIP
-
# BUG(3251229): Times out when running new crankshaft test script.
ecma_3/RegExp/regress-311414: SKIP
ecma/Date/15.9.5.8: SKIP
diff --git a/src/3rdparty/v8/test/test262/README b/src/3rdparty/v8/test/test262/README
index 59e7f5e..1ddbc70 100644
--- a/src/3rdparty/v8/test/test262/README
+++ b/src/3rdparty/v8/test/test262/README
@@ -4,11 +4,11 @@ tests from
http://hg.ecmascript.org/tests/test262
-at revision 334 as 'data' in this directory. Using later version
+at revision 360 as 'data' in this directory. Using later version
may be possible but the tests are only known to pass (and indeed run)
with that revision.
-hg clone -r 334 http://hg.ecmascript.org/tests/test262 data
+hg clone -r 360 http://hg.ecmascript.org/tests/test262 data
If you do update to a newer revision you may have to change the test
harness adapter code since it uses internal functionality from the
diff --git a/src/3rdparty/v8/test/test262/test262.status b/src/3rdparty/v8/test/test262/test262.status
index 06b43c7..4910939 100644
--- a/src/3rdparty/v8/test/test262/test262.status
+++ b/src/3rdparty/v8/test/test262/test262.status
@@ -30,23 +30,26 @@ def FAIL_OK = FAIL, OKAY
############################### BUGS ###################################
-# '__proto__' should be treated as a normal property in JSON.
-S15.12.2_A1: FAIL
-
# Sequencing of getter side effects on receiver and argument properties
# is wrong. The receiver callback should be called before any arguments
# are evaluated.
# V8 Bug: http://code.google.com/p/v8/issues/detail?id=691
11.2.3-3_3: FAIL
+# Strings that are considered canonically equivalent by the Unicode standard
+# return a non-zero value on String.prototype.localeCompare calls.
+# V8 Bug: http://code.google.com/p/v8/issues/detail?id=2413
+15.5.4.9_CE: FAIL
+
##################### DELIBERATE INCOMPATIBILITIES #####################
-# This tests precision of Math.tan and Math.sin. The implementation for those
+# This tests precision of Math functions. The implementations of those
# trigonometric functions are platform/compiler dependent. Furthermore, the
# expectation values deviate far from the actual results given by an
# arbitrary-precision calculator, making those tests partly bogus.
-S15.8.2.16_A7: PASS || FAIL_OK
-S15.8.2.18_A7: PASS || FAIL_OK
+S15.8.2.8_A6: PASS || FAIL_OK # Math.exp (less precise with --fast-math)
+S15.8.2.16_A7: PASS || FAIL_OK # Math.sin
+S15.8.2.18_A7: PASS || FAIL_OK # Math.tan
# Linux for ia32 (and therefore simulators) default to extended 80 bit floating
# point formats, so these tests checking 64-bit FP precision fail. The other
diff --git a/src/3rdparty/v8/test/test262/testcfg.py b/src/3rdparty/v8/test/test262/testcfg.py
index 875a4e5..f937442 100644
--- a/src/3rdparty/v8/test/test262/testcfg.py
+++ b/src/3rdparty/v8/test/test262/testcfg.py
@@ -36,10 +36,11 @@ from testrunner.local import testsuite
from testrunner.objects import testcase
-TEST_262_ARCHIVE_REVISION = "fb327c439e20" # This is the r334 revision.
-TEST_262_ARCHIVE_MD5 = "307acd166ec34629592f240dc12d57ed"
+TEST_262_ARCHIVE_REVISION = "53c4ade82d14" # This is the r360 revision.
+TEST_262_ARCHIVE_MD5 = "5fa4918b00e5d60e57bdd3c05deaeb0c"
TEST_262_URL = "http://hg.ecmascript.org/tests/test262/archive/%s.tar.bz2"
-TEST_262_HARNESS = ["sta.js"]
+TEST_262_HARNESS = ["sta.js", "testBuiltInObject.js"]
+TEST_262_SKIP = ["intl402"]
class Test262TestSuite(testsuite.TestSuite):
@@ -59,6 +60,8 @@ class Test262TestSuite(testsuite.TestSuite):
for dirname, dirs, files in os.walk(self.testroot):
for dotted in [x for x in dirs if x.startswith(".")]:
dirs.remove(dotted)
+ for skipped in [x for x in dirs if x in TEST_262_SKIP]:
+ dirs.remove(skipped)
dirs.sort()
files.sort()
for filename in files:
@@ -176,6 +179,8 @@ class Test262TestConfiguration(test.TestConfiguration):
for root, dirs, files in os.walk(testroot):
for dotted in [x for x in dirs if x.startswith('.')]:
dirs.remove(dotted)
+ for skipped in [x for x in dirs if x in TEST_262_SKIP]:
+ dirs.remove(skipped)
dirs.sort()
root_path = root[len(self.root):].split(os.path.sep)
root_path = current_path + [x for x in root_path if x]
diff --git a/src/3rdparty/v8/tools/disasm.py b/src/3rdparty/v8/tools/disasm.py
index 681b425..6fa81ca 100644
--- a/src/3rdparty/v8/tools/disasm.py
+++ b/src/3rdparty/v8/tools/disasm.py
@@ -53,12 +53,12 @@ _ARCH_MAP = {
}
-def GetDisasmLines(filename, offset, size, arch, inplace):
+def GetDisasmLines(filename, offset, size, arch, inplace, arch_flags=""):
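+ # arch_flags: extra flags passed through to objdump, e.g.
+ # "--disassembler-options=force-thumb" for Thumb code.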
tmp_name = None
if not inplace:
# Create a temporary file containing a copy of the code.
assert arch in _ARCH_MAP, "Unsupported architecture '%s'" % arch
- arch_flags = _ARCH_MAP[arch]
+ arch_flags = arch_flags + " " + _ARCH_MAP[arch]
tmp_name = tempfile.mktemp(".v8code")
command = "dd if=%s of=%s bs=1 count=%d skip=%d && " \
"%s %s -D -b binary %s %s" % (
@@ -66,8 +66,8 @@ def GetDisasmLines(filename, offset, size, arch, inplace):
OBJDUMP_BIN, ' '.join(_COMMON_DISASM_OPTIONS), arch_flags,
tmp_name)
else:
- command = "%s %s --start-address=%d --stop-address=%d -d %s " % (
- OBJDUMP_BIN, ' '.join(_COMMON_DISASM_OPTIONS),
+ command = "%s %s %s --start-address=%d --stop-address=%d -d %s " % (
+ OBJDUMP_BIN, ' '.join(_COMMON_DISASM_OPTIONS), arch_flags,
offset,
offset + size,
filename)
diff --git a/src/3rdparty/v8/tools/gen-postmortem-metadata.py b/src/3rdparty/v8/tools/gen-postmortem-metadata.py
index 71f58bf..0acb658 100644
--- a/src/3rdparty/v8/tools/gen-postmortem-metadata.py
+++ b/src/3rdparty/v8/tools/gen-postmortem-metadata.py
@@ -76,16 +76,15 @@ consts_misc = [
{ 'name': 'SmiTag', 'value': 'kSmiTag' },
{ 'name': 'SmiTagMask', 'value': 'kSmiTagMask' },
{ 'name': 'SmiValueShift', 'value': 'kSmiTagSize' },
+ { 'name': 'SmiShiftSize', 'value': 'kSmiShiftSize' },
{ 'name': 'PointerSizeLog2', 'value': 'kPointerSizeLog2' },
- { 'name': 'prop_idx_transitions',
- 'value': 'DescriptorArray::kTransitionsIndex' },
{ 'name': 'prop_idx_first',
'value': 'DescriptorArray::kFirstIndex' },
{ 'name': 'prop_type_field',
'value': 'FIELD' },
{ 'name': 'prop_type_first_phantom',
- 'value': 'MAP_TRANSITION' },
+ 'value': 'TRANSITION' },
{ 'name': 'prop_type_mask',
'value': 'PropertyDetails::TypeField::kMask' },
@@ -107,14 +106,13 @@ extras_accessors = [
'JSObject, elements, Object, kElementsOffset',
'FixedArray, data, uintptr_t, kHeaderSize',
'Map, instance_attributes, int, kInstanceAttributesOffset',
- 'Map, instance_descriptors, int, kInstanceDescriptorsOrBitField3Offset',
'Map, inobject_properties, int, kInObjectPropertiesOffset',
'Map, instance_size, int, kInstanceSizeOffset',
'HeapNumber, value, double, kValueOffset',
'ConsString, first, String, kFirstOffset',
'ConsString, second, String, kSecondOffset',
'ExternalString, resource, Object, kResourceOffset',
- 'SeqAsciiString, chars, char, kHeaderSize',
+ 'SeqOneByteString, chars, char, kHeaderSize',
'SharedFunctionInfo, code, Code, kCodeOffset',
'Code, instruction_start, uintptr_t, kHeaderSize',
'Code, instruction_size, int, kInstructionSizeOffset',
@@ -128,7 +126,7 @@ extras_accessors = [
expected_classes = [
'ConsString', 'FixedArray', 'HeapNumber', 'JSArray', 'JSFunction',
'JSObject', 'JSRegExp', 'JSValue', 'Map', 'Oddball', 'Script',
- 'SeqAsciiString', 'SharedFunctionInfo'
+ 'SeqOneByteString', 'SharedFunctionInfo'
];
@@ -293,7 +291,7 @@ def load_objects():
cctype.find('Sliced') == -1):
if (cctype.find('Ascii') != -1):
cctype = re.sub('AsciiString$',
- 'SeqAsciiString', cctype);
+ 'SeqOneByteString', cctype);
else:
cctype = re.sub('String$',
'SeqString', cctype);
diff --git a/src/3rdparty/v8/tools/grokdump.py b/src/3rdparty/v8/tools/grokdump.py
index 46ead5e..603453a 100755
--- a/src/3rdparty/v8/tools/grokdump.py
+++ b/src/3rdparty/v8/tools/grokdump.py
@@ -296,6 +296,42 @@ MINIDUMP_CONTEXT_X86 = Descriptor([
MD_CONTEXT_X86_EXTENDED_REGISTERS))
])
+MD_CONTEXT_ARM = 0x40000000
+MD_CONTEXT_ARM_INTEGER = (MD_CONTEXT_ARM | 0x00000002)
+MD_CONTEXT_ARM_FLOATING_POINT = (MD_CONTEXT_ARM | 0x00000004)
+MD_FLOATINGSAVEAREA_ARM_FPR_COUNT = 32
+MD_FLOATINGSAVEAREA_ARM_FPEXTRA_COUNT = 8
+
+MINIDUMP_FLOATING_SAVE_AREA_ARM = Descriptor([
+ ("fpscr", ctypes.c_uint64),
+ ("regs", ctypes.c_uint64 * MD_FLOATINGSAVEAREA_ARM_FPR_COUNT),
+ ("extra", ctypes.c_uint64 * MD_FLOATINGSAVEAREA_ARM_FPEXTRA_COUNT)
+])
+
+MINIDUMP_CONTEXT_ARM = Descriptor([
+ ("context_flags", ctypes.c_uint32),
+ # MD_CONTEXT_ARM_INTEGER.
+ ("r0", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_ARM_INTEGER)),
+ ("r1", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_ARM_INTEGER)),
+ ("r2", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_ARM_INTEGER)),
+ ("r3", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_ARM_INTEGER)),
+ ("r4", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_ARM_INTEGER)),
+ ("r5", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_ARM_INTEGER)),
+ ("r6", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_ARM_INTEGER)),
+ ("r7", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_ARM_INTEGER)),
+ ("r8", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_ARM_INTEGER)),
+ ("r9", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_ARM_INTEGER)),
+ ("r10", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_ARM_INTEGER)),
+ ("r11", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_ARM_INTEGER)),
+ ("r12", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_ARM_INTEGER)),
+ ("sp", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_ARM_INTEGER)),
+ ("lr", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_ARM_INTEGER)),
+ ("pc", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_ARM_INTEGER)),
+ ("cpsr", ctypes.c_uint32),
+ ("float_save", EnableOnFlag(MINIDUMP_FLOATING_SAVE_AREA_ARM.ctype,
+ MD_CONTEXT_ARM_FLOATING_POINT))
+])
+
MD_CONTEXT_AMD64 = 0x00100000
MD_CONTEXT_AMD64_CONTROL = (MD_CONTEXT_AMD64 | 0x00000001)
MD_CONTEXT_AMD64_INTEGER = (MD_CONTEXT_AMD64 | 0x00000002)
@@ -429,6 +465,7 @@ MINIDUMP_RAW_SYSTEM_INFO = Descriptor([
])
MD_CPU_ARCHITECTURE_X86 = 0
+MD_CPU_ARCHITECTURE_ARM = 5
MD_CPU_ARCHITECTURE_AMD64 = 9
class FuncSymbol:
@@ -481,7 +518,9 @@ class MinidumpReader(object):
system_info = MINIDUMP_RAW_SYSTEM_INFO.Read(
self.minidump, d.location.rva)
self.arch = system_info.processor_architecture
- assert self.arch in [MD_CPU_ARCHITECTURE_AMD64, MD_CPU_ARCHITECTURE_X86]
+ assert self.arch in [MD_CPU_ARCHITECTURE_AMD64,
+ MD_CPU_ARCHITECTURE_ARM,
+ MD_CPU_ARCHITECTURE_X86]
assert not self.arch is None
for d in directories:
@@ -496,6 +535,9 @@ class MinidumpReader(object):
elif self.arch == MD_CPU_ARCHITECTURE_AMD64:
self.exception_context = MINIDUMP_CONTEXT_AMD64.Read(
self.minidump, self.exception.thread_context.rva)
+ elif self.arch == MD_CPU_ARCHITECTURE_ARM:
+ self.exception_context = MINIDUMP_CONTEXT_ARM.Read(
+ self.minidump, self.exception.thread_context.rva)
DebugPrint(self.exception_context)
elif d.stream_type == MD_THREAD_LIST_STREAM:
thread_list = MINIDUMP_THREAD_LIST.Read(self.minidump, d.location.rva)
@@ -541,6 +583,8 @@ class MinidumpReader(object):
def ReadUIntPtr(self, address):
if self.arch == MD_CPU_ARCHITECTURE_AMD64:
return self.ReadU64(address)
+ elif self.arch == MD_CPU_ARCHITECTURE_ARM:
+ return self.ReadU32(address)
elif self.arch == MD_CPU_ARCHITECTURE_X86:
return self.ReadU32(address)
@@ -551,6 +595,8 @@ class MinidumpReader(object):
def _ReadWord(self, location):
if self.arch == MD_CPU_ARCHITECTURE_AMD64:
return ctypes.c_uint64.from_buffer(self.minidump, location).value
+ elif self.arch == MD_CPU_ARCHITECTURE_ARM:
+ return ctypes.c_uint32.from_buffer(self.minidump, location).value
elif self.arch == MD_CPU_ARCHITECTURE_X86:
return ctypes.c_uint32.from_buffer(self.minidump, location).value
@@ -647,18 +693,29 @@ class MinidumpReader(object):
return None
def GetDisasmLines(self, address, size):
+ def CountUndefinedInstructions(lines):
+ pattern = "<UNDEFINED>"
+ return sum([line.count(pattern) for (ignore, line) in lines])
+
location = self.FindLocation(address)
if location is None: return []
arch = None
+ possible_objdump_flags = [""]
if self.arch == MD_CPU_ARCHITECTURE_X86:
arch = "ia32"
+ elif self.arch == MD_CPU_ARCHITECTURE_ARM:
+ arch = "arm"
+ possible_objdump_flags = ["", "--disassembler-options=force-thumb"]
elif self.arch == MD_CPU_ARCHITECTURE_AMD64:
arch = "x64"
- return disasm.GetDisasmLines(self.minidump_name,
- location,
- size,
- arch,
- False)
+ results = [ disasm.GetDisasmLines(self.minidump_name,
+ location,
+ size,
+ arch,
+ False,
+ objdump_flags)
+ for objdump_flags in possible_objdump_flags ]
+ return min(results, key=CountUndefinedInstructions)
def Dispose(self):
@@ -668,24 +725,32 @@ class MinidumpReader(object):
def ExceptionIP(self):
if self.arch == MD_CPU_ARCHITECTURE_AMD64:
return self.exception_context.rip
+ elif self.arch == MD_CPU_ARCHITECTURE_ARM:
+ return self.exception_context.pc
elif self.arch == MD_CPU_ARCHITECTURE_X86:
return self.exception_context.eip
def ExceptionSP(self):
if self.arch == MD_CPU_ARCHITECTURE_AMD64:
return self.exception_context.rsp
+ elif self.arch == MD_CPU_ARCHITECTURE_ARM:
+ return self.exception_context.sp
elif self.arch == MD_CPU_ARCHITECTURE_X86:
return self.exception_context.esp
def FormatIntPtr(self, value):
if self.arch == MD_CPU_ARCHITECTURE_AMD64:
return "%016x" % value
+ elif self.arch == MD_CPU_ARCHITECTURE_ARM:
+ return "%08x" % value
elif self.arch == MD_CPU_ARCHITECTURE_X86:
return "%08x" % value
def PointerSize(self):
if self.arch == MD_CPU_ARCHITECTURE_AMD64:
return 8
+ elif self.arch == MD_CPU_ARCHITECTURE_ARM:
+ return 4
elif self.arch == MD_CPU_ARCHITECTURE_X86:
return 4
@@ -804,40 +869,43 @@ INSTANCE_TYPES = {
142: "EXTERNAL_FLOAT_ARRAY_TYPE",
144: "EXTERNAL_PIXEL_ARRAY_TYPE",
146: "FILLER_TYPE",
- 147: "ACCESSOR_INFO_TYPE",
- 148: "ACCESSOR_PAIR_TYPE",
- 149: "ACCESS_CHECK_INFO_TYPE",
- 150: "INTERCEPTOR_INFO_TYPE",
- 151: "CALL_HANDLER_INFO_TYPE",
- 152: "FUNCTION_TEMPLATE_INFO_TYPE",
- 153: "OBJECT_TEMPLATE_INFO_TYPE",
- 154: "SIGNATURE_INFO_TYPE",
- 155: "TYPE_SWITCH_INFO_TYPE",
- 156: "SCRIPT_TYPE",
- 157: "CODE_CACHE_TYPE",
- 158: "POLYMORPHIC_CODE_CACHE_TYPE",
- 159: "TYPE_FEEDBACK_INFO_TYPE",
- 160: "ALIASED_ARGUMENTS_ENTRY_TYPE",
- 163: "FIXED_ARRAY_TYPE",
+ 147: "DECLARED_ACCESSOR_DESCRIPTOR_TYPE",
+ 148: "DECLARED_ACCESSOR_INFO_TYPE",
+ 149: "EXECUTABLE_ACCESSOR_INFO_TYPE",
+ 150: "ACCESSOR_PAIR_TYPE",
+ 151: "ACCESS_CHECK_INFO_TYPE",
+ 152: "INTERCEPTOR_INFO_TYPE",
+ 153: "CALL_HANDLER_INFO_TYPE",
+ 154: "FUNCTION_TEMPLATE_INFO_TYPE",
+ 155: "OBJECT_TEMPLATE_INFO_TYPE",
+ 156: "SIGNATURE_INFO_TYPE",
+ 157: "TYPE_SWITCH_INFO_TYPE",
+ 158: "ALLOCATION_SITE_INFO_TYPE",
+ 159: "SCRIPT_TYPE",
+ 160: "CODE_CACHE_TYPE",
+ 161: "POLYMORPHIC_CODE_CACHE_TYPE",
+ 162: "TYPE_FEEDBACK_INFO_TYPE",
+ 163: "ALIASED_ARGUMENTS_ENTRY_TYPE",
+ 166: "FIXED_ARRAY_TYPE",
145: "FIXED_DOUBLE_ARRAY_TYPE",
- 164: "SHARED_FUNCTION_INFO_TYPE",
- 165: "JS_MESSAGE_OBJECT_TYPE",
- 168: "JS_VALUE_TYPE",
- 169: "JS_DATE_TYPE",
- 170: "JS_OBJECT_TYPE",
- 171: "JS_CONTEXT_EXTENSION_OBJECT_TYPE",
- 172: "JS_MODULE_TYPE",
- 173: "JS_GLOBAL_OBJECT_TYPE",
- 174: "JS_BUILTINS_OBJECT_TYPE",
- 175: "JS_GLOBAL_PROXY_TYPE",
- 176: "JS_ARRAY_TYPE",
- 167: "JS_PROXY_TYPE",
- 179: "JS_WEAK_MAP_TYPE",
- 180: "JS_REGEXP_TYPE",
- 181: "JS_FUNCTION_TYPE",
- 166: "JS_FUNCTION_PROXY_TYPE",
- 161: "DEBUG_INFO_TYPE",
- 162: "BREAK_POINT_INFO_TYPE",
+ 167: "SHARED_FUNCTION_INFO_TYPE",
+ 168: "JS_MESSAGE_OBJECT_TYPE",
+ 171: "JS_VALUE_TYPE",
+ 172: "JS_DATE_TYPE",
+ 173: "JS_OBJECT_TYPE",
+ 174: "JS_CONTEXT_EXTENSION_OBJECT_TYPE",
+ 175: "JS_MODULE_TYPE",
+ 176: "JS_GLOBAL_OBJECT_TYPE",
+ 177: "JS_BUILTINS_OBJECT_TYPE",
+ 178: "JS_GLOBAL_PROXY_TYPE",
+ 179: "JS_ARRAY_TYPE",
+ 170: "JS_PROXY_TYPE",
+ 182: "JS_WEAK_MAP_TYPE",
+ 183: "JS_REGEXP_TYPE",
+ 184: "JS_FUNCTION_TYPE",
+ 169: "JS_FUNCTION_PROXY_TYPE",
+ 164: "DEBUG_INFO_TYPE",
+ 165: "BREAK_POINT_INFO_TYPE",
}
@@ -864,79 +932,85 @@ INSTANCE_TYPES = {
# printf("}\n");
KNOWN_MAPS = {
0x08081: (134, "ByteArrayMap"),
- 0x080a1: (128, "MetaMap"),
- 0x080c1: (130, "OddballMap"),
- 0x080e1: (163, "FixedArrayMap"),
- 0x08101: (68, "AsciiSymbolMap"),
- 0x08121: (132, "HeapNumberMap"),
- 0x08141: (135, "FreeSpaceMap"),
- 0x08161: (146, "OnePointerFillerMap"),
- 0x08181: (146, "TwoPointerFillerMap"),
- 0x081a1: (131, "GlobalPropertyCellMap"),
- 0x081c1: (164, "SharedFunctionInfoMap"),
- 0x081e1: (4, "AsciiStringMap"),
- 0x08201: (163, "GlobalContextMap"),
- 0x08221: (129, "CodeMap"),
- 0x08241: (163, "ScopeInfoMap"),
- 0x08261: (163, "FixedCOWArrayMap"),
- 0x08281: (145, "FixedDoubleArrayMap"),
- 0x082a1: (163, "HashTableMap"),
- 0x082c1: (0, "StringMap"),
- 0x082e1: (64, "SymbolMap"),
- 0x08301: (1, "ConsStringMap"),
- 0x08321: (5, "ConsAsciiStringMap"),
- 0x08341: (3, "SlicedStringMap"),
- 0x08361: (7, "SlicedAsciiStringMap"),
- 0x08381: (65, "ConsSymbolMap"),
- 0x083a1: (69, "ConsAsciiSymbolMap"),
- 0x083c1: (66, "ExternalSymbolMap"),
- 0x083e1: (74, "ExternalSymbolWithAsciiDataMap"),
- 0x08401: (70, "ExternalAsciiSymbolMap"),
- 0x08421: (2, "ExternalStringMap"),
- 0x08441: (10, "ExternalStringWithAsciiDataMap"),
- 0x08461: (6, "ExternalAsciiStringMap"),
- 0x08481: (82, "ShortExternalSymbolMap"),
- 0x084a1: (90, "ShortExternalSymbolWithAsciiDataMap"),
- 0x084c1: (86, "ShortExternalAsciiSymbolMap"),
- 0x084e1: (18, "ShortExternalStringMap"),
- 0x08501: (26, "ShortExternalStringWithAsciiDataMap"),
- 0x08521: (22, "ShortExternalAsciiStringMap"),
- 0x08541: (0, "UndetectableStringMap"),
- 0x08561: (4, "UndetectableAsciiStringMap"),
- 0x08581: (144, "ExternalPixelArrayMap"),
- 0x085a1: (136, "ExternalByteArrayMap"),
- 0x085c1: (137, "ExternalUnsignedByteArrayMap"),
- 0x085e1: (138, "ExternalShortArrayMap"),
- 0x08601: (139, "ExternalUnsignedShortArrayMap"),
- 0x08621: (140, "ExternalIntArrayMap"),
- 0x08641: (141, "ExternalUnsignedIntArrayMap"),
- 0x08661: (142, "ExternalFloatArrayMap"),
- 0x08681: (143, "ExternalDoubleArrayMap"),
- 0x086a1: (163, "NonStrictArgumentsElementsMap"),
- 0x086c1: (163, "FunctionContextMap"),
- 0x086e1: (163, "CatchContextMap"),
- 0x08701: (163, "WithContextMap"),
- 0x08721: (163, "BlockContextMap"),
- 0x08741: (163, "ModuleContextMap"),
- 0x08761: (165, "JSMessageObjectMap"),
- 0x08781: (133, "ForeignMap"),
- 0x087a1: (170, "NeanderMap"),
- 0x087c1: (158, "PolymorphicCodeCacheMap"),
- 0x087e1: (156, "ScriptMap"),
- 0x08801: (147, "AccessorInfoMap"),
- 0x08821: (148, "AccessorPairMap"),
- 0x08841: (149, "AccessCheckInfoMap"),
- 0x08861: (150, "InterceptorInfoMap"),
- 0x08881: (151, "CallHandlerInfoMap"),
- 0x088a1: (152, "FunctionTemplateInfoMap"),
- 0x088c1: (153, "ObjectTemplateInfoMap"),
- 0x088e1: (154, "SignatureInfoMap"),
- 0x08901: (155, "TypeSwitchInfoMap"),
- 0x08921: (157, "CodeCacheMap"),
- 0x08941: (159, "TypeFeedbackInfoMap"),
- 0x08961: (160, "AliasedArgumentsEntryMap"),
- 0x08981: (161, "DebugInfoMap"),
- 0x089a1: (162, "BreakPointInfoMap"),
+ 0x080a9: (128, "MetaMap"),
+ 0x080d1: (130, "OddballMap"),
+ 0x080f9: (68, "AsciiSymbolMap"),
+ 0x08121: (166, "FixedArrayMap"),
+ 0x08149: (132, "HeapNumberMap"),
+ 0x08171: (135, "FreeSpaceMap"),
+ 0x08199: (146, "OnePointerFillerMap"),
+ 0x081c1: (146, "TwoPointerFillerMap"),
+ 0x081e9: (131, "GlobalPropertyCellMap"),
+ 0x08211: (167, "SharedFunctionInfoMap"),
+ 0x08239: (4, "AsciiStringMap"),
+ 0x08261: (166, "NativeContextMap"),
+ 0x08289: (129, "CodeMap"),
+ 0x082b1: (166, "ScopeInfoMap"),
+ 0x082d9: (166, "FixedCOWArrayMap"),
+ 0x08301: (145, "FixedDoubleArrayMap"),
+ 0x08329: (166, "HashTableMap"),
+ 0x08351: (0, "StringMap"),
+ 0x08379: (64, "SymbolMap"),
+ 0x083a1: (1, "ConsStringMap"),
+ 0x083c9: (5, "ConsAsciiStringMap"),
+ 0x083f1: (3, "SlicedStringMap"),
+ 0x08419: (7, "SlicedAsciiStringMap"),
+ 0x08441: (65, "ConsSymbolMap"),
+ 0x08469: (69, "ConsAsciiSymbolMap"),
+ 0x08491: (66, "ExternalSymbolMap"),
+ 0x084b9: (74, "ExternalSymbolWithAsciiDataMap"),
+ 0x084e1: (70, "ExternalAsciiSymbolMap"),
+ 0x08509: (2, "ExternalStringMap"),
+ 0x08531: (10, "ExternalStringWithAsciiDataMap"),
+ 0x08559: (6, "ExternalAsciiStringMap"),
+ 0x08581: (82, "ShortExternalSymbolMap"),
+ 0x085a9: (90, "ShortExternalSymbolWithAsciiDataMap"),
+ 0x085d1: (86, "ShortExternalAsciiSymbolMap"),
+ 0x085f9: (18, "ShortExternalStringMap"),
+ 0x08621: (26, "ShortExternalStringWithAsciiDataMap"),
+ 0x08649: (22, "ShortExternalAsciiStringMap"),
+ 0x08671: (0, "UndetectableStringMap"),
+ 0x08699: (4, "UndetectableAsciiStringMap"),
+ 0x086c1: (144, "ExternalPixelArrayMap"),
+ 0x086e9: (136, "ExternalByteArrayMap"),
+ 0x08711: (137, "ExternalUnsignedByteArrayMap"),
+ 0x08739: (138, "ExternalShortArrayMap"),
+ 0x08761: (139, "ExternalUnsignedShortArrayMap"),
+ 0x08789: (140, "ExternalIntArrayMap"),
+ 0x087b1: (141, "ExternalUnsignedIntArrayMap"),
+ 0x087d9: (142, "ExternalFloatArrayMap"),
+ 0x08801: (143, "ExternalDoubleArrayMap"),
+ 0x08829: (166, "NonStrictArgumentsElementsMap"),
+ 0x08851: (166, "FunctionContextMap"),
+ 0x08879: (166, "CatchContextMap"),
+ 0x088a1: (166, "WithContextMap"),
+ 0x088c9: (166, "BlockContextMap"),
+ 0x088f1: (166, "ModuleContextMap"),
+ 0x08919: (166, "GlobalContextMap"),
+ 0x08941: (168, "JSMessageObjectMap"),
+ 0x08969: (133, "ForeignMap"),
+ 0x08991: (173, "NeanderMap"),
+ 0x089b9: (158, "AllocationSiteInfoMap"),
+ 0x089e1: (161, "PolymorphicCodeCacheMap"),
+ 0x08a09: (159, "ScriptMap"),
+ 0x08a31: (173, ""),
+ 0x08a59: (173, "ExternalMap"),
+ 0x08a81: (147, "DeclaredAccessorDescriptorMap"),
+ 0x08aa9: (148, "DeclaredAccessorInfoMap"),
+ 0x08ad1: (149, "ExecutableAccessorInfoMap"),
+ 0x08af9: (150, "AccessorPairMap"),
+ 0x08b21: (151, "AccessCheckInfoMap"),
+ 0x08b49: (152, "InterceptorInfoMap"),
+ 0x08b71: (153, "CallHandlerInfoMap"),
+ 0x08b99: (154, "FunctionTemplateInfoMap"),
+ 0x08bc1: (155, "ObjectTemplateInfoMap"),
+ 0x08be9: (156, "SignatureInfoMap"),
+ 0x08c11: (157, "TypeSwitchInfoMap"),
+ 0x08c39: (160, "CodeCacheMap"),
+ 0x08c61: (162, "TypeFeedbackInfoMap"),
+ 0x08c89: (163, "AliasedArgumentsEntryMap"),
+ 0x08cb1: (164, "DebugInfoMap"),
+ 0x08cd9: (165, "BreakPointInfoMap"),
}
@@ -947,7 +1021,7 @@ KNOWN_MAPS = {
#
# #define ROOT_LIST_CASE(type, name, camel_name) \
# if (o == heap_.name()) n = #camel_name;
-# OldSpaces spit;
+# OldSpaces spit(heap());
# printf("KNOWN_OBJECTS = {\n");
# for (PagedSpace* s = spit.next(); s != NULL; s = spit.next()) {
# HeapObjectIterator it(s);
@@ -973,25 +1047,27 @@ KNOWN_OBJECTS = {
("OLD_POINTER_SPACE", 0x080f1): "NumberStringCache",
("OLD_POINTER_SPACE", 0x088f9): "SingleCharacterStringCache",
("OLD_POINTER_SPACE", 0x08b01): "StringSplitCache",
- ("OLD_POINTER_SPACE", 0x08f09): "TerminationException",
- ("OLD_POINTER_SPACE", 0x08f19): "MessageListeners",
- ("OLD_POINTER_SPACE", 0x08f35): "CodeStubs",
- ("OLD_POINTER_SPACE", 0x09b61): "NonMonomorphicCache",
- ("OLD_POINTER_SPACE", 0x0a175): "PolymorphicCodeCache",
- ("OLD_POINTER_SPACE", 0x0a17d): "NativesSourceCache",
- ("OLD_POINTER_SPACE", 0x0a1bd): "EmptyScript",
- ("OLD_POINTER_SPACE", 0x0a1f9): "IntrinsicFunctionNames",
- ("OLD_POINTER_SPACE", 0x24a49): "SymbolTable",
- ("OLD_DATA_SPACE", 0x08081): "EmptyFixedArray",
- ("OLD_DATA_SPACE", 0x080a1): "NanValue",
- ("OLD_DATA_SPACE", 0x0811d): "EmptyByteArray",
- ("OLD_DATA_SPACE", 0x08125): "EmptyString",
- ("OLD_DATA_SPACE", 0x08131): "EmptyDescriptorArray",
+ ("OLD_POINTER_SPACE", 0x08f09): "RegExpMultipleCache",
+ ("OLD_POINTER_SPACE", 0x09311): "TerminationException",
+ ("OLD_POINTER_SPACE", 0x09321): "MessageListeners",
+ ("OLD_POINTER_SPACE", 0x0933d): "CodeStubs",
+ ("OLD_POINTER_SPACE", 0x09fa5): "NonMonomorphicCache",
+ ("OLD_POINTER_SPACE", 0x0a5b9): "PolymorphicCodeCache",
+ ("OLD_POINTER_SPACE", 0x0a5c1): "NativesSourceCache",
+ ("OLD_POINTER_SPACE", 0x0a601): "EmptyScript",
+ ("OLD_POINTER_SPACE", 0x0a63d): "IntrinsicFunctionNames",
+ ("OLD_POINTER_SPACE", 0x0d659): "ObservationState",
+ ("OLD_POINTER_SPACE", 0x27415): "SymbolTable",
+ ("OLD_DATA_SPACE", 0x08099): "EmptyDescriptorArray",
+ ("OLD_DATA_SPACE", 0x080a1): "EmptyFixedArray",
+ ("OLD_DATA_SPACE", 0x080a9): "NanValue",
+ ("OLD_DATA_SPACE", 0x08125): "EmptyByteArray",
+ ("OLD_DATA_SPACE", 0x0812d): "EmptyString",
("OLD_DATA_SPACE", 0x08259): "InfinityValue",
("OLD_DATA_SPACE", 0x08265): "MinusZeroValue",
("OLD_DATA_SPACE", 0x08271): "PrototypeAccessors",
- ("CODE_SPACE", 0x12b81): "JsEntryCode",
- ("CODE_SPACE", 0x12c61): "JsConstructEntryCode",
+ ("CODE_SPACE", 0x0aea1): "JsEntryCode",
+ ("CODE_SPACE", 0x0b5c1): "JsConstructEntryCode",
}
@@ -1459,6 +1535,8 @@ class V8Heap(object):
def MapAlignmentMask(self):
if self.reader.arch == MD_CPU_ARCHITECTURE_AMD64:
return (1 << 4) - 1
+ elif self.reader.arch == MD_CPU_ARCHITECTURE_ARM:
+ return (1 << 4) - 1
elif self.reader.arch == MD_CPU_ARCHITECTURE_X86:
return (1 << 5) - 1
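
The masks returned here follow the usual (1 << n) - 1 pattern: an address is
aligned to 2^n bytes exactly when its low n bits are zero, and the patch gives
ARM the same 16-byte map alignment as AMD64. A standalone Python sketch of the
arithmetic (illustrative only, not part of the patch):

    def alignment_mask(bits):
        # Mask with the low `bits` bits set, e.g. alignment_mask(4) == 0xF.
        return (1 << bits) - 1

    def is_aligned(address, bits):
        return (address & alignment_mask(bits)) == 0

    assert alignment_mask(4) == 0xF    # AMD64 and, with this patch, ARM
    assert alignment_mask(5) == 0x1F   # X86
    assert is_aligned(0x08040, 5)      # low five bits are clear
    assert not is_aligned(0x08058, 4)  # hypothetical unaligned address
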
@@ -1743,6 +1821,9 @@ CONTEXT_FOR_ARCH = {
MD_CPU_ARCHITECTURE_AMD64:
['rax', 'rbx', 'rcx', 'rdx', 'rdi', 'rsi', 'rbp', 'rsp', 'rip',
'r8', 'r9', 'r10', 'r11', 'r12', 'r13', 'r14', 'r15'],
+ MD_CPU_ARCHITECTURE_ARM:
+ ['r0', 'r1', 'r2', 'r3', 'r4', 'r5', 'r6', 'r7', 'r8', 'r9',
+ 'r10', 'r11', 'r12', 'sp', 'lr', 'pc'],
MD_CPU_ARCHITECTURE_X86:
['eax', 'ebx', 'ecx', 'edx', 'edi', 'esi', 'ebp', 'esp', 'eip']
}
@@ -1768,7 +1849,11 @@ def AnalyzeMinidump(options, minidump_name):
for r in CONTEXT_FOR_ARCH[reader.arch]:
print " %s: %s" % (r, reader.FormatIntPtr(reader.Register(r)))
# TODO(vitalyr): decode eflags.
- print " eflags: %s" % bin(reader.exception_context.eflags)[2:]
+ if reader.arch == MD_CPU_ARCHITECTURE_ARM:
+ print " cpsr: %s" % bin(reader.exception_context.cpsr)[2:]
+ else:
+ print " eflags: %s" % bin(reader.exception_context.eflags)[2:]
+
print
print " modules:"
for module in reader.module_list.modules:
@@ -1839,7 +1924,15 @@ if __name__ == "__main__":
help="dump all information contained in the minidump")
parser.add_option("--symdir", dest="symdir", default=".",
help="directory containing *.pdb.sym file with symbols")
+ parser.add_option("--objdump",
+ default="/usr/bin/objdump",
+ help="objdump tool to use [default: %default]")
options, args = parser.parse_args()
+ if os.path.exists(options.objdump):
+ disasm.OBJDUMP_BIN = options.objdump
+ OBJDUMP_BIN = options.objdump
+ else:
+ print "Cannot find %s, falling back to default objdump" % options.objdump
if len(args) != 1:
parser.print_help()
sys.exit(1)
diff --git a/src/3rdparty/v8/tools/gyp/v8.gyp b/src/3rdparty/v8/tools/gyp/v8.gyp
index aad07c7..6ebf3f6 100644
--- a/src/3rdparty/v8/tools/gyp/v8.gyp
+++ b/src/3rdparty/v8/tools/gyp/v8.gyp
@@ -68,8 +68,8 @@
'USING_V8_SHARED',
],
},
- 'conditions': [
- ['OS=="android"', {
+ 'target_conditions': [
+ ['OS=="android" and _toolset=="target"', {
'libraries': [
'-llog',
],
@@ -77,6 +77,8 @@
'src/common/android/include',
],
}],
+ ],
+ 'conditions': [
['OS=="mac"', {
'xcode_settings': {
'OTHER_LDFLAGS': ['-dynamiclib', '-all_load']
@@ -99,7 +101,7 @@
},
{
'target_name': 'v8_snapshot',
- 'type': '<(library)',
+ 'type': 'static_library',
'conditions': [
['want_separate_host_toolset==1', {
'toolsets': ['host', 'target'],
@@ -158,8 +160,9 @@
['armv7==1', {
# The ARM Architecture Manual mandates VFPv3 if NEON is
# available.
- # The current V8 doesn't use d16-d31, so for vfpv3-d16, we can
- # also enable vfp3 for the better performance.
+ # V8 does not use d16-d31 unless explicitly enabled
+ # (--enable_32dregs) or detected at run-time, so for vfpv3-d16,
+ # we can also enable vfp3 for the better performance.
'conditions': [
['arm_neon!=1 and arm_fpu!="vfpv3" and arm_fpu!="vfpv3-d16"', {
'variables': {
@@ -190,7 +193,7 @@
},
{
'target_name': 'v8_nosnapshot',
- 'type': '<(library)',
+ 'type': 'static_library',
'dependencies': [
'v8_base',
],
@@ -220,7 +223,7 @@
},
{
'target_name': 'v8_base',
- 'type': '<(library)',
+ 'type': 'static_library',
'variables': {
'optimize': 'max',
},
@@ -262,6 +265,7 @@
'../../src/circular-queue.h',
'../../src/code-stubs.cc',
'../../src/code-stubs.h',
+ '../../src/code-stubs-hydrogen.cc',
'../../src/code.h',
'../../src/codegen.cc',
'../../src/codegen.h',
@@ -341,6 +345,9 @@
'../../src/heap-inl.h',
'../../src/heap-profiler.cc',
'../../src/heap-profiler.h',
+ '../../src/heap-snapshot-generator-inl.h',
+ '../../src/heap-snapshot-generator.cc',
+ '../../src/heap-snapshot-generator.h',
'../../src/heap.cc',
'../../src/heap.h',
'../../src/hydrogen-instructions.cc',
@@ -352,8 +359,6 @@
'../../src/ic.h',
'../../src/incremental-marking.cc',
'../../src/incremental-marking.h',
- '../../src/inspector.cc',
- '../../src/inspector.h',
'../../src/interface.cc',
'../../src/interface.h',
'../../src/interpreter-irregexp.cc',
@@ -362,6 +367,7 @@
'../../src/isolate.h',
'../../src/json-parser.h',
'../../src/json-stringifier.h',
+ '../../src/jsregexp-inl.h',
'../../src/jsregexp.cc',
'../../src/jsregexp.h',
'../../src/lazy-instance.h',
@@ -374,9 +380,6 @@
'../../src/lithium.h',
'../../src/liveedit.cc',
'../../src/liveedit.h',
- '../../src/liveobjectlist-inl.h',
- '../../src/liveobjectlist.cc',
- '../../src/liveobjectlist.h',
'../../src/log-inl.h',
'../../src/log-utils.cc',
'../../src/log-utils.h',
@@ -385,6 +388,8 @@
'../../src/macro-assembler.h',
'../../src/mark-compact.cc',
'../../src/mark-compact.h',
+ '../../src/marking-thread.h',
+ '../../src/marking-thread.cc',
'../../src/messages.cc',
'../../src/messages.h',
'../../src/natives.h',
@@ -464,6 +469,8 @@
'../../src/strtod.h',
'../../src/stub-cache.cc',
'../../src/stub-cache.h',
+ '../../src/sweeper-thread.h',
+ '../../src/sweeper-thread.cc',
'../../src/token.cc',
'../../src/token.h',
'../../src/transitions-inl.h',
@@ -476,6 +483,7 @@
'../../src/unicode-inl.h',
'../../src/unicode.cc',
'../../src/unicode.h',
+ '../../src/uri.h',
'../../src/utils-inl.h',
'../../src/utils.cc',
'../../src/utils.h',
@@ -786,6 +794,7 @@
],
'experimental_library_files': [
'../../src/macros.py',
+ '../../src/symbol.js',
'../../src/proxy.js',
'../../src/collection.js',
'../../src/object-observe.js'
@@ -912,7 +921,7 @@
},
{
'target_name': 'preparser_lib',
- 'type': '<(library)',
+ 'type': 'static_library',
'include_dirs+': [
'../../src',
],
@@ -990,6 +999,47 @@
'toolsets': ['target'],
}],
],
+ 'variables': {
+ 'shim_headers_path': '<(SHARED_INTERMEDIATE_DIR)/shim_headers/<(_target_name)/<(_toolset)',
+ },
+ 'include_dirs++': [
+ '<(shim_headers_path)',
+ ],
+ 'direct_dependent_settings': {
+ 'include_dirs+++': [
+ '<(shim_headers_path)',
+ ],
+ },
+ 'actions': [
+ {
+ 'variables': {
+ 'generator_path': '../../../tools/generate_shim_headers/generate_shim_headers.py',
+ 'generator_args': [
+ '--headers-root', '../../include',
+ '--output-directory', '<(shim_headers_path)',
+ 'v8-debug.h',
+ 'v8-preparser.h',
+ 'v8-profiler.h',
+ 'v8-testing.h',
+ 'v8.h',
+ 'v8stdint.h',
+ ],
+ },
+ 'action_name': 'generate_<(_target_name)_shim_headers',
+ 'inputs': [
+ '<(generator_path)',
+ ],
+ 'outputs': [
+ '<!@pymod_do_main(generate_shim_headers <@(generator_args) --outputs)',
+ ],
+ 'action': ['python',
+ '<(generator_path)',
+ '<@(generator_args)',
+ '--generate',
+ ],
+ 'message': 'Generating <(_target_name) shim headers.',
+ },
+ ],
'link_settings': {
'libraries': [
'-lv8',
diff --git a/src/3rdparty/v8/tools/ll_prof.py b/src/3rdparty/v8/tools/ll_prof.py
index 3afe179..216929d 100755
--- a/src/3rdparty/v8/tools/ll_prof.py
+++ b/src/3rdparty/v8/tools/ll_prof.py
@@ -45,7 +45,7 @@ USAGE="""usage: %prog [OPTION]...
Analyses V8 and perf logs to produce profiles.
Perf logs can be collected using a command like:
- $ perf record -R -e cycles -c 10000 -f -i ./shell bench.js --ll-prof
+ $ perf record -R -e cycles -c 10000 -f -i ./d8 bench.js --ll-prof
# -R: collect all data
# -e cycles: use cpu-cycles event (run "perf list" for details)
# -c 10000: write a sample after each 10000 events
@@ -54,6 +54,16 @@ Perf logs can be collected using a command like:
# --ll-prof shell flag enables the right V8 logs
This will produce a binary trace file (perf.data) that %prog can analyse.
+IMPORTANT:
+ The kernel has an internal maximum for events per second; it is 100K by
+ default. That's not enough for "-c 10000". Set it to a higher value:
+ $ echo 10000000 | sudo tee /proc/sys/kernel/perf_event_max_sample_rate
+ You can also make the warning about kernel address maps go away:
+ $ echo 0 | sudo tee /proc/sys/kernel/kptr_restrict
+
+We have a convenience script that handles all of the above for you:
+ $ tools/run-llprof.sh ./d8 bench.js
+
Examples:
# Print flat profile with annotated disassembly for the 10 top
# symbols. Use default log names and include the snapshot log.
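
The two sysctl tweaks described above can be verified before recording; a
small read-only sketch (proc paths as given in the text; changing the values
still needs the sudo commands shown):

    def check_perf_limits(wanted_rate=10000000):
        with open("/proc/sys/kernel/perf_event_max_sample_rate") as f:
            if int(f.read()) < wanted_rate:
                print "perf_event_max_sample_rate is too low for -c 10000"
        with open("/proc/sys/kernel/kptr_restrict") as f:
            if int(f.read()) != 0:
                print "kptr_restrict is hiding kernel address maps"
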
@@ -75,6 +85,10 @@ class Code(object):
"""Code object."""
_id = 0
+ UNKNOWN = 0
+ V8INTERNAL = 1
+ FULL_CODEGEN = 2
+ OPTIMIZED = 3
def __init__(self, name, start_address, end_address, origin, origin_offset):
self.id = Code._id
@@ -88,6 +102,14 @@ class Code(object):
self.self_ticks = 0
self.self_ticks_map = None
self.callee_ticks = None
+ if name.startswith("LazyCompile:*"):
+ self.codetype = Code.OPTIMIZED
+ elif name.startswith("LazyCompile:"):
+ self.codetype = Code.FULL_CODEGEN
+ elif name.startswith("v8::internal::"):
+ self.codetype = Code.V8INTERNAL
+ else:
+ self.codetype = Code.UNKNOWN
def AddName(self, name):
assert self.name != name
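
The constructor above derives codetype purely from name prefixes found in the
V8 log. For illustration (the sample names below are made up):

    def classify(name):
        if name.startswith("LazyCompile:*"):    # '*' marks optimized code
            return "OPTIMIZED"
        elif name.startswith("LazyCompile:"):
            return "FULL_CODEGEN"
        elif name.startswith("v8::internal::"):
            return "V8INTERNAL"
        return "UNKNOWN"

    print classify("LazyCompile:*hot bench.js:1")      # OPTIMIZED
    print classify("LazyCompile:~cold bench.js:9")     # FULL_CODEGEN
    print classify("v8::internal::SomeRuntimeHelper")  # V8INTERNAL
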
@@ -185,7 +207,7 @@ class Code(object):
class CodePage(object):
"""Group of adjacent code objects."""
- SHIFT = 12 # 4K pages
+ SHIFT = 20 # 1M pages
SIZE = (1 << SHIFT)
MASK = ~(SIZE - 1)
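
Raising SHIFT from 12 to 20 makes each code page cover 2^20 bytes, so far
fewer pages are needed for the same address range. A quick check of the
derived constants (sketch, mirroring the class above):

    SHIFT = 20
    SIZE = 1 << SHIFT        # 0x100000, i.e. 1 MB
    MASK = ~(SIZE - 1)

    def page_id(address):
        return address >> SHIFT

    assert SIZE == 0x100000
    assert page_id(0x7f3a00000) == page_id(0x7f3a0ffff)  # same 1 MB page
    assert page_id(0x7f3a00000) != page_id(0x7f3b00000)  # different pages
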
@@ -507,6 +529,7 @@ class Descriptor(object):
# for the gory details.
+# Reference: struct perf_file_header in kernel/tools/perf/util/header.h
TRACE_HEADER_DESC = Descriptor([
("magic", "u64"),
("size", "u64"),
@@ -520,6 +543,7 @@ TRACE_HEADER_DESC = Descriptor([
])
+# Reference: /usr/include/linux/perf_event.h
PERF_EVENT_ATTR_DESC = Descriptor([
("type", "u32"),
("size", "u32"),
@@ -529,12 +553,13 @@ PERF_EVENT_ATTR_DESC = Descriptor([
("read_format", "u64"),
("flags", "u64"),
("wakeup_events_or_watermark", "u32"),
- ("bt_type", "u32"),
+ ("bp_type", "u32"),
("bp_addr", "u64"),
- ("bp_len", "u64"),
+ ("bp_len", "u64")
])
+# Reference: /usr/include/linux/perf_event.h
PERF_EVENT_HEADER_DESC = Descriptor([
("type", "u32"),
("misc", "u16"),
@@ -542,6 +567,7 @@ PERF_EVENT_HEADER_DESC = Descriptor([
])
+# Reference: kernel/events/core.c
PERF_MMAP_EVENT_BODY_DESC = Descriptor([
("pid", "u32"),
("tid", "u32"),
@@ -566,6 +592,7 @@ PERF_SAMPLE_STREAM_ID = 1 << 9
PERF_SAMPLE_RAW = 1 << 10
+# Reference: /usr/include/perf_event.h, the comment for PERF_RECORD_SAMPLE.
PERF_SAMPLE_EVENT_BODY_FIELDS = [
("ip", "u64", PERF_SAMPLE_IP),
("pid", "u32", PERF_SAMPLE_TID),
@@ -702,8 +729,12 @@ class LibraryRepo(object):
# Unfortunately, section headers span two lines, so we have to
# keep the most recently seen section name (from the first line in each
# section header) in the after_section variable.
+ if mmap_info.filename.endswith(".ko"):
+ dynamic_symbols = ""
+ else:
+ dynamic_symbols = "-T"
process = subprocess.Popen(
- "%s -h -t -T -C %s" % (OBJDUMP_BIN, mmap_info.filename),
+ "%s -h -t %s -C %s" % (OBJDUMP_BIN, dynamic_symbols, mmap_info.filename),
shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
pipe = process.stdout
after_section = None
@@ -795,7 +826,7 @@ def PrintReport(code_map, library_repo, arch, ticks, options):
code.PrintAnnotated(arch, options)
print
print "Ticks per library:"
- mmap_infos = [m for m in library_repo.infos]
+ mmap_infos = [m for m in library_repo.infos if m.ticks > 0]
mmap_infos.sort(key=lambda m: m.ticks, reverse=True)
for mmap_info in mmap_infos:
mmap_ticks = mmap_info.ticks
@@ -885,6 +916,9 @@ if __name__ == "__main__":
ticks = 0
missed_ticks = 0
really_missed_ticks = 0
+ optimized_ticks = 0
+ generated_ticks = 0
+ v8_internal_ticks = 0
mmap_time = 0
sample_time = 0
@@ -928,6 +962,12 @@ if __name__ == "__main__":
code = code_map.Find(sample.ip)
if code:
code.Tick(sample.ip)
+ if code.codetype == Code.OPTIMIZED:
+ optimized_ticks += 1
+ elif code.codetype == Code.FULL_CODEGEN:
+ generated_ticks += 1
+ elif code.codetype == Code.V8INTERNAL:
+ v8_internal_ticks += 1
else:
missed_ticks += 1
if not library_repo.Tick(sample.ip) and not code:
@@ -947,12 +987,21 @@ if __name__ == "__main__":
PrintReport(code_map, library_repo, log_reader.arch, ticks, options)
if not options.quiet:
+ def PrintTicks(number, total, description):
+ print("%10d %5.1f%% ticks in %s" %
+ (number, 100.0*number/total, description))
print
print "Stats:"
print "%10d total trace events" % events
print "%10d total ticks" % ticks
print "%10d ticks not in symbols" % missed_ticks
- print "%10d unaccounted ticks" % really_missed_ticks
+ unaccounted = "unaccounted ticks"
+ if really_missed_ticks > 0:
+ unaccounted += " (probably in the kernel, try --kernel)"
+ PrintTicks(really_missed_ticks, ticks, unaccounted)
+ PrintTicks(optimized_ticks, ticks, "ticks in optimized code")
+ PrintTicks(generated_ticks, ticks, "ticks in other lazily compiled code")
+ PrintTicks(v8_internal_ticks, ticks, "ticks in v8::internal::*")
print "%10d total symbols" % len([c for c in code_map.AllCode()])
print "%10d used symbols" % len([c for c in code_map.UsedCode()])
print "%9.2fs library processing time" % mmap_time
diff --git a/src/3rdparty/v8/tools/plot-timer-events b/src/3rdparty/v8/tools/plot-timer-events
new file mode 100755
index 0000000..221684d
--- /dev/null
+++ b/src/3rdparty/v8/tools/plot-timer-events
@@ -0,0 +1,70 @@
+#!/bin/sh
+
+# Find the name of the log file to process; it must not start with a dash.
+log_file="v8.log"
+for arg in "$@"
+do
+ if ! expr "X${arg}" : "^X-" > /dev/null; then
+ log_file=${arg}
+ fi
+done
+
+tools_path=`cd $(dirname "$0");pwd`
+if [ ! "$D8_PATH" ]; then
+ d8_public=`which d8`
+ if [ -x "$d8_public" ]; then D8_PATH=$(dirname "$d8_public"); fi
+fi
+[ -n "$D8_PATH" ] || D8_PATH=$tools_path/..
+d8_exec=$D8_PATH/d8
+
+if [ ! -x "$d8_exec" ]; then
+ D8_PATH=`pwd`/out/native
+ d8_exec=$D8_PATH/d8
+fi
+
+if [ ! -x "$d8_exec" ]; then
+ d8_exec=`grep -m 1 -o '".*/d8"' $log_file | sed 's/"//g'`
+fi
+
+if [ ! -x "$d8_exec" ]; then
+ echo "d8 shell not found in $D8_PATH"
+ echo "To build, execute 'make native' from the V8 directory"
+ exit 1
+fi
+
+if [[ "$@" != *--distortion* ]]; then
+ # Try to find out how much the instrumentation overhead is.
+ calibration_log=calibration.log
+ calibration_script="for (var i = 0; i < 1000000; i++) print();"
+
+ $d8_exec --nocrankshaft --prof --logfile $calibration_log \
+ --log-timer-events -e "$calibration_script" > /dev/null
+ t_1_start=`grep "timer-event-start,\"V8.Execute\"" $calibration_log \
+ | tail -n1 | awk -F, '{print $3}'`
+ t_1_end=`grep "timer-event-end,\"V8.Execute\"" $calibration_log \
+ | tail -n1 | awk -F, '{print $3}'`
+ n_1=`grep "timer-event\|tick" $calibration_log | wc -l`
+
+ $d8_exec --nocrankshaft --prof --logfile $calibration_log \
+ --log-internal-timer-events -e "$calibration_script" > /dev/null
+ t_2_start=`grep "timer-event-start,\"V8.Execute\"" $calibration_log \
+ | tail -n1 | awk -F, '{print $3}'`
+ t_2_end=`grep "timer-event-end,\"V8.Execute\"" $calibration_log \
+ | tail -n1 | awk -F, '{print $3}'`
+ n_2=`grep "timer-event\|tick" $calibration_log | wc -l`
+
+ rm $calibration_log
+
+ # Overhead in picoseconds.
+ options=--distortion=
+ options+=`echo "1000*(($t_1_end - $t_1_start) - ($t_2_end - $t_2_start)) \
+ / ($n_1 - $n_2)" | bc`
+ echo $options
+fi
+
+echo -e "plot-range,$plot_range\ndistortion,$distortion" | cat - $log_file |
+ $d8_exec $tools_path/csvparser.js $tools_path/splaytree.js \
+ $tools_path/codemap.js $tools_path/profile.js $tools_path/profile_view.js \
+ $tools_path/logreader.js $tools_path/tickprocessor.js \
+ $tools_path/plot-timer-events.js -- $options $@ | less \
+ 2>/dev/null | gnuplot > timer-events.png
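
The calibration block above runs the same workload twice with different
amounts of logging; the wall-time difference divided by the difference in
log-entry counts estimates the cost of writing one entry. A worked example of
the arithmetic with made-up numbers:

    t_1_start, t_1_end, n_1 = 5000, 905000, 1500000  # full logging
    t_2_start, t_2_end, n_2 = 5000, 605000, 500000   # internal events only

    distortion = 1000 * ((t_1_end - t_1_start) - (t_2_end - t_2_start)) \
                 // (n_1 - n_2)
    print "--distortion=%d" % distortion  # --distortion=300
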
diff --git a/src/3rdparty/v8/tools/plot-timer-events.js b/src/3rdparty/v8/tools/plot-timer-events.js
new file mode 100644
index 0000000..5412f8e
--- /dev/null
+++ b/src/3rdparty/v8/tools/plot-timer-events.js
@@ -0,0 +1,510 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+var kV8BinarySuffixes = ["/d8", "/libv8.so"];
+var kStackFrames = 8;
+
+var kTimerEventWidth = 0.33;
+var kExecutionFrameWidth = 0.2;
+var kStackFrameWidth = 0.1;
+var kGapWidth = 0.05;
+
+var kY1Offset = 10;
+
+var kResX = 1600;
+var kResY = 600;
+var kPauseLabelPadding = 5;
+var kNumPauseLabels = 7;
+var kTickHalfDuration = 0.5; // Milliseconds
+var kCodeKindLabelPadding = 100;
+var kMinRangeLength = 0.0005; // Milliseconds
+
+var num_timer_event = kY1Offset + 0.5;
+
+var kNumThreads = 2;
+var kExecutionThreadId = 0;
+
+function assert(something, message) {
+ if (!something) {
+ print(new Error(message).stack);
+ }
+}
+
+function TimerEvent(color, pause, thread_id) {
+ assert(thread_id >= 0 && thread_id < kNumThreads, "invalid thread id");
+ this.color = color;
+ this.pause = pause;
+ this.ranges = [];
+ this.thread_id = thread_id;
+ this.index = ++num_timer_event;
+}
+
+
+var TimerEvents = {
+ 'V8.Execute': new TimerEvent("#000000", false, 0),
+ 'V8.External': new TimerEvent("#3399FF", false, 0),
+ 'V8.CompileFullCode': new TimerEvent("#CC0000", true, 0),
+ 'V8.RecompileSynchronous': new TimerEvent("#CC0044", true, 0),
+ 'V8.RecompileParallel': new TimerEvent("#CC4499", false, 1),
+ 'V8.CompileEval': new TimerEvent("#CC4400", true, 0),
+ 'V8.Parse': new TimerEvent("#00CC00", true, 0),
+ 'V8.PreParse': new TimerEvent("#44CC00", true, 0),
+ 'V8.ParseLazy': new TimerEvent("#00CC44", true, 0),
+ 'V8.GCScavenger': new TimerEvent("#0044CC", true, 0),
+ 'V8.GCCompactor': new TimerEvent("#4444CC", true, 0),
+ 'V8.GCContext': new TimerEvent("#4400CC", true, 0),
+}
+
+
+Array.prototype.top = function() {
+ if (this.length == 0) return undefined;
+ return this[this.length - 1];
+}
+
+var event_stack = [];
+var last_time_stamp = [];
+
+for (var i = 0; i < kNumThreads; i++) {
+ event_stack[i] = [];
+ last_time_stamp[i] = -1;
+}
+
+
+function CodeKind(color, kinds) {
+ this.color = color;
+ this.in_execution = [];
+ this.stack_frames = [];
+ for (var i = 0; i < kStackFrames; i++) this.stack_frames.push([]);
+ this.kinds = kinds;
+}
+
+
+var CodeKinds = {
+ 'external ': new CodeKind("#3399FF", [-3]),
+ 'reg.exp. ': new CodeKind("#0000FF", [-2]),
+ 'runtime ': new CodeKind("#000000", [-1]),
+ 'full code': new CodeKind("#DD0000", [0]),
+ 'opt code ': new CodeKind("#00EE00", [1]),
+ 'code stub': new CodeKind("#FF00FF", [2]),
+ 'built-in ': new CodeKind("#AA00AA", [3]),
+ 'inl.cache': new CodeKind("#4444AA", [4, 5, 6, 7, 8, 9, 10, 11, 12, 13]),
+}
+
+
+var xrange_start;
+var xrange_end;
+var obj_index = 0;
+var execution_pauses = [];
+var code_map = new CodeMap();
+
+var xrange_start_override = undefined;
+var xrange_end_override = undefined;
+var distortion_per_entry = 0.005; // Milliseconds
+var pause_tolerance = 0.005; // Milliseconds.
+
+var distortion = 0;
+
+
+function Range(start, end) {
+ // Everything from here on is in milliseconds.
+ this.start = start;
+ this.end = end;
+}
+
+
+function Tick(tick) {
+ this.tick = tick;
+}
+
+
+Range.prototype.duration = function() { return this.end - this.start; }
+
+
+function ProcessTimerEventStart(name, start) {
+ // Find out the thread id.
+ var new_event = TimerEvents[name];
+ if (new_event === undefined) return;
+ var thread_id = new_event.thread_id;
+
+ start = Math.max(last_time_stamp[thread_id] + kMinRangeLength, start);
+
+ // Last event on this thread is done with the start of this event.
+ var last_event = event_stack[thread_id].top();
+ if (last_event !== undefined) {
+ var new_range = new Range(last_time_stamp[thread_id], start);
+ last_event.ranges.push(new_range);
+ }
+ event_stack[thread_id].push(new_event);
+ last_time_stamp[thread_id] = start;
+}
+
+
+function ProcessTimerEventEnd(name, end) {
+ // Find out the thread id.
+ var finished_event = TimerEvents[name];
+ var thread_id = finished_event.thread_id;
+ assert(finished_event === event_stack[thread_id].pop(),
+ "inconsistent event stack");
+
+ end = Math.max(last_time_stamp[thread_id] + kMinRangeLength, end);
+
+ var new_range = new Range(last_time_stamp[thread_id], end);
+ finished_event.ranges.push(new_range);
+ last_time_stamp[thread_id] = end;
+}
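
Together these two handlers slice time per thread: whenever an event starts
or ends, the interval since the previous timestamp is attributed to whichever
event currently sits on top of that thread's stack. The same bookkeeping as a
standalone sketch:

    def process(events):  # events: (kind, name, timestamp) tuples
        stack, last, ranges = [], None, []
        for kind, name, ts in events:
            if stack and last is not None:
                ranges.append((stack[-1], last, ts))  # close current slice
            if kind == "start":
                stack.append(name)
            else:
                assert stack.pop() == name, "inconsistent event stack"
            last = ts
        return ranges

    print process([("start", "V8.Execute", 0), ("start", "V8.Parse", 4),
                   ("end", "V8.Parse", 6), ("end", "V8.Execute", 10)])
    # [('V8.Execute', 0, 4), ('V8.Parse', 4, 6), ('V8.Execute', 6, 10)]
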
+
+
+function ProcessCodeCreateEvent(type, kind, address, size, name) {
+ var code_entry = new CodeMap.CodeEntry(size, name);
+ code_entry.kind = kind;
+ code_map.addCode(address, code_entry);
+}
+
+
+function ProcessCodeMoveEvent(from, to) {
+ code_map.moveCode(from, to);
+}
+
+
+function ProcessCodeDeleteEvent(address) {
+ code_map.deleteCode(address);
+}
+
+
+function ProcessSharedLibrary(name, start, end) {
+ var code_entry = new CodeMap.CodeEntry(end - start, name);
+ code_entry.kind = -3; // External code kind.
+ for (var i = 0; i < kV8BinarySuffixes.length; i++) {
+ var suffix = kV8BinarySuffixes[i];
+ if (name.indexOf(suffix, name.length - suffix.length) >= 0) {
+ code_entry.kind = -1; // V8 runtime code kind.
+ break;
+ }
+ }
+ code_map.addLibrary(start, code_entry);
+}
+
+
+function FindCodeKind(kind) {
+ for (name in CodeKinds) {
+ if (CodeKinds[name].kinds.indexOf(kind) >= 0) {
+ return CodeKinds[name];
+ }
+ }
+}
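
FindCodeKind scans every bucket on each call; the lookup is equivalent to
inverting the CodeKinds table once (a sketch with the kind numbers listed
above):

    CODE_KINDS = {
        "external ": [-3], "reg.exp. ": [-2], "runtime ": [-1],
        "full code": [0], "opt code ": [1], "code stub": [2],
        "built-in ": [3], "inl.cache": range(4, 14),
    }
    KIND_TO_BUCKET = dict((kind, name)
                          for name, kinds in CODE_KINDS.items()
                          for kind in kinds)
    print KIND_TO_BUCKET[1]   # 'opt code '
    print KIND_TO_BUCKET[7]   # 'inl.cache'
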
+
+
+function ProcessTickEvent(pc, sp, timer, unused_x, unused_y, vmstate, stack) {
+ var tick = new Tick(timer);
+
+ var entry = code_map.findEntry(pc);
+ if (entry) FindCodeKind(entry.kind).in_execution.push(tick);
+
+ for (var i = 0; i < kStackFrames; i++) {
+ if (!stack[i]) break;
+ var entry = code_map.findEntry(stack[i]);
+ if (entry) FindCodeKind(entry.kind).stack_frames[i].push(tick);
+ }
+}
+
+
+function FindPlotRange() {
+ var start_found = (xrange_start_override || xrange_start_override == 0);
+ var end_found = (xrange_end_override || xrange_end_override == 0);
+ xrange_start = start_found ? xrange_start_override : Infinity;
+ xrange_end = end_found ? xrange_end_override : -Infinity;
+
+ if (start_found && end_found) return;
+
+ for (name in TimerEvents) {
+ var ranges = TimerEvents[name].ranges;
+ for (var i = 0; i < ranges.length; i++) {
+ if (ranges[i].start < xrange_start && !start_found) {
+ xrange_start = ranges[i].start;
+ }
+ if (ranges[i].end > xrange_end && !end_found) {
+ xrange_end = ranges[i].end;
+ }
+ }
+ }
+
+ for (codekind in CodeKinds) {
+ var ticks = CodeKinds[codekind].in_execution;
+ for (var i = 0; i < ticks.length; i++) {
+ if (ticks[i].tick < xrange_start && !start_found) {
+ xrange_start = ticks[i].tick;
+ }
+ if (ticks[i].tick > xrange_end && !end_found) {
+ xrange_end = ticks[i].tick;
+ }
+ }
+ }
+
+ // Set pause tolerance to something appropriate for the plot resolution
+ // to make it easier for gnuplot.
+ pause_tolerance = (xrange_end - xrange_start) / kResX / 10;
+}
+
+
+function parseTimeStamp(timestamp) {
+ distortion += distortion_per_entry;
+ return parseInt(timestamp) / 1000 - distortion;
+}
+
+
+function ParseArguments(args) {
+ var processor = new ArgumentsProcessor(args);
+ do {
+ if (!processor.parse()) break;
+ var result = processor.result();
+ var distortion = parseInt(result.distortion);
+ if (isNaN(distortion)) break;
+ // Convert picoseconds to milliseconds.
+ distortion_per_entry = distortion / 1000000;
+ var rangelimits = result.range.split(",");
+ var range_start = parseInt(rangelimits[0]);
+ var range_end = parseInt(rangelimits[1]);
+ xrange_start_override = isNaN(range_start) ? undefined : range_start;
+ xrange_end_override = isNaN(range_end) ? undefined : range_end;
+ return;
+ } while (false);
+ processor.printUsageAndExit();
+}
+
+
+function CollectData() {
+ // Collect data from log.
+ var logreader = new LogReader(
+ { 'timer-event-start': { parsers: [null, parseTimeStamp],
+ processor: ProcessTimerEventStart },
+ 'timer-event-end': { parsers: [null, parseTimeStamp],
+ processor: ProcessTimerEventEnd },
+ 'shared-library': { parsers: [null, parseInt, parseInt],
+ processor: ProcessSharedLibrary },
+ 'code-creation': { parsers: [null, parseInt, parseInt, parseInt, null],
+ processor: ProcessCodeCreateEvent },
+ 'code-move': { parsers: [parseInt, parseInt],
+ processor: ProcessCodeMoveEvent },
+ 'code-delete': { parsers: [parseInt],
+ processor: ProcessCodeDeleteEvent },
+ 'tick': { parsers: [parseInt, parseInt, parseTimeStamp,
+ null, null, parseInt, 'var-args'],
+ processor: ProcessTickEvent }
+ });
+
+ var line;
+ while (line = readline()) {
+ logreader.processLogLine(line);
+ }
+
+ // Collect execution pauses.
+ for (name in TimerEvents) {
+ var event = TimerEvents[name];
+ if (!event.pause) continue;
+ var ranges = event.ranges;
+ for (var j = 0; j < ranges.length; j++) execution_pauses.push(ranges[j]);
+ }
+ execution_pauses = MergeRanges(execution_pauses);
+}
+
+
+function DrawBar(row, color, start, end, width) {
+ obj_index++;
+ command = "set object " + obj_index + " rect";
+ command += " from " + start + ", " + (row - width);
+ command += " to " + end + ", " + (row + width);
+ command += " fc rgb \"" + color + "\"";
+ print(command);
+}
+
+
+function TicksToRanges(ticks) {
+ var ranges = [];
+ for (var i = 0; i < ticks.length; i++) {
+ var tick = ticks[i].tick;
+ ranges.push(new Range(tick - kTickHalfDuration, tick + kTickHalfDuration));
+ }
+ return ranges;
+}
+
+
+function MergeRanges(ranges) {
+ ranges.sort(function(a, b) { return a.start - b.start; });
+ var result = [];
+ var j = 0;
+ for (var i = 0; i < ranges.length; i = j) {
+ var merge_start = ranges[i].start;
+ if (merge_start > xrange_end) break; // Out of plot range.
+ var merge_end = ranges[i].end;
+ for (j = i + 1; j < ranges.length; j++) {
+ var next_range = ranges[j];
+ // Don't merge ranges if there is no overlap (including merge tolerance).
+ if (next_range.start > merge_end + pause_tolerance) break;
+ // Merge ranges.
+ if (next_range.end > merge_end) { // Extend range end.
+ merge_end = next_range.end;
+ }
+ }
+ if (merge_end < xrange_start) continue; // Out of plot range.
+ if (merge_end < merge_start) continue; // Not an actual range.
+ result.push(new Range(merge_start, merge_end));
+ }
+ return result;
+}
+
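
MergeRanges is interval merging with a tolerance: after sorting by start, a
range keeps growing while the next one begins within pause_tolerance of the
current end (the version above also clips to the plot range, which this
sketch omits):

    def merge_ranges(ranges, tolerance=0.005):  # times in milliseconds
        merged = []
        for start, end in sorted(ranges):
            if merged and start <= merged[-1][1] + tolerance:
                merged[-1][1] = max(merged[-1][1], end)  # extend current
            else:
                merged.append([start, end])
        return merged

    print merge_ranges([(0.0, 1.0), (1.003, 2.0), (5.0, 6.0)])
    # [[0.0, 2.0], [5.0, 6.0]] -- the 0.003 ms gap is within tolerance
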
+
+function RestrictRangesTo(ranges, start, end) {
+ var result = [];
+ for (var i = 0; i < ranges.length; i++) {
+ if (ranges[i].start <= end && ranges[i].end >= start) {
+ result.push(new Range(Math.max(ranges[i].start, start),
+ Math.min(ranges[i].end, end)));
+ }
+ }
+ return result;
+}
+
+
+function GnuplotOutput() {
+ print("set terminal pngcairo size " + kResX + "," + kResY +
+ " enhanced font 'Helvetica,10'");
+ print("set yrange [0:" + (num_timer_event + 1) + "]");
+ print("set xlabel \"execution time in ms\"");
+ print("set xrange [" + xrange_start + ":" + xrange_end + "]");
+ print("set style fill pattern 2 bo 1");
+ print("set style rect fs solid 1 noborder");
+ print("set style line 1 lt 1 lw 1 lc rgb \"#000000\"");
+ print("set xtics out nomirror");
+ print("unset key");
+
+ var percentages = {};
+ var total = 0;
+ for (var name in TimerEvents) {
+ var event = TimerEvents[name];
+ var ranges = RestrictRangesTo(event.ranges, xrange_start, xrange_end);
+ ranges = MergeRanges(ranges);
+ var sum =
+ ranges.map(function(range) { return range.duration(); })
+ .reduce(function(a, b) { return a + b; }, 0);
+ percentages[name] = (sum / (xrange_end - xrange_start) * 100).toFixed(1);
+ }
+
+ // Name Y-axis.
+ var ytics = [];
+ for (name in TimerEvents) {
+ var index = TimerEvents[name].index;
+ ytics.push('"' + name + ' (' + percentages[name] + '%%)" ' + index);
+ }
+ ytics.push('"code kind being executed"' + ' ' + (kY1Offset - 1));
+ ytics.push('"top ' + kStackFrames + ' js stack frames"' + ' ' +
+ (kY1Offset - 2));
+ ytics.push('"pause times" 0');
+ print("set ytics out nomirror (" + ytics.join(', ') + ")");
+
+ // Plot timeline.
+ for (var name in TimerEvents) {
+ var event = TimerEvents[name];
+ var ranges = MergeRanges(event.ranges);
+ for (var i = 0; i < ranges.length; i++) {
+ DrawBar(event.index, event.color,
+ ranges[i].start, ranges[i].end,
+ kTimerEventWidth);
+ }
+ }
+
+ // Plot code kind gathered from ticks.
+ for (var name in CodeKinds) {
+ var code_kind = CodeKinds[name];
+ var offset = kY1Offset - 1;
+ // Top most frame.
+ var row = MergeRanges(TicksToRanges(code_kind.in_execution));
+ for (var j = 0; j < row.length; j++) {
+ DrawBar(offset, code_kind.color,
+ row[j].start, row[j].end, kExecutionFrameWidth);
+ }
+ offset = offset - 2 * kExecutionFrameWidth - kGapWidth;
+ // Javascript frames.
+ for (var i = 0; i < kStackFrames; i++) {
+ offset = offset - 2 * kStackFrameWidth - kGapWidth;
+ row = MergeRanges(TicksToRanges(code_kind.stack_frames[i]));
+ for (var j = 0; j < row.length; j++) {
+ DrawBar(offset, code_kind.color,
+ row[j].start, row[j].end, kStackFrameWidth);
+ }
+ }
+ }
+
+ // Add labels as legend for code kind colors.
+ var padding = kCodeKindLabelPadding * (xrange_end - xrange_start) / kResX;
+ var label_x = xrange_start;
+ var label_y = kY1Offset;
+ for (var name in CodeKinds) {
+ label_x += padding;
+ print("set label \"" + name + "\" at " + label_x + "," + label_y +
+ " textcolor rgb \"" + CodeKinds[name].color + "\"" +
+ " font \"Helvetica,9'\"");
+ }
+
+ if (execution_pauses.length == 0) {
+ // Force plot and return without plotting execution pause impulses.
+ print("plot 1/0");
+ return;
+ }
+
+ // Label the longest pauses.
+ execution_pauses.sort(
+ function(a, b) { return b.duration() - a.duration(); });
+
+ var max_pause_time = execution_pauses[0].duration();
+ padding = kPauseLabelPadding * (xrange_end - xrange_start) / kResX;
+ var y_scale = kY1Offset / max_pause_time / 2;
+ for (var i = 0; i < execution_pauses.length && i < kNumPauseLabels; i++) {
+ var pause = execution_pauses[i];
+ var label_content = (pause.duration() | 0) + " ms";
+ var label_x = pause.end + padding;
+ var label_y = Math.max(1, (pause.duration() * y_scale));
+ print("set label \"" + label_content + "\" at " +
+ label_x + "," + label_y + " font \"Helvetica,7\"");
+ }
+
+ // Scale second Y-axis appropriately.
+ var y2range = max_pause_time * num_timer_event / kY1Offset * 2;
+ print("set y2range [0:" + y2range + "]");
+ // Plot graph with impulses as data set.
+ print("plot '-' using 1:2 axes x1y2 with impulses ls 1");
+ for (var i = 0; i < execution_pauses.length; i++) {
+ var pause = execution_pauses[i];
+ print(pause.end + " " + pause.duration());
+ }
+ print("e");
+}
+
+
+ParseArguments(arguments);
+CollectData();
+FindPlotRange();
+GnuplotOutput();
diff --git a/src/3rdparty/v8/tools/run-llprof.sh b/src/3rdparty/v8/tools/run-llprof.sh
new file mode 100755
index 0000000..d526170
--- /dev/null
+++ b/src/3rdparty/v8/tools/run-llprof.sh
@@ -0,0 +1,69 @@
+#!/bin/sh
+#
+# Copyright 2012 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+########## Global variable definitions
+
+# Ensure that <your CPU clock> / $SAMPLE_EVERY_N_CYCLES < $MAXIMUM_SAMPLE_RATE.
+MAXIMUM_SAMPLE_RATE=10000000
+SAMPLE_EVERY_N_CYCLES=10000
+SAMPLE_RATE_CONFIG_FILE="/proc/sys/kernel/perf_event_max_sample_rate"
+KERNEL_MAP_CONFIG_FILE="/proc/sys/kernel/kptr_restrict"
+
+########## Usage
+
+usage() {
+cat << EOF
+usage: $0 <benchmark_command>
+
+Executes <benchmark_command> under observation by the kernel's "perf" \
+framework, then calls the low-level tick processor to analyze the results.
+EOF
+}
+
+if [ $# -eq 0 ] || [ "$1" == "-h" ] || [ "$1" == "--help" ] ; then
+ usage
+ exit 1
+fi
+
+########## Actual script execution
+
+ACTUAL_SAMPLE_RATE=$(cat $SAMPLE_RATE_CONFIG_FILE)
+if [ "$ACTUAL_SAMPLE_RATE" -lt "$MAXIMUM_SAMPLE_RATE" ] ; then
+ echo "Setting appropriate maximum sample rate..."
+ echo $MAXIMUM_SAMPLE_RATE | sudo tee $SAMPLE_RATE_CONFIG_FILE
+fi
+
+ACTUAL_KERNEL_MAP_RESTRICTION=$(cat $KERNEL_MAP_CONFIG_FILE)
+if [ "$ACTUAL_KERNEL_MAP_RESTRICTION" -ne "0" ] ; then
+ echo "Disabling kernel address map restriction..."
+ echo 0 | sudo tee $KERNEL_MAP_CONFIG_FILE
+fi
+
+echo "Running benchmark..."
+perf record -R -e cycles -c $SAMPLE_EVERY_N_CYCLES -f -i $@ --ll-prof
diff --git a/src/3rdparty/v8/tools/run-tests.py b/src/3rdparty/v8/tools/run-tests.py
index c09ea06..cb64b45 100755
--- a/src/3rdparty/v8/tools/run-tests.py
+++ b/src/3rdparty/v8/tools/run-tests.py
@@ -32,6 +32,7 @@ import multiprocessing
import optparse
import os
from os.path import join
+import shlex
import subprocess
import sys
import time
@@ -57,8 +58,10 @@ VARIANT_FLAGS = [[],
["--nocrankshaft"]]
MODE_FLAGS = {
"debug" : ["--nobreak-on-abort", "--nodead-code-elimination",
- "--enable-slow-asserts", "--debug-code", "--verify-heap"],
- "release" : ["--nobreak-on-abort", "--nodead-code-elimination"]}
+ "--nofold-constants", "--enable-slow-asserts",
+ "--debug-code", "--verify-heap"],
+ "release" : ["--nobreak-on-abort", "--nodead-code-elimination",
+ "--nofold-constants"]}
SUPPORTED_ARCHS = ["android_arm",
"android_ia32",
@@ -155,7 +158,7 @@ def ProcessOptions(options):
options.mode = tokens[1]
options.mode = options.mode.split(",")
for mode in options.mode:
- if not mode in ["debug", "release"]:
+ if not mode.lower() in ["debug", "release"]:
print "Unknown mode %s" % mode
return False
if options.arch in ["auto", "native"]:
@@ -176,6 +179,8 @@ def ProcessOptions(options):
print("Specifying --command-prefix disables network distribution, "
"running tests locally.")
options.no_network = True
+ options.command_prefix = shlex.split(options.command_prefix)
+ options.extra_flags = shlex.split(options.extra_flags)
if options.j == 0:
options.j = multiprocessing.cpu_count()
if options.no_stress:
@@ -189,7 +194,7 @@ def ProcessOptions(options):
if options.valgrind:
run_valgrind = os.path.join("tools", "run-valgrind.py")
# This is OK for distributed running, so we don't need to set no_network.
- options.command_prefix = ("python -u " + run_valgrind +
+ options.command_prefix = (["python", "-u", run_valgrind] +
options.command_prefix)
return True
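
The switch to shlex.split matters because command prefixes and extra flags
are now handed to subprocess as argument lists, and naive whitespace
splitting would break quoted arguments:

    import shlex

    prefix = 'nice -n 10 "path with spaces/wrapper"'
    print shlex.split(prefix)
    # ['nice', '-n', '10', 'path with spaces/wrapper']
    print prefix.split()
    # ['nice', '-n', '10', '"path', 'with', 'spaces/wrapper"']  (broken)
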
diff --git a/src/3rdparty/v8/tools/run-valgrind.py b/src/3rdparty/v8/tools/run-valgrind.py
index 49c1b70..f25f7a1 100755
--- a/src/3rdparty/v8/tools/run-valgrind.py
+++ b/src/3rdparty/v8/tools/run-valgrind.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python
+#!/usr/bin/env python
#
# Copyright 2009 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
diff --git a/src/3rdparty/v8/tools/test.py b/src/3rdparty/v8/tools/test.py
index b3b62b3..c528fc4 100755
--- a/src/3rdparty/v8/tools/test.py
+++ b/src/3rdparty/v8/tools/test.py
@@ -685,8 +685,10 @@ SUFFIX = {
'release' : '' }
FLAGS = {
'debug' : ['--nobreak-on-abort', '--nodead-code-elimination',
- '--enable-slow-asserts', '--debug-code', '--verify-heap'],
- 'release' : ['--nobreak-on-abort', '--nodead-code-elimination']}
+ '--nofold-constants', '--enable-slow-asserts',
+ '--debug-code', '--verify-heap'],
+ 'release' : ['--nobreak-on-abort', '--nodead-code-elimination',
+ '--nofold-constants']}
TIMEOUT_SCALEFACTOR = {
'debug' : 4,
'release' : 1 }
diff --git a/src/3rdparty/v8/tools/testrunner/local/execution.py b/src/3rdparty/v8/tools/testrunner/local/execution.py
index 6004367..0f52616 100644
--- a/src/3rdparty/v8/tools/testrunner/local/execution.py
+++ b/src/3rdparty/v8/tools/testrunner/local/execution.py
@@ -90,7 +90,7 @@ class Runner(object):
self.indicator.Starting()
self._RunInternal(jobs)
self.indicator.Done()
- if self.failed:
+ if self.failed or self.remaining:
return 1
return 0
@@ -167,11 +167,11 @@ class Runner(object):
d8testflag = ["--test"]
if utils.IsWindows():
shell += ".exe"
- cmd = ([self.context.command_prefix] +
+ cmd = (self.context.command_prefix +
[os.path.abspath(os.path.join(self.context.shell_dir, shell))] +
d8testflag +
test.suite.GetFlagsForTestCase(test, self.context) +
- [self.context.extra_flags])
+ self.context.extra_flags)
return cmd
diff --git a/src/3rdparty/v8/tools/testrunner/local/testsuite.py b/src/3rdparty/v8/tools/testrunner/local/testsuite.py
index de5cddd..473e8b1 100644
--- a/src/3rdparty/v8/tools/testrunner/local/testsuite.py
+++ b/src/3rdparty/v8/tools/testrunner/local/testsuite.py
@@ -30,6 +30,7 @@ import imp
import os
from . import statusfile
+from . import utils
class TestSuite(object):
@@ -88,6 +89,8 @@ class TestSuite(object):
used_rules = set()
for t in self.tests:
testname = self.CommonTestName(t)
+ if utils.IsWindows():
+ testname = testname.replace("\\", "/")
if testname in self.rules:
used_rules.add(testname)
outcomes = self.rules[testname]
diff --git a/src/3rdparty/v8/tools/testrunner/objects/context.py b/src/3rdparty/v8/tools/testrunner/objects/context.py
index b72284b..3ea215a 100644
--- a/src/3rdparty/v8/tools/testrunner/objects/context.py
+++ b/src/3rdparty/v8/tools/testrunner/objects/context.py
@@ -41,10 +41,10 @@ class Context():
def Pack(self):
return [self.arch, self.mode, self.mode_flags, self.timeout, self.isolates,
- self.extra_flags]
+ self.command_prefix, self.extra_flags]
@staticmethod
def Unpack(packed):
# For the order of the fields, refer to Pack() above.
return Context(packed[0], packed[1], None, packed[2], False,
- packed[3], packed[4], "", packed[5])
+ packed[3], packed[4], packed[5], packed[6])
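
Pack/Unpack form a hand-rolled wire format for distributed test runs: fields
are read back purely by position, which is why command_prefix has to be
threaded through both methods in the same change. A sketch of the positional
invariant (field names taken from the diff):

    FIELDS = ["arch", "mode", "mode_flags", "timeout", "isolates",
              "command_prefix", "extra_flags"]

    def pack(ctx):
        return [ctx[f] for f in FIELDS]

    def unpack(packed):
        return dict(zip(FIELDS, packed))

    ctx = {"arch": "ia32", "mode": "release", "mode_flags": [],
           "timeout": 60, "isolates": False,
           "command_prefix": ["nice"], "extra_flags": []}
    assert unpack(pack(ctx)) == ctx  # round-trips only if the order matches
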
diff --git a/src/3rdparty/v8/tools/testrunner/server/compression.py b/src/3rdparty/v8/tools/testrunner/server/compression.py
index ce90c4f..d5ed415 100644
--- a/src/3rdparty/v8/tools/testrunner/server/compression.py
+++ b/src/3rdparty/v8/tools/testrunner/server/compression.py
@@ -30,7 +30,6 @@ import cStringIO as StringIO
try:
import ujson as json
except ImportError:
- print("You should install UltraJSON, it is much faster!")
import json
import os
import struct
diff --git a/src/3rdparty/v8/tools/tick-processor.html b/src/3rdparty/v8/tools/tick-processor.html
new file mode 100644
index 0000000..bc9f636
--- /dev/null
+++ b/src/3rdparty/v8/tools/tick-processor.html
@@ -0,0 +1,168 @@
+<!DOCTYPE html>
+<!-- Copyright 2012 the V8 project authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following
+ disclaimer in the documentation and/or other materials provided
+ with the distribution.
+ * Neither the name of Google Inc. nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -->
+
+<html lang="en">
+<head>
+ <meta charset="utf-8"/>
+ <title>V8 Tick Processor</title>
+
+ <style type="text/css">
+ body {
+ font-family: Verdana, Arial, Helvetica, sans-serif;
+ font-size: 10pt;
+ }
+ h4 {
+ margin-bottom: 0px;
+ }
+ p {
+ margin-top: 0px;
+ }
+ </style>
+
+ <script src="splaytree.js"></script>
+ <script src="codemap.js"></script>
+ <script src="csvparser.js"></script>
+ <script src="consarray.js"></script>
+ <script src="profile.js"></script>
+ <script src="profile_view.js"></script>
+ <script src="logreader.js"></script>
+ <script src="tickprocessor.js"></script>
+
+ <script type="text/javascript">
+
+var v8log_content;
+var textout;
+
+function load_logfile(evt) {
+ textout.value = "";
+ var f = evt.target.files[0];
+ if (f) {
+ var reader = new FileReader();
+ reader.onload = function(event) {
+ v8log_content = event.target.result;
+ start_process();
+ };
+ reader.onerror = function(event) {
+ console.error("File could not be read! Code " + event.target.error.code);
+ };
+ reader.readAsText(f);
+ } else {
+ alert("Failed to load file");
+ }
+}
+
+function print(arg) {
+ textout.value+=arg+"\n";
+}
+
+function start_process() {
+ ArgumentsProcessor.DEFAULTS = {
+ logFileName: 'v8.log',
+ snapshotLogFileName: null,
+ platform: 'unix',
+ stateFilter: null,
+ callGraphSize: 5,
+ ignoreUnknown: false,
+ separateIc: false,
+ targetRootFS: '',
+ nm: 'nm'
+ };
+
+ var entriesProviders = {
+ 'unix': UnixCppEntriesProvider,
+ 'windows': WindowsCppEntriesProvider,
+ 'mac': MacCppEntriesProvider
+ };
+
+ var snapshotLogProcessor; // not used
+
+ var tickProcessor = new TickProcessor(
+ new (entriesProviders[ArgumentsProcessor.DEFAULTS.platform])(
+ ArgumentsProcessor.DEFAULTS.nm,
+ ArgumentsProcessor.DEFAULTS.targetRootFS),
+ ArgumentsProcessor.DEFAULTS.separateIc,
+ ArgumentsProcessor.DEFAULTS.callGraphSize,
+ ArgumentsProcessor.DEFAULTS.ignoreUnknown,
+ ArgumentsProcessor.DEFAULTS.stateFilter,
+ snapshotLogProcessor);
+
+ tickProcessor.processLogChunk(v8log_content);
+ tickProcessor.printStatistics();
+}
+
+function Load() {
+ document.getElementById('fileinput').addEventListener(
+ 'change', load_logfile, false);
+ textout = document.getElementById('textout');
+}
+</script>
+</head>
+<body onLoad="Load()">
+
+<h3 style="margin-top: 2px;">
+ Chrome V8 profiling log processor
+</h3>
+<p>
+Process V8's profiling log (sampling profiler tick information) directly in
+your browser. This is particularly useful if you don't have the V8 shell
+(d8) at hand. You still have to run Chrome with the appropriate
+<a href="https://code.google.com/p/v8/wiki/ProfilingChromiumWithV8">
+ command line flags</a>
+to produce the profiling log.
+</p>
+<h4>Usage:</h4>
+<p>
+Click the button and browse to the profiling log file (usually v8.log).
+Processing starts automatically and the output appears in the text area
+below.
+</p>
+<h4>Limitations and disclaimer:</h4>
+<p>
+This page offers a subset of the functionality of the command-line tick
+processor utility in the V8 repository. In particular, this page cannot
+access the command-line utility that provides library symbol information,
+so the [C++] section of the output stays empty. Note that this web-based
+tool is provided only for convenience and quick reference; you should
+refer to the
+<a href="https://code.google.com/p/v8/wiki/V8Profiler">
+ command-line</a>
+version for full output.
+</p>
+<p>
+<input type="file" id="fileinput" />
+</p>
+<p>
+<textarea name="myTextArea" cols="120" rows="40" wrap="off" id="textout"
+ readonly="yes"></textarea>
+</p>
+<p style="font-style:italic;">
+Copyright the V8 Authors - Last change to this page: 12/12/2012
+</p>
+
+
+</body>
+</html>
diff --git a/src/3rdparty/v8/tools/tickprocessor-driver.js b/src/3rdparty/v8/tools/tickprocessor-driver.js
index 313c6d4..02cb81a 100644
--- a/src/3rdparty/v8/tools/tickprocessor-driver.js
+++ b/src/3rdparty/v8/tools/tickprocessor-driver.js
@@ -55,6 +55,8 @@ var tickProcessor = new TickProcessor(
params.callGraphSize,
params.ignoreUnknown,
params.stateFilter,
- snapshotLogProcessor);
+ snapshotLogProcessor,
+ params.distortion,
+ params.range);
tickProcessor.processLogFile(params.logFileName);
tickProcessor.printStatistics();
diff --git a/src/3rdparty/v8/tools/tickprocessor.js b/src/3rdparty/v8/tools/tickprocessor.js
index f6e315d..c9ee101 100644
--- a/src/3rdparty/v8/tools/tickprocessor.js
+++ b/src/3rdparty/v8/tools/tickprocessor.js
@@ -73,7 +73,7 @@ function parseState(s) {
function SnapshotLogProcessor() {
LogReader.call(this, {
'code-creation': {
- parsers: [null, parseInt, parseInt, null, 'var-args'],
+ parsers: [null, parseInt, parseInt, parseInt, null, 'var-args'],
processor: this.processCodeCreation },
'code-move': { parsers: [parseInt, parseInt],
processor: this.processCodeMove },
@@ -107,7 +107,7 @@ inherits(SnapshotLogProcessor, LogReader);
SnapshotLogProcessor.prototype.processCodeCreation = function(
- type, start, size, name, maybe_func) {
+ type, kind, start, size, name, maybe_func) {
if (maybe_func.length) {
var funcAddr = parseInt(maybe_func[0]);
var state = parseState(maybe_func[1]);
@@ -151,12 +151,14 @@ function TickProcessor(
callGraphSize,
ignoreUnknown,
stateFilter,
- snapshotLogProcessor) {
+ snapshotLogProcessor,
+ distortion,
+ range) {
LogReader.call(this, {
'shared-library': { parsers: [null, parseInt, parseInt],
processor: this.processSharedLibrary },
'code-creation': {
- parsers: [null, parseInt, parseInt, null, 'var-args'],
+ parsers: [null, parseInt, parseInt, parseInt, null, 'var-args'],
processor: this.processCodeCreation },
'code-move': { parsers: [parseInt, parseInt],
processor: this.processCodeMove },
@@ -167,13 +169,17 @@ function TickProcessor(
'snapshot-pos': { parsers: [parseInt, parseInt],
processor: this.processSnapshotPosition },
'tick': {
- parsers: [parseInt, parseInt, parseInt,
+ parsers: [parseInt, parseInt, parseInt, parseInt,
parseInt, parseInt, 'var-args'],
processor: this.processTick },
'heap-sample-begin': { parsers: [null, null, parseInt],
processor: this.processHeapSampleBegin },
'heap-sample-end': { parsers: [null, null],
processor: this.processHeapSampleEnd },
+ 'timer-event-start' : { parsers: [null, null, null],
+ processor: this.advanceDistortion },
+ 'timer-event-end' : { parsers: [null, null, null],
+ processor: this.advanceDistortion },
// Ignored events.
'profiler': null,
'function-creation': null,
@@ -194,6 +200,17 @@ function TickProcessor(
var ticks = this.ticks_ =
{ total: 0, unaccounted: 0, excluded: 0, gc: 0 };
+ distortion = parseInt(distortion);
+ // Convert picoseconds to nanoseconds.
+ this.distortion_per_entry = isNaN(distortion) ? 0 : (distortion / 1000);
+ this.distortion = 0;
+ var rangelimits = range.split(",");
+ var range_start = parseInt(rangelimits[0]);
+ var range_end = parseInt(rangelimits[1]);
+ // Convert milliseconds to nanoseconds.
+ this.range_start = isNaN(range_start) ? -Infinity : (range_start * 1000);
+ this.range_end = isNaN(range_end) ? Infinity : (range_end * 1000);
+
V8Profile.prototype.handleUnknownCode = function(
operation, addr, opt_stackPos) {
var op = Profile.Operation;
@@ -231,7 +248,7 @@ TickProcessor.VmStates = {
JS: 0,
GC: 1,
COMPILER: 2,
- PARALLEL_COMPILER_PROLOGUE: 3,
+ PARALLEL_COMPILER: 3,
OTHER: 4,
EXTERNAL: 5
};
@@ -309,7 +326,7 @@ TickProcessor.prototype.processSharedLibrary = function(
TickProcessor.prototype.processCodeCreation = function(
- type, start, size, name, maybe_func) {
+ type, kind, start, size, name, maybe_func) {
name = this.deserializedEntriesNames_[start] || name;
if (maybe_func.length) {
var funcAddr = parseInt(maybe_func[0]);
@@ -350,10 +367,16 @@ TickProcessor.prototype.includeTick = function(vmState) {
TickProcessor.prototype.processTick = function(pc,
sp,
+ ns_since_start,
is_external_callback,
tos_or_external_callback,
vmState,
stack) {
+ this.distortion += this.distortion_per_entry;
+ ns_since_start -= this.distortion;
+ if (ns_since_start < this.range_start || ns_since_start > this.range_end) {
+ return;
+ }
this.ticks_.total++;
if (vmState == TickProcessor.VmStates.GC) this.ticks_.gc++;
if (!this.includeTick(vmState)) {
@@ -380,6 +403,11 @@ TickProcessor.prototype.processTick = function(pc,
};
+TickProcessor.prototype.advanceDistortion = function() {
+ this.distortion += this.distortion_per_entry;
+}
+
+
TickProcessor.prototype.processHeapSampleBegin = function(space, state, ticks) {
if (space != 'Heap') return;
this.currentProducerProfile_ = new CallTree();
@@ -794,7 +822,11 @@ function ArgumentsProcessor(args) {
'--target': ['targetRootFS', '',
'Specify the target root directory for cross environment'],
'--snapshot-log': ['snapshotLogFileName', 'snapshot.log',
- 'Specify snapshot log file to use (e.g. --snapshot-log=snapshot.log)']
+ 'Specify snapshot log file to use (e.g. --snapshot-log=snapshot.log)'],
+ '--range': ['range', 'auto,auto',
+ 'Specify the range limit as [start],[end]'],
+ '--distortion': ['distortion', 0,
+ 'Specify the logging overhead in picoseconds']
};
this.argsDispatch_['--js'] = this.argsDispatch_['-j'];
this.argsDispatch_['--gc'] = this.argsDispatch_['-g'];
@@ -813,7 +845,9 @@ ArgumentsProcessor.DEFAULTS = {
ignoreUnknown: false,
separateIc: false,
targetRootFS: '',
- nm: 'nm'
+ nm: 'nm',
+ range: 'auto,auto',
+ distortion: 0
};
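
The new --range option defaults to "auto,auto"; any side that fails to parse
as a number is left unbounded, mirroring the isNaN checks in the constructor
above (--distortion is likewise converted from picoseconds to nanoseconds by
dividing by 1000). A sketch of the range handling:

    def parse_range(range_str):
        def side(value, default):
            try:
                return float(value)
            except ValueError:
                return default  # 'auto' (or garbage) leaves it unbounded
        start, end = range_str.split(",")
        return side(start, float("-inf")), side(end, float("inf"))

    print parse_range("auto,auto")  # (-inf, inf)
    print parse_range("10,250")     # (10.0, 250.0)
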
diff --git a/src/v8/v8.pri b/src/v8/v8.pri
index 3a563ff..085df8d 100644
--- a/src/v8/v8.pri
+++ b/src/v8/v8.pri
@@ -7,17 +7,19 @@ isEmpty(QT_ARCH) {
isEmpty(V8_TARGET_ARCH) {
# Detect target
- equals(QT_ARCH, x86_64)|contains(CONFIG, x86_64):V8_TARGET_ARCH = x64
- else:equals(QT_ARCH, "i386"): V8_TARGET_ARCH = ia32
- else:equals(QT_ARCH, "mips"): V8_TARGET_ARCH = mips
- else:equals(QT_ARCH, "arm"): V8_TARGET_ARCH = arm
- else:equals(QMAKE_HOST.arch, armv7l): V8_TARGET_ARCH = arm
- else:equals(QMAKE_HOST.arch, armv5tel): V8_TARGET_ARCH = arm
- else:equals(QMAKE_HOST.arch, x86_64): V8_TARGET_ARCH = x64
- else:equals(QMAKE_HOST.arch, x86): V8_TARGET_ARCH = ia32
- else:equals(QMAKE_HOST.arch, i386): V8_TARGET_ARCH = ia32
- else:equals(QMAKE_HOST.arch, i686): V8_TARGET_ARCH = ia32
- else:error("Couldn't detect supported v8 architecture ($$QMAKE_HOST.arch/$$QT_ARCH). Currently supported architectures are: x64, x86 and arm")
+ # Not a host build => the current arch is the target arch.
+ isEmpty(QT_TARGET_ARCH): \
+ QT_TARGET_ARCH = $$QT_ARCH
+
+ equals(QT_TARGET_ARCH, x86_64): \
+ V8_TARGET_ARCH = x64
+ else: equals(QT_TARGET_ARCH, "i386"): \
+ V8_TARGET_ARCH = ia32
+ else: equals(QT_TARGET_ARCH, "mips"): \
+ V8_TARGET_ARCH = mips
+ else: equals(QT_TARGET_ARCH, "arm"): \
+ V8_TARGET_ARCH = arm
+ else: \
+ error("Architecture $$QT_TARGET_ARCH is not supported by v8. Currently supported architectures are: x64, x86 and arm")
}
include($$PWD/v8base.pri)
@@ -65,6 +67,7 @@ SOURCES += \
$$V8SRC/checks.cc \
$$V8SRC/circular-queue.cc \
$$V8SRC/code-stubs.cc \
+ $$V8SRC/code-stubs-hydrogen.cc \
$$V8SRC/codegen.cc \
$$V8SRC/compilation-cache.cc \
$$V8SRC/compiler.cc \
@@ -96,11 +99,11 @@ SOURCES += \
$$V8SRC/handles.cc \
$$V8SRC/heap-profiler.cc \
$$V8SRC/heap.cc \
+ $$V8SRC/heap-snapshot-generator.cc \
$$V8SRC/hydrogen.cc \
$$V8SRC/hydrogen-instructions.cc \
$$V8SRC/ic.cc \
$$V8SRC/incremental-marking.cc \
- $$V8SRC/inspector.cc \
$$V8SRC/interface.cc \
$$V8SRC/interpreter-irregexp.cc \
$$V8SRC/isolate.cc \
@@ -108,10 +111,10 @@ SOURCES += \
$$V8SRC/lithium-allocator.cc \
$$V8SRC/lithium.cc \
$$V8SRC/liveedit.cc \
- $$V8SRC/liveobjectlist.cc \
$$V8SRC/log-utils.cc \
$$V8SRC/log.cc \
$$V8SRC/mark-compact.cc \
+ $$V8SRC/marking-thread.cc \
$$V8SRC/messages.cc \
$$V8SRC/objects.cc \
$$V8SRC/objects-printer.cc \
@@ -141,6 +144,7 @@ SOURCES += \
$$V8SRC/string-stream.cc \
$$V8SRC/strtod.cc \
$$V8SRC/stub-cache.cc \
+ $$V8SRC/sweeper-thread.cc \
$$V8SRC/token.cc \
$$V8SRC/transitions.cc \
$$V8SRC/type-info.cc \